from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Tuple, Union
import os
import json

import torch
from torch import nn
from torch.nn import functional as F

from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.utils import BaseOutput, logging
from diffusers.models.embeddings import TimestepEmbedding, Timesteps
from diffusers import ModelMixin
from .controlnet_unet_blocks import (
    CrossAttnDownBlock3D,
    DownBlock3D,
    UNetMidBlock3DCrossAttn,
    get_down_block,
)
from .resnet import InflatedConv3d

from diffusers.models.unet_2d_condition import UNet2DConditionModel
from diffusers.models.cross_attention import AttnProcessor

logger = logging.get_logger(__name__)

@dataclass
class ControlNetOutput(BaseOutput):
    # Residual feature maps produced by the ControlNet, to be added to the
    # corresponding down-block and mid-block activations of the main UNet.
    down_block_res_samples: Tuple[torch.Tensor]
    mid_block_res_sample: torch.Tensor

class ControlNetConditioningEmbedding(nn.Module):
    """
    Quoting from https://arxiv.org/abs/2302.05543: "Stable Diffusion uses a pre-processing method similar to VQ-GAN
    [11] to convert the entire dataset of 512 × 512 images into smaller 64 × 64 “latent images” for stabilized
    training. This requires ControlNets to convert image-based conditions to 64 × 64 feature space to match the
    convolution size. We use a tiny network E(·) of four convolution layers with 4 × 4 kernels and 2 × 2 strides
    (activated by ReLU, channels are 16, 32, 64, 128, initialized with Gaussian weights, trained jointly with the full
    model) to encode image-space conditions ... into feature maps ..."
    """

    def __init__(
        self,
        conditioning_embedding_channels: int,
        conditioning_channels: int = 3,
        block_out_channels: Tuple[int] = (16, 32, 96, 256),
    ):
        super().__init__()

        self.conv_in = InflatedConv3d(conditioning_channels, block_out_channels[0], kernel_size=3, padding=1)

        self.blocks = nn.ModuleList([])

        # each stage keeps the channel count with one conv, then downsamples
        # spatially (stride 2) while expanding channels with a second conv
        for i in range(len(block_out_channels) - 1):
            channel_in = block_out_channels[i]
            channel_out = block_out_channels[i + 1]
            self.blocks.append(InflatedConv3d(channel_in, channel_in, kernel_size=3, padding=1))
            self.blocks.append(InflatedConv3d(channel_in, channel_out, kernel_size=3, padding=1, stride=2))

        # zero-initialized "zero convolution": the conditioning branch contributes
        # nothing at the start of training
        self.conv_out = zero_module(
            InflatedConv3d(block_out_channels[-1], conditioning_embedding_channels, kernel_size=3, padding=1)
        )

    def forward(self, conditioning):
        embedding = self.conv_in(conditioning)
        embedding = F.silu(embedding)

        for block in self.blocks:
            embedding = block(embedding)
            embedding = F.silu(embedding)

        embedding = self.conv_out(embedding)

        return embedding
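
    # shape sketch (illustrative, default channels, an F-frame 512x512 condition):
    #   (B, 3, F, 512, 512) -> conv_in -> (B, 16, F, 512, 512)
    #   three stride-2 stages: 512 -> 256 -> 128 -> 64, channels 16 -> 32 -> 96 -> 256
    #   conv_out -> (B, conditioning_embedding_channels, F, 64, 64)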


class ControlNetModel3D(ModelMixin, ConfigMixin):
    _supports_gradient_checkpointing = True

    @register_to_config
    def __init__(
        self,
        in_channels: int = 4,
        flip_sin_to_cos: bool = True,
        freq_shift: int = 0,
        down_block_types: Tuple[str] = (
            "CrossAttnDownBlock3D",
            "CrossAttnDownBlock3D",
            "CrossAttnDownBlock3D",
            "DownBlock3D",
        ),
        only_cross_attention: Union[bool, Tuple[bool]] = False,
        block_out_channels: Tuple[int] = (320, 640, 1280, 1280),
        layers_per_block: int = 2,
        downsample_padding: int = 1,
        mid_block_scale_factor: float = 1,
        act_fn: str = "silu",
        norm_num_groups: Optional[int] = 32,
        norm_eps: float = 1e-5,
        cross_attention_dim: int = 1280,
        attention_head_dim: Union[int, Tuple[int]] = 8,
        dual_cross_attention: bool = False,
        use_linear_projection: bool = False,
        class_embed_type: Optional[str] = None,
        num_class_embeds: Optional[int] = None,
        upcast_attention: bool = False,
        resnet_time_scale_shift: str = "default",
        projection_class_embeddings_input_dim: Optional[int] = None,
        controlnet_conditioning_channel_order: str = "rgb",
        conditioning_embedding_out_channels: Optional[Tuple[int]] = (16, 32, 96, 256),
    ):
        super().__init__()

        # per-block settings must match the number of down blocks
        if len(block_out_channels) != len(down_block_types):
            raise ValueError(
                f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}."
            )

        if not isinstance(only_cross_attention, bool) and len(only_cross_attention) != len(down_block_types):
            raise ValueError(
                f"Must provide the same number of `only_cross_attention` as `down_block_types`. `only_cross_attention`: {only_cross_attention}. `down_block_types`: {down_block_types}."
            )

        if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types):
            raise ValueError(
                f"Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. `down_block_types`: {down_block_types}."
            )

        # input
        conv_in_kernel = 3
        conv_in_padding = (conv_in_kernel - 1) // 2
        self.conv_in = InflatedConv3d(
            in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding
        )

        # time
        time_embed_dim = block_out_channels[0] * 4

        self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)
        timestep_input_dim = block_out_channels[0]

        self.time_embedding = TimestepEmbedding(
            timestep_input_dim,
            time_embed_dim,
            act_fn=act_fn,
        )

        # class embedding
        if class_embed_type is None and num_class_embeds is not None:
            self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)
        elif class_embed_type == "timestep":
            self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)
        elif class_embed_type == "identity":
            self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)
        elif class_embed_type == "projection":
            if projection_class_embeddings_input_dim is None:
                raise ValueError(
                    "`class_embed_type`: 'projection' requires `projection_class_embeddings_input_dim` be set"
                )
            # The projection `class_embed_type` is the same as the timestep
            # `class_embed_type`, except:
            # 1. the `class_labels` inputs are not first converted to sinusoidal embeddings
            # 2. it projects from an arbitrary input dimension
            # `TimestepEmbedding` is just linear layers with an activation, so it
            # can be fed arbitrary vectors, not only sinusoidal timestep embeddings.
            self.class_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim)
        else:
            self.class_embedding = None

        # control net conditioning embedding
        self.controlnet_cond_embedding = ControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0],
            block_out_channels=conditioning_embedding_out_channels,
        )

        self.down_blocks = nn.ModuleList([])
        self.controlnet_down_blocks = nn.ModuleList([])

        # broadcast scalar per-block settings to one entry per down block
        if isinstance(only_cross_attention, bool):
            only_cross_attention = [only_cross_attention] * len(down_block_types)

        if isinstance(attention_head_dim, int):
            attention_head_dim = (attention_head_dim,) * len(down_block_types)

        # down
        output_channel = block_out_channels[0]

        # zero-initialized 1x1 projection for the conv_in residual
        controlnet_block = InflatedConv3d(output_channel, output_channel, kernel_size=1)
        controlnet_block = zero_module(controlnet_block)
        self.controlnet_down_blocks.append(controlnet_block)

        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                temb_channels=time_embed_dim,
                add_downsample=not is_final_block,
                resnet_eps=norm_eps,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                cross_attention_dim=cross_attention_dim,
                attn_num_head_channels=attention_head_dim[i],
                downsample_padding=downsample_padding,
                dual_cross_attention=dual_cross_attention,
                use_linear_projection=use_linear_projection,
                only_cross_attention=only_cross_attention[i],
                upcast_attention=upcast_attention,
                resnet_time_scale_shift=resnet_time_scale_shift,
            )
            self.down_blocks.append(down_block)

            # one projection per resnet layer in this block ...
            for _ in range(layers_per_block):
                controlnet_block = InflatedConv3d(output_channel, output_channel, kernel_size=1)
                controlnet_block = zero_module(controlnet_block)
                self.controlnet_down_blocks.append(controlnet_block)

            # ... plus one for the downsampler output
            if not is_final_block:
                controlnet_block = InflatedConv3d(output_channel, output_channel, kernel_size=1)
                controlnet_block = zero_module(controlnet_block)
                self.controlnet_down_blocks.append(controlnet_block)

        # mid
        mid_block_channel = block_out_channels[-1]

        controlnet_block = InflatedConv3d(mid_block_channel, mid_block_channel, kernel_size=1)
        controlnet_block = zero_module(controlnet_block)
        self.controlnet_mid_block = controlnet_block

        self.mid_block = UNetMidBlock3DCrossAttn(
            in_channels=block_out_channels[-1],
            temb_channels=time_embed_dim,
            resnet_eps=norm_eps,
            resnet_act_fn=act_fn,
            output_scale_factor=mid_block_scale_factor,
            resnet_time_scale_shift=resnet_time_scale_shift,
            cross_attention_dim=cross_attention_dim,
            attn_num_head_channels=attention_head_dim[-1],
            resnet_groups=norm_num_groups,
            dual_cross_attention=dual_cross_attention,
            use_linear_projection=use_linear_projection,
            upcast_attention=upcast_attention,
        )
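
    # With the default config (4 down blocks, layers_per_block=2), the lists built
    # above hold 12 zero-initialized down projections (1 for conv_in, 2 per block,
    # 1 per downsampler) plus 1 mid projection, one per residual returned by forward().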

    @classmethod
    def from_unet(
        cls,
        unet: UNet2DConditionModel,
        controlnet_conditioning_channel_order: str = "rgb",
        conditioning_embedding_out_channels: Optional[Tuple[int]] = (16, 32, 96, 256),
        load_weights_from_unet: bool = True,
    ):
        r"""
        Instantiate a ControlNetModel3D from a UNet2DConditionModel.

        Parameters:
            unet (`UNet2DConditionModel`):
                The UNet model whose weights are copied to the ControlNet. Note that all configuration options are
                also copied where applicable.
        """
        controlnet = cls(
            in_channels=unet.config.in_channels,
            flip_sin_to_cos=unet.config.flip_sin_to_cos,
            freq_shift=unet.config.freq_shift,
            down_block_types=unet.config.down_block_types,
            only_cross_attention=unet.config.only_cross_attention,
            block_out_channels=unet.config.block_out_channels,
            layers_per_block=unet.config.layers_per_block,
            downsample_padding=unet.config.downsample_padding,
            mid_block_scale_factor=unet.config.mid_block_scale_factor,
            act_fn=unet.config.act_fn,
            norm_num_groups=unet.config.norm_num_groups,
            norm_eps=unet.config.norm_eps,
            cross_attention_dim=unet.config.cross_attention_dim,
            attention_head_dim=unet.config.attention_head_dim,
            use_linear_projection=unet.config.use_linear_projection,
            class_embed_type=unet.config.class_embed_type,
            num_class_embeds=unet.config.num_class_embeds,
            upcast_attention=unet.config.upcast_attention,
            resnet_time_scale_shift=unet.config.resnet_time_scale_shift,
            projection_class_embeddings_input_dim=unet.config.projection_class_embeddings_input_dim,
            controlnet_conditioning_channel_order=controlnet_conditioning_channel_order,
            conditioning_embedding_out_channels=conditioning_embedding_out_channels,
        )

        if load_weights_from_unet:
            controlnet.conv_in.load_state_dict(unet.conv_in.state_dict())
            controlnet.time_proj.load_state_dict(unet.time_proj.state_dict())
            controlnet.time_embedding.load_state_dict(unet.time_embedding.state_dict())

            if controlnet.class_embedding is not None:
                controlnet.class_embedding.load_state_dict(unet.class_embedding.state_dict())

            controlnet.down_blocks.load_state_dict(unet.down_blocks.state_dict())
            controlnet.mid_block.load_state_dict(unet.mid_block.state_dict())

        return controlnet
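
    # Usage sketch (illustrative; the checkpoint path is an assumption):
    #   unet = UNet2DConditionModel.from_pretrained("path/to/sd", subfolder="unet")
    #   controlnet = ControlNetModel3D.from_unet(unet)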

    @property
    def attn_processors(self) -> Dict[str, AttnProcessor]:
        r"""
        Returns:
            `dict` of attention processors: A dictionary containing all attention processors used in the model,
            indexed by weight name.
        """
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttnProcessor]):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors
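
    # Keys mirror the module paths, e.g.
    # "down_blocks.0.attentions.0.transformer_blocks.0.attn1.processor".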

    def set_attn_processor(self, processor: Union[AttnProcessor, Dict[str, AttnProcessor]]):
        r"""
        Parameters:
            processor (`dict` of `AttnProcessor` or `AttnProcessor`):
                The instantiated processor class, or a dictionary of processor classes, that will be set as the
                processor of **all** `Attention` layers. If `processor` is a dict, each key must be the path to the
                corresponding cross attention processor. This is strongly recommended when setting trainable
                attention processors.
        """
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)
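
    # Usage sketch (`MyAttnProcessor` is a hypothetical processor class implementing `__call__`):
    #   model.set_attn_processor(MyAttnProcessor())  # same processor for every layer
    #   model.set_attn_processor({name: MyAttnProcessor() for name in model.attn_processors})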

    def set_attention_slice(self, slice_size):
        r"""
        Enable sliced attention computation.

        When this option is enabled, the attention module splits the input tensor into slices to compute attention
        in several steps. This is useful to save some memory in exchange for a small speed decrease.

        Args:
            slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`):
                When `"auto"`, the input to the attention heads is halved, so attention is computed in two steps. If
                `"max"`, the maximum amount of memory is saved by running only one slice at a time. If a number is
                provided, as many slices as `attention_head_dim // slice_size` are used. In this case,
                `attention_head_dim` must be a multiple of `slice_size`.
        """
        sliceable_head_dims = []

        def fn_recursive_retrieve_sliceable_dims(module: torch.nn.Module):
            if hasattr(module, "set_attention_slice"):
                sliceable_head_dims.append(module.sliceable_head_dim)

            for child in module.children():
                fn_recursive_retrieve_sliceable_dims(child)

        # retrieve the number of sliceable attention layers
        for module in self.children():
            fn_recursive_retrieve_sliceable_dims(module)

        num_sliceable_layers = len(sliceable_head_dims)

        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = [dim // 2 for dim in sliceable_head_dims]
        elif slice_size == "max":
            # make the smallest slice possible
            slice_size = num_sliceable_layers * [1]

        slice_size = num_sliceable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size

        if len(slice_size) != len(sliceable_head_dims):
            raise ValueError(
                f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different"
                f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}."
            )

        for i in range(len(slice_size)):
            size = slice_size[i]
            dim = sliceable_head_dims[i]
            if size is not None and size > dim:
                raise ValueError(f"size {size} has to be smaller or equal to {dim}.")

        # Recursively walk through all the children. Any child that exposes
        # set_attention_slice receives its slice size; the list is reversed so
        # that pop() yields sizes in the original traversal order.
        def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):
            if hasattr(module, "set_attention_slice"):
                module.set_attention_slice(slice_size.pop())

            for child in module.children():
                fn_recursive_set_attention_slice(child, slice_size)

        reversed_slice_size = list(reversed(slice_size))
        for module in self.children():
            fn_recursive_set_attention_slice(module, reversed_slice_size)
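
    # e.g. model.set_attention_slice("auto") computes each attention in two slices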

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, (CrossAttnDownBlock3D, DownBlock3D)):
            module.gradient_checkpointing = value

    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        encoder_hidden_states: torch.Tensor,
        controlnet_cond: torch.FloatTensor,
        conditioning_scale: float = 1.0,
        class_labels: Optional[torch.Tensor] = None,
        timestep_cond: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        return_dict: bool = True,
    ) -> Union[ControlNetOutput, Tuple]:
        # check channel order
        channel_order = self.config.controlnet_conditioning_channel_order

        if channel_order == "rgb":
            # in rgb order by default
            ...
        elif channel_order == "bgr":
            controlnet_cond = torch.flip(controlnet_cond, dims=[1])
        else:
            raise ValueError(f"unknown `controlnet_conditioning_channel_order`: {channel_order}")

        # prepare attention_mask: convert a {0, 1} mask to an additive bias
        if attention_mask is not None:
            attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0
            attention_mask = attention_mask.unsqueeze(1)

        # 1. time
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            # TODO: this requires sync between CPU and GPU, so try to pass
            # timesteps as tensors if you can
            is_mps = sample.device.type == "mps"
            if isinstance(timestep, float):
                dtype = torch.float32 if is_mps else torch.float64
            else:
                dtype = torch.int32 if is_mps else torch.int64
            timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)
        elif len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps.expand(sample.shape[0])

        t_emb = self.time_proj(timesteps)

        # `time_proj` always returns f32 tensors, but the time embedding may be
        # running in fp16, so cast here
        t_emb = t_emb.to(dtype=self.dtype)

        emb = self.time_embedding(t_emb, timestep_cond)

        if self.class_embedding is not None:
            if class_labels is None:
                raise ValueError("class_labels should be provided when num_class_embeds > 0")

            if self.config.class_embed_type == "timestep":
                class_labels = self.time_proj(class_labels)

            class_emb = self.class_embedding(class_labels).to(dtype=self.dtype)
            emb = emb + class_emb

        # 2. pre-process
        sample = self.conv_in(sample)

        controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)

        sample += controlnet_cond

        # 3. down
        down_block_res_samples = (sample,)
        for downsample_block in self.down_blocks:
            if hasattr(downsample_block, "has_cross_attention") and downsample_block.has_cross_attention:
                sample, res_samples = downsample_block(
                    hidden_states=sample,
                    temb=emb,
                    encoder_hidden_states=encoder_hidden_states,
                    attention_mask=attention_mask,
                    cross_attention_kwargs=cross_attention_kwargs,
                )
            else:
                sample, res_samples = downsample_block(hidden_states=sample, temb=emb)

            down_block_res_samples += res_samples

        # 4. mid
        if self.mid_block is not None:
            sample = self.mid_block(
                sample,
                emb,
                encoder_hidden_states=encoder_hidden_states,
                attention_mask=attention_mask,
                cross_attention_kwargs=cross_attention_kwargs,
            )

        # 5. controlnet blocks: pass every residual through its zero-initialized 1x1 conv
        controlnet_down_block_res_samples = ()

        for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):
            down_block_res_sample = controlnet_block(down_block_res_sample)
            controlnet_down_block_res_samples += (down_block_res_sample,)

        down_block_res_samples = controlnet_down_block_res_samples

        mid_block_res_sample = self.controlnet_mid_block(sample)

        # 6. scaling
        down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale

        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)

        return ControlNetOutput(
            down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample
        )
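
    # The returned residuals are consumed by the main UNet's forward pass (in
    # diffusers, via the `down_block_additional_residuals` and
    # `mid_block_additional_residual` arguments), where they are added to the
    # skip connections and the mid-block output.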

    @classmethod
    def from_pretrained_2d(cls, pretrained_model_path, control_path=None):
        config_file = os.path.join(pretrained_model_path, "config.json")
        if not os.path.isfile(config_file):
            raise RuntimeError(f"{config_file} does not exist")
        with open(config_file, "r") as f:
            config = json.load(f)
        config["_class_name"] = cls.__name__
        # swap the 2D down-block types for their inflated 3D counterparts
        config["down_block_types"] = [
            "CrossAttnDownBlock3D",
            "CrossAttnDownBlock3D",
            "CrossAttnDownBlock3D",
            "DownBlock3D",
        ]

        from diffusers.utils import WEIGHTS_NAME

        model = cls.from_config(config)
        if control_path is None:
            model_file = os.path.join(pretrained_model_path, WEIGHTS_NAME)
            state_dict = torch.load(model_file, map_location="cpu")
        else:
            model_file = control_path
            state_dict = torch.load(model_file, map_location="cpu")
            # strip the 14-character key prefix (presumably "control_model.")
            # used by the standalone control checkpoint
            state_dict = {k[14:]: state_dict[k] for k in state_dict.keys()}

        # keep the freshly initialized temporal ("_temp.") layers, which have no
        # counterpart in the 2D checkpoint
        for k, v in model.state_dict().items():
            if "_temp." in k:
                state_dict.update({k: v})
        model.load_state_dict(state_dict)

        return model
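
    # Usage sketch (paths are illustrative):
    #   model = ControlNetModel3D.from_pretrained_2d("path/to/controlnet")  # diffusers layout
    #   model = ControlNetModel3D.from_pretrained_2d("path/to/controlnet", control_path="path/to/control.ckpt")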


def zero_module(module):
    # zero out all parameters in-place and return the module; ControlNet uses this
    # so the control branch has no effect at the start of training
    for p in module.parameters():
        nn.init.zeros_(p)
    return module