# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass from typing import Any, Dict, List, Optional, Tuple, Union import torch import torch.nn as nn import torch.utils.checkpoint from ...configuration_utils import ConfigMixin, register_to_config from ...loaders import PeftAdapterMixin, UNet2DConditionLoadersMixin from ...loaders.single_file_model import FromOriginalModelMixin from ...utils import USE_PEFT_BACKEND, BaseOutput, deprecate, logging, scale_lora_layers, unscale_lora_layers from ..activations import get_activation from ..attention_processor import ( ADDED_KV_ATTENTION_PROCESSORS, CROSS_ATTENTION_PROCESSORS, Attention, AttentionProcessor, AttnAddedKVProcessor, AttnProcessor, FusedAttnProcessor2_0, ) from ..embeddings import ( GaussianFourierProjection, GLIGENTextBoundingboxProjection, ImageHintTimeEmbedding, ImageProjection, ImageTimeEmbedding, TextImageProjection, TextImageTimeEmbedding, TextTimeEmbedding, TimestepEmbedding, Timesteps, ) from ..modeling_utils import ModelMixin from .unet_2d_blocks import ( get_down_block, get_mid_block, get_up_block, ) logger = logging.get_logger(__name__) # pylint: disable=invalid-name @dataclass class UNet2DConditionOutput(BaseOutput): """ The output of [`UNet2DConditionModel`]. Args: sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)`): The hidden states output conditioned on `encoder_hidden_states` input. Output of last layer of model. """ sample: torch.Tensor = None class UNet2DConditionModel( ModelMixin, ConfigMixin, FromOriginalModelMixin, UNet2DConditionLoadersMixin, PeftAdapterMixin ): r""" A conditional 2D UNet model that takes a noisy sample, conditional state, and a timestep and returns a sample shaped output. This model inherits from [`ModelMixin`]. Check the superclass documentation for it's generic methods implemented for all models (such as downloading or saving). Parameters: sample_size (`int` or `Tuple[int, int]`, *optional*, defaults to `None`): Height and width of input/output sample. in_channels (`int`, *optional*, defaults to 4): Number of channels in the input sample. out_channels (`int`, *optional*, defaults to 4): Number of channels in the output. center_input_sample (`bool`, *optional*, defaults to `False`): Whether to center the input sample. flip_sin_to_cos (`bool`, *optional*, defaults to `True`): Whether to flip the sin to cos in the time embedding. freq_shift (`int`, *optional*, defaults to 0): The frequency shift to apply to the time embedding. down_block_types (`Tuple[str]`, *optional*, defaults to `("CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D")`): The tuple of downsample blocks to use. mid_block_type (`str`, *optional*, defaults to `"UNetMidBlock2DCrossAttn"`): Block type for middle of UNet, it can be one of `UNetMidBlock2DCrossAttn`, `UNetMidBlock2D`, or `UNetMidBlock2DSimpleCrossAttn`. If `None`, the mid block layer is skipped. 
up_block_types (`Tuple[str]`, *optional*, defaults to `("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")`): The tuple of upsample blocks to use. only_cross_attention(`bool` or `Tuple[bool]`, *optional*, default to `False`): Whether to include self-attention in the basic transformer blocks, see [`~models.attention.BasicTransformerBlock`]. block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`): The tuple of output channels for each block. layers_per_block (`int`, *optional*, defaults to 2): The number of layers per block. downsample_padding (`int`, *optional*, defaults to 1): The padding to use for the downsampling convolution. mid_block_scale_factor (`float`, *optional*, defaults to 1.0): The scale factor to use for the mid block. dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use. act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use. norm_num_groups (`int`, *optional*, defaults to 32): The number of groups to use for the normalization. If `None`, normalization and activation layers is skipped in post-processing. norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon to use for the normalization. cross_attention_dim (`int` or `Tuple[int]`, *optional*, defaults to 1280): The dimension of the cross attention features. transformer_layers_per_block (`int`, `Tuple[int]`, or `Tuple[Tuple]` , *optional*, defaults to 1): The number of transformer blocks of type [`~models.attention.BasicTransformerBlock`]. Only relevant for [`~models.unets.unet_2d_blocks.CrossAttnDownBlock2D`], [`~models.unets.unet_2d_blocks.CrossAttnUpBlock2D`], [`~models.unets.unet_2d_blocks.UNetMidBlock2DCrossAttn`]. reverse_transformer_layers_per_block : (`Tuple[Tuple]`, *optional*, defaults to None): The number of transformer blocks of type [`~models.attention.BasicTransformerBlock`], in the upsampling blocks of the U-Net. Only relevant if `transformer_layers_per_block` is of type `Tuple[Tuple]` and for [`~models.unets.unet_2d_blocks.CrossAttnDownBlock2D`], [`~models.unets.unet_2d_blocks.CrossAttnUpBlock2D`], [`~models.unets.unet_2d_blocks.UNetMidBlock2DCrossAttn`]. encoder_hid_dim (`int`, *optional*, defaults to None): If `encoder_hid_dim_type` is defined, `encoder_hidden_states` will be projected from `encoder_hid_dim` dimension to `cross_attention_dim`. encoder_hid_dim_type (`str`, *optional*, defaults to `None`): If given, the `encoder_hidden_states` and potentially other embeddings are down-projected to text embeddings of dimension `cross_attention` according to `encoder_hid_dim_type`. attention_head_dim (`int`, *optional*, defaults to 8): The dimension of the attention heads. num_attention_heads (`int`, *optional*): The number of attention heads. If not defined, defaults to `attention_head_dim` resnet_time_scale_shift (`str`, *optional*, defaults to `"default"`): Time scale shift config for ResNet blocks (see [`~models.resnet.ResnetBlock2D`]). Choose from `default` or `scale_shift`. class_embed_type (`str`, *optional*, defaults to `None`): The type of class embedding to use which is ultimately summed with the time embeddings. Choose from `None`, `"timestep"`, `"identity"`, `"projection"`, or `"simple_projection"`. addition_embed_type (`str`, *optional*, defaults to `None`): Configures an optional embedding which will be summed with the time embeddings. Choose from `None` or "text". "text" will use the `TextTimeEmbedding` layer. 
addition_time_embed_dim: (`int`, *optional*, defaults to `None`): Dimension for the timestep embeddings. num_class_embeds (`int`, *optional*, defaults to `None`): Input dimension of the learnable embedding matrix to be projected to `time_embed_dim`, when performing class conditioning with `class_embed_type` equal to `None`. time_embedding_type (`str`, *optional*, defaults to `positional`): The type of position embedding to use for timesteps. Choose from `positional` or `fourier`. time_embedding_dim (`int`, *optional*, defaults to `None`): An optional override for the dimension of the projected time embedding. time_embedding_act_fn (`str`, *optional*, defaults to `None`): Optional activation function to use only once on the time embeddings before they are passed to the rest of the UNet. Choose from `silu`, `mish`, `gelu`, and `swish`. timestep_post_act (`str`, *optional*, defaults to `None`): The second activation function to use in timestep embedding. Choose from `silu`, `mish` and `gelu`. time_cond_proj_dim (`int`, *optional*, defaults to `None`): The dimension of `cond_proj` layer in the timestep embedding. conv_in_kernel (`int`, *optional*, default to `3`): The kernel size of `conv_in` layer. conv_out_kernel (`int`, *optional*, default to `3`): The kernel size of `conv_out` layer. projection_class_embeddings_input_dim (`int`, *optional*): The dimension of the `class_labels` input when `class_embed_type="projection"`. Required when `class_embed_type="projection"`. class_embeddings_concat (`bool`, *optional*, defaults to `False`): Whether to concatenate the time embeddings with the class embeddings. mid_block_only_cross_attention (`bool`, *optional*, defaults to `None`): Whether to use cross attention with the mid block when using the `UNetMidBlock2DSimpleCrossAttn`. If `only_cross_attention` is given as a single boolean and `mid_block_only_cross_attention` is `None`, the `only_cross_attention` value is used as the value for `mid_block_only_cross_attention`. Default to `False` otherwise. 
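    Example:
        A minimal forward-pass sketch (illustrative only; it relies on the constructor defaults documented
        above, and `sample_size=64` plus the random inputs are assumptions made for the example):

        ```py
        >>> import torch
        >>> from diffusers import UNet2DConditionModel

        >>> unet = UNet2DConditionModel(sample_size=64)
        >>> sample = torch.randn(1, 4, 64, 64)
        >>> encoder_hidden_states = torch.randn(1, 77, 1280)
        >>> unet(sample, timestep=10, encoder_hidden_states=encoder_hidden_states).sample.shape
        torch.Size([1, 4, 64, 64])
        ```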
""" _supports_gradient_checkpointing = True _no_split_modules = ["BasicTransformerBlock", "ResnetBlock2D", "CrossAttnUpBlock2D", "UpBlock2D"] _skip_layerwise_casting_patterns = ["norm"] _repeated_blocks = ["BasicTransformerBlock"] @register_to_config def __init__( self, sample_size: Optional[Union[int, Tuple[int, int]]] = None, in_channels: int = 4, out_channels: int = 4, center_input_sample: bool = False, flip_sin_to_cos: bool = True, freq_shift: int = 0, down_block_types: Tuple[str] = ( "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D", ), mid_block_type: Optional[str] = "UNetMidBlock2DCrossAttn", up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D"), only_cross_attention: Union[bool, Tuple[bool]] = False, block_out_channels: Tuple[int] = (320, 640, 1280, 1280), layers_per_block: Union[int, Tuple[int]] = 2, downsample_padding: int = 1, mid_block_scale_factor: float = 1, dropout: float = 0.0, act_fn: str = "silu", norm_num_groups: Optional[int] = 32, norm_eps: float = 1e-5, cross_attention_dim: Union[int, Tuple[int]] = 1280, transformer_layers_per_block: Union[int, Tuple[int], Tuple[Tuple]] = 1, reverse_transformer_layers_per_block: Optional[Tuple[Tuple[int]]] = None, encoder_hid_dim: Optional[int] = None, encoder_hid_dim_type: Optional[str] = None, attention_head_dim: Union[int, Tuple[int]] = 8, num_attention_heads: Optional[Union[int, Tuple[int]]] = None, dual_cross_attention: bool = False, use_linear_projection: bool = False, class_embed_type: Optional[str] = None, addition_embed_type: Optional[str] = None, addition_time_embed_dim: Optional[int] = None, num_class_embeds: Optional[int] = None, upcast_attention: bool = False, resnet_time_scale_shift: str = "default", resnet_skip_time_act: bool = False, resnet_out_scale_factor: float = 1.0, time_embedding_type: str = "positional", time_embedding_dim: Optional[int] = None, time_embedding_act_fn: Optional[str] = None, timestep_post_act: Optional[str] = None, time_cond_proj_dim: Optional[int] = None, conv_in_kernel: int = 3, conv_out_kernel: int = 3, projection_class_embeddings_input_dim: Optional[int] = None, attention_type: str = "default", class_embeddings_concat: bool = False, mid_block_only_cross_attention: Optional[bool] = None, cross_attention_norm: Optional[str] = None, addition_embed_type_num_heads: int = 64, ): super().__init__() self.sample_size = sample_size if num_attention_heads is not None: raise ValueError( "At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19." ) # If `num_attention_heads` is not defined (which is the case for most models) # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. # The reason for this behavior is to correct for incorrectly named variables that were introduced # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking # which is why we correct for the naming here. 
num_attention_heads = num_attention_heads or attention_head_dim # Check inputs self._check_config( down_block_types=down_block_types, up_block_types=up_block_types, only_cross_attention=only_cross_attention, block_out_channels=block_out_channels, layers_per_block=layers_per_block, cross_attention_dim=cross_attention_dim, transformer_layers_per_block=transformer_layers_per_block, reverse_transformer_layers_per_block=reverse_transformer_layers_per_block, attention_head_dim=attention_head_dim, num_attention_heads=num_attention_heads, ) # input conv_in_padding = (conv_in_kernel - 1) // 2 self.conv_in = nn.Conv2d( in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding ) # time time_embed_dim, timestep_input_dim = self._set_time_proj( time_embedding_type, block_out_channels=block_out_channels, flip_sin_to_cos=flip_sin_to_cos, freq_shift=freq_shift, time_embedding_dim=time_embedding_dim, ) self.time_embedding = TimestepEmbedding( timestep_input_dim, time_embed_dim, act_fn=act_fn, post_act_fn=timestep_post_act, cond_proj_dim=time_cond_proj_dim, ) self._set_encoder_hid_proj( encoder_hid_dim_type, cross_attention_dim=cross_attention_dim, encoder_hid_dim=encoder_hid_dim, ) # class embedding self._set_class_embedding( class_embed_type, act_fn=act_fn, num_class_embeds=num_class_embeds, projection_class_embeddings_input_dim=projection_class_embeddings_input_dim, time_embed_dim=time_embed_dim, timestep_input_dim=timestep_input_dim, ) self._set_add_embedding( addition_embed_type, addition_embed_type_num_heads=addition_embed_type_num_heads, addition_time_embed_dim=addition_time_embed_dim, cross_attention_dim=cross_attention_dim, encoder_hid_dim=encoder_hid_dim, flip_sin_to_cos=flip_sin_to_cos, freq_shift=freq_shift, projection_class_embeddings_input_dim=projection_class_embeddings_input_dim, time_embed_dim=time_embed_dim, ) if time_embedding_act_fn is None: self.time_embed_act = None else: self.time_embed_act = get_activation(time_embedding_act_fn) self.down_blocks = nn.ModuleList([]) self.up_blocks = nn.ModuleList([]) if isinstance(only_cross_attention, bool): if mid_block_only_cross_attention is None: mid_block_only_cross_attention = only_cross_attention only_cross_attention = [only_cross_attention] * len(down_block_types) if mid_block_only_cross_attention is None: mid_block_only_cross_attention = False if isinstance(num_attention_heads, int): num_attention_heads = (num_attention_heads,) * len(down_block_types) if isinstance(attention_head_dim, int): attention_head_dim = (attention_head_dim,) * len(down_block_types) if isinstance(cross_attention_dim, int): cross_attention_dim = (cross_attention_dim,) * len(down_block_types) if isinstance(layers_per_block, int): layers_per_block = [layers_per_block] * len(down_block_types) if isinstance(transformer_layers_per_block, int): transformer_layers_per_block = [transformer_layers_per_block] * len(down_block_types) if class_embeddings_concat: # The time embeddings are concatenated with the class embeddings. 
The dimension of the # time embeddings passed to the down, middle, and up blocks is twice the dimension of the # regular time embeddings blocks_time_embed_dim = time_embed_dim * 2 else: blocks_time_embed_dim = time_embed_dim # down output_channel = block_out_channels[0] for i, down_block_type in enumerate(down_block_types): input_channel = output_channel output_channel = block_out_channels[i] is_final_block = i == len(block_out_channels) - 1 down_block = get_down_block( down_block_type, num_layers=layers_per_block[i], transformer_layers_per_block=transformer_layers_per_block[i], in_channels=input_channel, out_channels=output_channel, temb_channels=blocks_time_embed_dim, add_downsample=not is_final_block, resnet_eps=norm_eps, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, cross_attention_dim=cross_attention_dim[i], num_attention_heads=num_attention_heads[i], downsample_padding=downsample_padding, dual_cross_attention=dual_cross_attention, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention[i], upcast_attention=upcast_attention, resnet_time_scale_shift=resnet_time_scale_shift, attention_type=attention_type, resnet_skip_time_act=resnet_skip_time_act, resnet_out_scale_factor=resnet_out_scale_factor, cross_attention_norm=cross_attention_norm, attention_head_dim=attention_head_dim[i] if attention_head_dim[i] is not None else output_channel, dropout=dropout, ) self.down_blocks.append(down_block) # mid self.mid_block = get_mid_block( mid_block_type, temb_channels=blocks_time_embed_dim, in_channels=block_out_channels[-1], resnet_eps=norm_eps, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, output_scale_factor=mid_block_scale_factor, transformer_layers_per_block=transformer_layers_per_block[-1], num_attention_heads=num_attention_heads[-1], cross_attention_dim=cross_attention_dim[-1], dual_cross_attention=dual_cross_attention, use_linear_projection=use_linear_projection, mid_block_only_cross_attention=mid_block_only_cross_attention, upcast_attention=upcast_attention, resnet_time_scale_shift=resnet_time_scale_shift, attention_type=attention_type, resnet_skip_time_act=resnet_skip_time_act, cross_attention_norm=cross_attention_norm, attention_head_dim=attention_head_dim[-1], dropout=dropout, ) # count how many layers upsample the images self.num_upsamplers = 0 # up reversed_block_out_channels = list(reversed(block_out_channels)) reversed_num_attention_heads = list(reversed(num_attention_heads)) reversed_layers_per_block = list(reversed(layers_per_block)) reversed_cross_attention_dim = list(reversed(cross_attention_dim)) reversed_transformer_layers_per_block = ( list(reversed(transformer_layers_per_block)) if reverse_transformer_layers_per_block is None else reverse_transformer_layers_per_block ) only_cross_attention = list(reversed(only_cross_attention)) output_channel = reversed_block_out_channels[0] for i, up_block_type in enumerate(up_block_types): is_final_block = i == len(block_out_channels) - 1 prev_output_channel = output_channel output_channel = reversed_block_out_channels[i] input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)] # add upsample block for all BUT final layer if not is_final_block: add_upsample = True self.num_upsamplers += 1 else: add_upsample = False up_block = get_up_block( up_block_type, num_layers=reversed_layers_per_block[i] + 1, transformer_layers_per_block=reversed_transformer_layers_per_block[i], in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, 
temb_channels=blocks_time_embed_dim, add_upsample=add_upsample, resnet_eps=norm_eps, resnet_act_fn=act_fn, resolution_idx=i, resnet_groups=norm_num_groups, cross_attention_dim=reversed_cross_attention_dim[i], num_attention_heads=reversed_num_attention_heads[i], dual_cross_attention=dual_cross_attention, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention[i], upcast_attention=upcast_attention, resnet_time_scale_shift=resnet_time_scale_shift, attention_type=attention_type, resnet_skip_time_act=resnet_skip_time_act, resnet_out_scale_factor=resnet_out_scale_factor, cross_attention_norm=cross_attention_norm, attention_head_dim=attention_head_dim[i] if attention_head_dim[i] is not None else output_channel, dropout=dropout, ) self.up_blocks.append(up_block) # out if norm_num_groups is not None: self.conv_norm_out = nn.GroupNorm( num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps ) self.conv_act = get_activation(act_fn) else: self.conv_norm_out = None self.conv_act = None conv_out_padding = (conv_out_kernel - 1) // 2 self.conv_out = nn.Conv2d( block_out_channels[0], out_channels, kernel_size=conv_out_kernel, padding=conv_out_padding ) self._set_pos_net_if_use_gligen(attention_type=attention_type, cross_attention_dim=cross_attention_dim) def _check_config( self, down_block_types: Tuple[str], up_block_types: Tuple[str], only_cross_attention: Union[bool, Tuple[bool]], block_out_channels: Tuple[int], layers_per_block: Union[int, Tuple[int]], cross_attention_dim: Union[int, Tuple[int]], transformer_layers_per_block: Union[int, Tuple[int], Tuple[Tuple[int]]], reverse_transformer_layers_per_block: bool, attention_head_dim: int, num_attention_heads: Optional[Union[int, Tuple[int]]], ): if len(down_block_types) != len(up_block_types): raise ValueError( f"Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. `up_block_types`: {up_block_types}." ) if len(block_out_channels) != len(down_block_types): raise ValueError( f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}." ) if not isinstance(only_cross_attention, bool) and len(only_cross_attention) != len(down_block_types): raise ValueError( f"Must provide the same number of `only_cross_attention` as `down_block_types`. `only_cross_attention`: {only_cross_attention}. `down_block_types`: {down_block_types}." ) if not isinstance(num_attention_heads, int) and len(num_attention_heads) != len(down_block_types): raise ValueError( f"Must provide the same number of `num_attention_heads` as `down_block_types`. `num_attention_heads`: {num_attention_heads}. `down_block_types`: {down_block_types}." ) if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types): raise ValueError( f"Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. `down_block_types`: {down_block_types}." ) if isinstance(cross_attention_dim, list) and len(cross_attention_dim) != len(down_block_types): raise ValueError( f"Must provide the same number of `cross_attention_dim` as `down_block_types`. `cross_attention_dim`: {cross_attention_dim}. `down_block_types`: {down_block_types}." ) if not isinstance(layers_per_block, int) and len(layers_per_block) != len(down_block_types): raise ValueError( f"Must provide the same number of `layers_per_block` as `down_block_types`. 
`layers_per_block`: {layers_per_block}. `down_block_types`: {down_block_types}." ) if isinstance(transformer_layers_per_block, list) and reverse_transformer_layers_per_block is None: for layer_number_per_block in transformer_layers_per_block: if isinstance(layer_number_per_block, list): raise ValueError("Must provide 'reverse_transformer_layers_per_block` if using asymmetrical UNet.") def _set_time_proj( self, time_embedding_type: str, block_out_channels: int, flip_sin_to_cos: bool, freq_shift: float, time_embedding_dim: int, ) -> Tuple[int, int]: if time_embedding_type == "fourier": time_embed_dim = time_embedding_dim or block_out_channels[0] * 2 if time_embed_dim % 2 != 0: raise ValueError(f"`time_embed_dim` should be divisible by 2, but is {time_embed_dim}.") self.time_proj = GaussianFourierProjection( time_embed_dim // 2, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos ) timestep_input_dim = time_embed_dim elif time_embedding_type == "positional": time_embed_dim = time_embedding_dim or block_out_channels[0] * 4 self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift) timestep_input_dim = block_out_channels[0] else: raise ValueError( f"{time_embedding_type} does not exist. Please make sure to use one of `fourier` or `positional`." ) return time_embed_dim, timestep_input_dim def _set_encoder_hid_proj( self, encoder_hid_dim_type: Optional[str], cross_attention_dim: Union[int, Tuple[int]], encoder_hid_dim: Optional[int], ): if encoder_hid_dim_type is None and encoder_hid_dim is not None: encoder_hid_dim_type = "text_proj" self.register_to_config(encoder_hid_dim_type=encoder_hid_dim_type) logger.info("encoder_hid_dim_type defaults to 'text_proj' as `encoder_hid_dim` is defined.") if encoder_hid_dim is None and encoder_hid_dim_type is not None: raise ValueError( f"`encoder_hid_dim` has to be defined when `encoder_hid_dim_type` is set to {encoder_hid_dim_type}." ) if encoder_hid_dim_type == "text_proj": self.encoder_hid_proj = nn.Linear(encoder_hid_dim, cross_attention_dim) elif encoder_hid_dim_type == "text_image_proj": # image_embed_dim DOESN'T have to be `cross_attention_dim`. To not clutter the __init__ too much # they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use # case when `addition_embed_type == "text_image_proj"` (Kandinsky 2.1)` self.encoder_hid_proj = TextImageProjection( text_embed_dim=encoder_hid_dim, image_embed_dim=cross_attention_dim, cross_attention_dim=cross_attention_dim, ) elif encoder_hid_dim_type == "image_proj": # Kandinsky 2.2 self.encoder_hid_proj = ImageProjection( image_embed_dim=encoder_hid_dim, cross_attention_dim=cross_attention_dim, ) elif encoder_hid_dim_type is not None: raise ValueError( f"`encoder_hid_dim_type`: {encoder_hid_dim_type} must be None, 'text_proj', 'text_image_proj', or 'image_proj'." 
) else: self.encoder_hid_proj = None def _set_class_embedding( self, class_embed_type: Optional[str], act_fn: str, num_class_embeds: Optional[int], projection_class_embeddings_input_dim: Optional[int], time_embed_dim: int, timestep_input_dim: int, ): if class_embed_type is None and num_class_embeds is not None: self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim) elif class_embed_type == "timestep": self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim, act_fn=act_fn) elif class_embed_type == "identity": self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim) elif class_embed_type == "projection": if projection_class_embeddings_input_dim is None: raise ValueError( "`class_embed_type`: 'projection' requires `projection_class_embeddings_input_dim` be set" ) # The projection `class_embed_type` is the same as the timestep `class_embed_type` except # 1. the `class_labels` inputs are not first converted to sinusoidal embeddings # 2. it projects from an arbitrary input dimension. # # Note that `TimestepEmbedding` is quite general, being mainly linear layers and activations. # When used for embedding actual timesteps, the timesteps are first converted to sinusoidal embeddings. # As a result, `TimestepEmbedding` can be passed arbitrary vectors. self.class_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim) elif class_embed_type == "simple_projection": if projection_class_embeddings_input_dim is None: raise ValueError( "`class_embed_type`: 'simple_projection' requires `projection_class_embeddings_input_dim` be set" ) self.class_embedding = nn.Linear(projection_class_embeddings_input_dim, time_embed_dim) else: self.class_embedding = None def _set_add_embedding( self, addition_embed_type: str, addition_embed_type_num_heads: int, addition_time_embed_dim: Optional[int], flip_sin_to_cos: bool, freq_shift: float, cross_attention_dim: Optional[int], encoder_hid_dim: Optional[int], projection_class_embeddings_input_dim: Optional[int], time_embed_dim: int, ): if addition_embed_type == "text": if encoder_hid_dim is not None: text_time_embedding_from_dim = encoder_hid_dim else: text_time_embedding_from_dim = cross_attention_dim self.add_embedding = TextTimeEmbedding( text_time_embedding_from_dim, time_embed_dim, num_heads=addition_embed_type_num_heads ) elif addition_embed_type == "text_image": # text_embed_dim and image_embed_dim DON'T have to be `cross_attention_dim`. 
To not clutter the __init__ too much # they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use # case when `addition_embed_type == "text_image"` (Kandinsky 2.1)` self.add_embedding = TextImageTimeEmbedding( text_embed_dim=cross_attention_dim, image_embed_dim=cross_attention_dim, time_embed_dim=time_embed_dim ) elif addition_embed_type == "text_time": self.add_time_proj = Timesteps(addition_time_embed_dim, flip_sin_to_cos, freq_shift) self.add_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim) elif addition_embed_type == "image": # Kandinsky 2.2 self.add_embedding = ImageTimeEmbedding(image_embed_dim=encoder_hid_dim, time_embed_dim=time_embed_dim) elif addition_embed_type == "image_hint": # Kandinsky 2.2 ControlNet self.add_embedding = ImageHintTimeEmbedding(image_embed_dim=encoder_hid_dim, time_embed_dim=time_embed_dim) elif addition_embed_type is not None: raise ValueError( f"`addition_embed_type`: {addition_embed_type} must be None, 'text', 'text_image', 'text_time', 'image', or 'image_hint'." ) def _set_pos_net_if_use_gligen(self, attention_type: str, cross_attention_dim: int): if attention_type in ["gated", "gated-text-image"]: positive_len = 768 if isinstance(cross_attention_dim, int): positive_len = cross_attention_dim elif isinstance(cross_attention_dim, (list, tuple)): positive_len = cross_attention_dim[0] feature_type = "text-only" if attention_type == "gated" else "text-image" self.position_net = GLIGENTextBoundingboxProjection( positive_len=positive_len, out_dim=cross_attention_dim, feature_type=feature_type ) @property def attn_processors(self) -> Dict[str, AttentionProcessor]: r""" Returns: `dict` of attention processors: A dictionary containing all attention processors used in the model with indexed by its weight name. """ # set recursively processors = {} def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): if hasattr(module, "get_processor"): processors[f"{name}.processor"] = module.get_processor() for sub_name, child in module.named_children(): fn_recursive_add_processors(f"{name}.{sub_name}", child, processors) return processors for name, module in self.named_children(): fn_recursive_add_processors(name, module, processors) return processors def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): r""" Sets the attention processor to use to compute attention. Parameters: processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`): The instantiated processor class or a dictionary of processor classes that will be set as the processor for **all** `Attention` layers. If `processor` is a dict, the key needs to define the path to the corresponding cross attention processor. This is strongly recommended when setting trainable attention processors. """ count = len(self.attn_processors.keys()) if isinstance(processor, dict) and len(processor) != count: raise ValueError( f"A dict of processors was passed, but the number of processors {len(processor)} does not match the" f" number of attention layers: {count}. Please make sure to pass {count} processor classes." 
) def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): if hasattr(module, "set_processor"): if not isinstance(processor, dict): module.set_processor(processor) else: module.set_processor(processor.pop(f"{name}.processor")) for sub_name, child in module.named_children(): fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor) for name, module in self.named_children(): fn_recursive_attn_processor(name, module, processor) def set_default_attn_processor(self): """ Disables custom attention processors and sets the default attention implementation. """ if all(proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values()): processor = AttnAddedKVProcessor() elif all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()): processor = AttnProcessor() else: raise ValueError( f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}" ) self.set_attn_processor(processor) def set_attention_slice(self, slice_size: Union[str, int, List[int]] = "auto"): r""" Enable sliced attention computation. When this option is enabled, the attention module splits the input tensor in slices to compute attention in several steps. This is useful for saving some memory in exchange for a small decrease in speed. Args: slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`): When `"auto"`, input to the attention heads is halved, so attention is computed in two steps. If `"max"`, maximum amount of memory is saved by running only one slice at a time. If a number is provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim` must be a multiple of `slice_size`. """ sliceable_head_dims = [] def fn_recursive_retrieve_sliceable_dims(module: torch.nn.Module): if hasattr(module, "set_attention_slice"): sliceable_head_dims.append(module.sliceable_head_dim) for child in module.children(): fn_recursive_retrieve_sliceable_dims(child) # retrieve number of attention layers for module in self.children(): fn_recursive_retrieve_sliceable_dims(module) num_sliceable_layers = len(sliceable_head_dims) if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory slice_size = [dim // 2 for dim in sliceable_head_dims] elif slice_size == "max": # make smallest slice possible slice_size = num_sliceable_layers * [1] slice_size = num_sliceable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size if len(slice_size) != len(sliceable_head_dims): raise ValueError( f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different" f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}." ) for i in range(len(slice_size)): size = slice_size[i] dim = sliceable_head_dims[i] if size is not None and size > dim: raise ValueError(f"size {size} has to be smaller or equal to {dim}.") # Recursively walk through all the children. 
# Any children which exposes the set_attention_slice method # gets the message def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]): if hasattr(module, "set_attention_slice"): module.set_attention_slice(slice_size.pop()) for child in module.children(): fn_recursive_set_attention_slice(child, slice_size) reversed_slice_size = list(reversed(slice_size)) for module in self.children(): fn_recursive_set_attention_slice(module, reversed_slice_size) def enable_freeu(self, s1: float, s2: float, b1: float, b2: float): r"""Enables the FreeU mechanism from https://huggingface.co/papers/2309.11497. The suffixes after the scaling factors represent the stage blocks where they are being applied. Please refer to the [official repository](https://github.com/ChenyangSi/FreeU) for combinations of values that are known to work well for different pipelines such as Stable Diffusion v1, v2, and Stable Diffusion XL. Args: s1 (`float`): Scaling factor for stage 1 to attenuate the contributions of the skip features. This is done to mitigate the "oversmoothing effect" in the enhanced denoising process. s2 (`float`): Scaling factor for stage 2 to attenuate the contributions of the skip features. This is done to mitigate the "oversmoothing effect" in the enhanced denoising process. b1 (`float`): Scaling factor for stage 1 to amplify the contributions of backbone features. b2 (`float`): Scaling factor for stage 2 to amplify the contributions of backbone features. """ for i, upsample_block in enumerate(self.up_blocks): setattr(upsample_block, "s1", s1) setattr(upsample_block, "s2", s2) setattr(upsample_block, "b1", b1) setattr(upsample_block, "b2", b2) def disable_freeu(self): """Disables the FreeU mechanism.""" freeu_keys = {"s1", "s2", "b1", "b2"} for i, upsample_block in enumerate(self.up_blocks): for k in freeu_keys: if hasattr(upsample_block, k) or getattr(upsample_block, k, None) is not None: setattr(upsample_block, k, None) def fuse_qkv_projections(self): """ Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query, key, value) are fused. For cross-attention modules, key and value projection matrices are fused. <Tip warning={true}> This API is 🧪 experimental. </Tip> """ self.original_attn_processors = None for _, attn_processor in self.attn_processors.items(): if "Added" in str(attn_processor.__class__.__name__): raise ValueError("`fuse_qkv_projections()` is not supported for models having added KV projections.") self.original_attn_processors = self.attn_processors for module in self.modules(): if isinstance(module, Attention): module.fuse_projections(fuse=True) self.set_attn_processor(FusedAttnProcessor2_0()) def unfuse_qkv_projections(self): """Disables the fused QKV projection if enabled. <Tip warning={true}> This API is 🧪 experimental. </Tip> """ if self.original_attn_processors is not None: self.set_attn_processor(self.original_attn_processors) def get_time_embed( self, sample: torch.Tensor, timestep: Union[torch.Tensor, float, int] ) -> Optional[torch.Tensor]: timesteps = timestep if not torch.is_tensor(timesteps): # TODO: this requires sync between CPU and GPU. 
So try to pass timesteps as tensors if you can # This would be a good case for the `match` statement (Python 3.10+) is_mps = sample.device.type == "mps" is_npu = sample.device.type == "npu" if isinstance(timestep, float): dtype = torch.float32 if (is_mps or is_npu) else torch.float64 else: dtype = torch.int32 if (is_mps or is_npu) else torch.int64 timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device) elif len(timesteps.shape) == 0: timesteps = timesteps[None].to(sample.device) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML timesteps = timesteps.expand(sample.shape[0]) t_emb = self.time_proj(timesteps) # `Timesteps` does not contain any weights and will always return f32 tensors # but time_embedding might actually be running in fp16. so we need to cast here. # there might be better ways to encapsulate this. t_emb = t_emb.to(dtype=sample.dtype) return t_emb def get_class_embed(self, sample: torch.Tensor, class_labels: Optional[torch.Tensor]) -> Optional[torch.Tensor]: class_emb = None if self.class_embedding is not None: if class_labels is None: raise ValueError("class_labels should be provided when num_class_embeds > 0") if self.config.class_embed_type == "timestep": class_labels = self.time_proj(class_labels) # `Timesteps` does not contain any weights and will always return f32 tensors # there might be better ways to encapsulate this. class_labels = class_labels.to(dtype=sample.dtype) class_emb = self.class_embedding(class_labels).to(dtype=sample.dtype) return class_emb def get_aug_embed( self, emb: torch.Tensor, encoder_hidden_states: torch.Tensor, added_cond_kwargs: Dict[str, Any] ) -> Optional[torch.Tensor]: aug_emb = None if self.config.addition_embed_type == "text": aug_emb = self.add_embedding(encoder_hidden_states) elif self.config.addition_embed_type == "text_image": # Kandinsky 2.1 - style if "image_embeds" not in added_cond_kwargs: raise ValueError( f"{self.__class__} has the config param `addition_embed_type` set to 'text_image' which requires the keyword argument `image_embeds` to be passed in `added_cond_kwargs`" ) image_embs = added_cond_kwargs.get("image_embeds") text_embs = added_cond_kwargs.get("text_embeds", encoder_hidden_states) aug_emb = self.add_embedding(text_embs, image_embs) elif self.config.addition_embed_type == "text_time": # SDXL - style if "text_embeds" not in added_cond_kwargs: raise ValueError( f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `text_embeds` to be passed in `added_cond_kwargs`" ) text_embeds = added_cond_kwargs.get("text_embeds") if "time_ids" not in added_cond_kwargs: raise ValueError( f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `time_ids` to be passed in `added_cond_kwargs`" ) time_ids = added_cond_kwargs.get("time_ids") time_embeds = self.add_time_proj(time_ids.flatten()) time_embeds = time_embeds.reshape((text_embeds.shape[0], -1)) add_embeds = torch.concat([text_embeds, time_embeds], dim=-1) add_embeds = add_embeds.to(emb.dtype) aug_emb = self.add_embedding(add_embeds) elif self.config.addition_embed_type == "image": # Kandinsky 2.2 - style if "image_embeds" not in added_cond_kwargs: raise ValueError( f"{self.__class__} has the config param `addition_embed_type` set to 'image' which requires the keyword argument `image_embeds` to be passed in `added_cond_kwargs`" ) image_embs = added_cond_kwargs.get("image_embeds") aug_emb = 
self.add_embedding(image_embs) elif self.config.addition_embed_type == "image_hint": # Kandinsky 2.2 ControlNet - style if "image_embeds" not in added_cond_kwargs or "hint" not in added_cond_kwargs: raise ValueError( f"{self.__class__} has the config param `addition_embed_type` set to 'image_hint' which requires the keyword arguments `image_embeds` and `hint` to be passed in `added_cond_kwargs`" ) image_embs = added_cond_kwargs.get("image_embeds") hint = added_cond_kwargs.get("hint") aug_emb = self.add_embedding(image_embs, hint) return aug_emb def process_encoder_hidden_states( self, encoder_hidden_states: torch.Tensor, added_cond_kwargs: Dict[str, Any] ) -> torch.Tensor: if self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "text_proj": encoder_hidden_states = self.encoder_hid_proj(encoder_hidden_states) elif self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "text_image_proj": # Kandinsky 2.1 - style if "image_embeds" not in added_cond_kwargs: raise ValueError( f"{self.__class__} has the config param `encoder_hid_dim_type` set to 'text_image_proj' which requires the keyword argument `image_embeds` to be passed in `added_cond_kwargs`" ) image_embeds = added_cond_kwargs.get("image_embeds") encoder_hidden_states = self.encoder_hid_proj(encoder_hidden_states, image_embeds) elif self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "image_proj": # Kandinsky 2.2 - style if "image_embeds" not in added_cond_kwargs: raise ValueError( f"{self.__class__} has the config param `encoder_hid_dim_type` set to 'image_proj' which requires the keyword argument `image_embeds` to be passed in `added_cond_kwargs`" ) image_embeds = added_cond_kwargs.get("image_embeds") encoder_hidden_states = self.encoder_hid_proj(image_embeds) elif self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "ip_image_proj": if "image_embeds" not in added_cond_kwargs: raise ValueError( f"{self.__class__} has the config param `encoder_hid_dim_type` set to 'ip_image_proj' which requires the keyword argument `image_embeds` to be passed in `added_cond_kwargs`" ) if hasattr(self, "text_encoder_hid_proj") and self.text_encoder_hid_proj is not None: encoder_hidden_states = self.text_encoder_hid_proj(encoder_hidden_states) image_embeds = added_cond_kwargs.get("image_embeds") image_embeds = self.encoder_hid_proj(image_embeds) encoder_hidden_states = (encoder_hidden_states, image_embeds) return encoder_hidden_states def forward( self, sample: torch.Tensor, timestep: Union[torch.Tensor, float, int], encoder_hidden_states: torch.Tensor, class_labels: Optional[torch.Tensor] = None, timestep_cond: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, cross_attention_kwargs: Optional[Dict[str, Any]] = None, added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None, down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None, mid_block_additional_residual: Optional[torch.Tensor] = None, down_intrablock_additional_residuals: Optional[Tuple[torch.Tensor]] = None, encoder_attention_mask: Optional[torch.Tensor] = None, return_dict: bool = True, ) -> Union[UNet2DConditionOutput, Tuple]: r""" The [`UNet2DConditionModel`] forward method. Args: sample (`torch.Tensor`): The noisy input tensor with the following shape `(batch, channel, height, width)`. timestep (`torch.Tensor` or `float` or `int`): The number of timesteps to denoise an input. 
encoder_hidden_states (`torch.Tensor`): The encoder hidden states with shape `(batch, sequence_length, feature_dim)`. class_labels (`torch.Tensor`, *optional*, defaults to `None`): Optional class labels for conditioning. Their embeddings will be summed with the timestep embeddings. timestep_cond: (`torch.Tensor`, *optional*, defaults to `None`): Conditional embeddings for timestep. If provided, the embeddings will be summed with the samples passed through the `self.time_embedding` layer to obtain the timestep embeddings. attention_mask (`torch.Tensor`, *optional*, defaults to `None`): An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. If `1` the mask is kept, otherwise if `0` it is discarded. Mask will be converted into a bias, which adds large negative values to the attention scores corresponding to "discard" tokens. cross_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). added_cond_kwargs: (`dict`, *optional*): A kwargs dictionary containing additional embeddings that if specified are added to the embeddings that are passed along to the UNet blocks. down_block_additional_residuals: (`tuple` of `torch.Tensor`, *optional*): A tuple of tensors that if specified are added to the residuals of down unet blocks. mid_block_additional_residual: (`torch.Tensor`, *optional*): A tensor that if specified is added to the residual of the middle unet block. down_intrablock_additional_residuals (`tuple` of `torch.Tensor`, *optional*): additional residuals to be added within UNet down blocks, for example from T2I-Adapter side model(s) encoder_attention_mask (`torch.Tensor`): A cross-attention mask of shape `(batch, sequence_length)` is applied to `encoder_hidden_states`. If `True` the mask is kept, otherwise if `False` it is discarded. Mask will be converted into a bias, which adds large negative values to the attention scores corresponding to "discard" tokens. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~models.unets.unet_2d_condition.UNet2DConditionOutput`] instead of a plain tuple. Returns: [`~models.unets.unet_2d_condition.UNet2DConditionOutput`] or `tuple`: If `return_dict` is True, an [`~models.unets.unet_2d_condition.UNet2DConditionOutput`] is returned, otherwise a `tuple` is returned where the first element is the sample tensor. """ # By default samples have to be AT least a multiple of the overall upsampling factor. # The overall upsampling factor is equal to 2 ** (# num of upsampling layers). # However, the upsampling interpolation output size can be forced to fit any upsampling size # on the fly if necessary. default_overall_up_factor = 2**self.num_upsamplers # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor` forward_upsample_size = False upsample_size = None for dim in sample.shape[-2:]: if dim % default_overall_up_factor != 0: # Forward upsample size to force interpolation output size. 
forward_upsample_size = True break # ensure attention_mask is a bias, and give it a singleton query_tokens dimension # expects mask of shape: # [batch, key_tokens] # adds singleton query_tokens dimension: # [batch, 1, key_tokens] # this helps to broadcast it as a bias over attention scores, which will be in one of the following shapes: # [batch, heads, query_tokens, key_tokens] (e.g. torch sdp attn) # [batch * heads, query_tokens, key_tokens] (e.g. xformers or classic attn) if attention_mask is not None: # assume that mask is expressed as: # (1 = keep, 0 = discard) # convert mask into a bias that can be added to attention scores: # (keep = +0, discard = -10000.0) attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0 attention_mask = attention_mask.unsqueeze(1) # convert encoder_attention_mask to a bias the same way we do for attention_mask if encoder_attention_mask is not None: encoder_attention_mask = (1 - encoder_attention_mask.to(sample.dtype)) * -10000.0 encoder_attention_mask = encoder_attention_mask.unsqueeze(1) # 0. center input if necessary if self.config.center_input_sample: sample = 2 * sample - 1.0 # 1. time t_emb = self.get_time_embed(sample=sample, timestep=timestep) emb = self.time_embedding(t_emb, timestep_cond) class_emb = self.get_class_embed(sample=sample, class_labels=class_labels) if class_emb is not None: if self.config.class_embeddings_concat: emb = torch.cat([emb, class_emb], dim=-1) else: emb = emb + class_emb aug_emb = self.get_aug_embed( emb=emb, encoder_hidden_states=encoder_hidden_states, added_cond_kwargs=added_cond_kwargs ) if self.config.addition_embed_type == "image_hint": aug_emb, hint = aug_emb sample = torch.cat([sample, hint], dim=1) emb = emb + aug_emb if aug_emb is not None else emb if self.time_embed_act is not None: emb = self.time_embed_act(emb) encoder_hidden_states = self.process_encoder_hidden_states( encoder_hidden_states=encoder_hidden_states, added_cond_kwargs=added_cond_kwargs ) # 2. pre-process sample = self.conv_in(sample) # 2.5 GLIGEN position net if cross_attention_kwargs is not None and cross_attention_kwargs.get("gligen", None) is not None: cross_attention_kwargs = cross_attention_kwargs.copy() gligen_args = cross_attention_kwargs.pop("gligen") cross_attention_kwargs["gligen"] = {"objs": self.position_net(**gligen_args)} # 3. down # we're popping the `scale` instead of getting it because otherwise `scale` will be propagated # to the internal blocks and will raise deprecation warnings. this will be confusing for our users. 
if cross_attention_kwargs is not None: cross_attention_kwargs = cross_attention_kwargs.copy() lora_scale = cross_attention_kwargs.pop("scale", 1.0) else: lora_scale = 1.0 if USE_PEFT_BACKEND: # weight the lora layers by setting `lora_scale` for each PEFT layer scale_lora_layers(self, lora_scale) is_controlnet = mid_block_additional_residual is not None and down_block_additional_residuals is not None # using new arg down_intrablock_additional_residuals for T2I-Adapters, to distinguish from controlnets is_adapter = down_intrablock_additional_residuals is not None # maintain backward compatibility for legacy usage, where # T2I-Adapter and ControlNet both use down_block_additional_residuals arg # but can only use one or the other if not is_adapter and mid_block_additional_residual is None and down_block_additional_residuals is not None: deprecate( "T2I should not use down_block_additional_residuals", "1.3.0", "Passing intrablock residual connections with `down_block_additional_residuals` is deprecated \ and will be removed in diffusers 1.3.0. `down_block_additional_residuals` should only be used \ for ControlNet. Please make sure use `down_intrablock_additional_residuals` instead. ", standard_warn=False, ) down_intrablock_additional_residuals = down_block_additional_residuals is_adapter = True down_block_res_samples = (sample,) for downsample_block in self.down_blocks: if hasattr(downsample_block, "has_cross_attention") and downsample_block.has_cross_attention: # For t2i-adapter CrossAttnDownBlock2D additional_residuals = {} if is_adapter and len(down_intrablock_additional_residuals) > 0: additional_residuals["additional_residuals"] = down_intrablock_additional_residuals.pop(0) sample, res_samples = downsample_block( hidden_states=sample, temb=emb, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask, cross_attention_kwargs=cross_attention_kwargs, encoder_attention_mask=encoder_attention_mask, **additional_residuals, ) else: sample, res_samples = downsample_block(hidden_states=sample, temb=emb) if is_adapter and len(down_intrablock_additional_residuals) > 0: sample += down_intrablock_additional_residuals.pop(0) down_block_res_samples += res_samples if is_controlnet: new_down_block_res_samples = () for down_block_res_sample, down_block_additional_residual in zip( down_block_res_samples, down_block_additional_residuals ): down_block_res_sample = down_block_res_sample + down_block_additional_residual new_down_block_res_samples = new_down_block_res_samples + (down_block_res_sample,) down_block_res_samples = new_down_block_res_samples # 4. mid if self.mid_block is not None: if hasattr(self.mid_block, "has_cross_attention") and self.mid_block.has_cross_attention: sample = self.mid_block( sample, emb, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask, cross_attention_kwargs=cross_attention_kwargs, encoder_attention_mask=encoder_attention_mask, ) else: sample = self.mid_block(sample, emb) # To support T2I-Adapter-XL if ( is_adapter and len(down_intrablock_additional_residuals) > 0 and sample.shape == down_intrablock_additional_residuals[0].shape ): sample += down_intrablock_additional_residuals.pop(0) if is_controlnet: sample = sample + mid_block_additional_residual # 5. 
up for i, upsample_block in enumerate(self.up_blocks): is_final_block = i == len(self.up_blocks) - 1 res_samples = down_block_res_samples[-len(upsample_block.resnets) :] down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)] # if we have not reached the final block and need to forward the # upsample size, we do it here if not is_final_block and forward_upsample_size: upsample_size = down_block_res_samples[-1].shape[2:] if hasattr(upsample_block, "has_cross_attention") and upsample_block.has_cross_attention: sample = upsample_block( hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs, upsample_size=upsample_size, attention_mask=attention_mask, encoder_attention_mask=encoder_attention_mask, ) else: sample = upsample_block( hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, upsample_size=upsample_size, ) # 6. post-process if self.conv_norm_out: sample = self.conv_norm_out(sample) sample = self.conv_act(sample) sample = self.conv_out(sample) if USE_PEFT_BACKEND: # remove `lora_scale` from each PEFT layer unscale_lora_layers(self, lora_scale) if not return_dict: return (sample,) return UNet2DConditionOutput(sample=sample)
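# The following smoke-test sketch is illustrative and not part of the original module. It builds a
# deliberately tiny UNet (the block sizes and FreeU values below are assumptions chosen for the example;
# b1/b2/s1/s2 follow values commonly reported for SD 1.x) so the utilities defined above —
# `set_attention_slice` and `enable_freeu` — can be exercised quickly on CPU.
if __name__ == "__main__":
    unet = UNet2DConditionModel(
        sample_size=32,
        block_out_channels=(32, 64),
        down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
        up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
        cross_attention_dim=32,
        attention_head_dim=8,
        layers_per_block=1,
        norm_num_groups=8,
    )
    # Slice attention to trade a bit of speed for lower peak memory.
    unet.set_attention_slice("auto")
    # Enable FreeU on the upsampling blocks (values are an assumption for this sketch).
    unet.enable_freeu(s1=0.9, s2=0.2, b1=1.5, b2=1.6)

    sample = torch.randn(2, 4, 32, 32)
    encoder_hidden_states = torch.randn(2, 77, 32)
    out = unet(sample, timestep=torch.tensor([10, 10]), encoder_hidden_states=encoder_hidden_states).sample
    print(out.shape)  # torch.Size([2, 4, 32, 32])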
# End of file: diffusers/src/diffusers/models/unets/unet_2d_condition.py
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect from typing import Any, List, Optional, Tuple, Union import numpy as np import torch from ...models import AutoencoderKL from ...schedulers import FlowMatchEulerDiscreteScheduler from ...utils import logging from ...utils.torch_utils import randn_tensor from ..modular_pipeline import ModularPipelineBlocks, PipelineState from ..modular_pipeline_utils import ComponentSpec, InputParam, OutputParam from .modular_pipeline import FluxModularPipeline logger = logging.get_logger(__name__) # pylint: disable=invalid-name # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps def retrieve_timesteps( scheduler, num_inference_steps: Optional[int] = None, device: Optional[Union[str, torch.device]] = None, timesteps: Optional[List[int]] = None, sigmas: Optional[List[float]] = None, **kwargs, ): r""" Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. Args: scheduler (`SchedulerMixin`): The scheduler to get timesteps from. num_inference_steps (`int`): The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` must be `None`. device (`str` or `torch.device`, *optional*): The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. timesteps (`List[int]`, *optional*): Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, `num_inference_steps` and `sigmas` must be `None`. sigmas (`List[float]`, *optional*): Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, `num_inference_steps` and `timesteps` must be `None`. Returns: `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the second element is the number of inference steps. """ if timesteps is not None and sigmas is not None: raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") if timesteps is not None: accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" timestep schedules. Please check whether you are using the correct scheduler." ) scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif sigmas is not None: accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accept_sigmas: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" sigmas schedules. Please check whether you are using the correct scheduler." 
) scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return timesteps, num_inference_steps # Copied from diffusers.pipelines.flux.pipeline_flux.calculate_shift def calculate_shift( image_seq_len, base_seq_len: int = 256, max_seq_len: int = 4096, base_shift: float = 0.5, max_shift: float = 1.15, ): m = (max_shift - base_shift) / (max_seq_len - base_seq_len) b = base_shift - m * base_seq_len mu = image_seq_len * m + b return mu # Adapted from the original implementation. def prepare_latents_img2img( vae, scheduler, image, timestep, batch_size, num_channels_latents, height, width, dtype, device, generator ): if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) vae_scale_factor = 2 ** (len(vae.config.block_out_channels) - 1) latent_channels = vae.config.latent_channels # VAE applies 8x compression on images but we must also account for packing which requires # latent height and width to be divisible by 2. height = 2 * (int(height) // (vae_scale_factor * 2)) width = 2 * (int(width) // (vae_scale_factor * 2)) shape = (batch_size, num_channels_latents, height, width) latent_image_ids = _prepare_latent_image_ids(batch_size, height // 2, width // 2, device, dtype) image = image.to(device=device, dtype=dtype) if image.shape[1] != latent_channels: image_latents = _encode_vae_image(vae=vae, image=image, generator=generator) else: image_latents = image if batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] == 0: # expand init_latents for batch_size additional_image_per_prompt = batch_size // image_latents.shape[0] image_latents = torch.cat([image_latents] * additional_image_per_prompt, dim=0) elif batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] != 0: raise ValueError( f"Cannot duplicate `image` of batch size {image_latents.shape[0]} to {batch_size} text prompts."
) else: image_latents = torch.cat([image_latents], dim=0) noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) latents = scheduler.scale_noise(image_latents, timestep, noise) latents = _pack_latents(latents, batch_size, num_channels_latents, height, width) return latents, latent_image_ids # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents def retrieve_latents( encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample" ): if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": return encoder_output.latent_dist.sample(generator) elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": return encoder_output.latent_dist.mode() elif hasattr(encoder_output, "latents"): return encoder_output.latents else: raise AttributeError("Could not access latents of provided encoder_output") def _pack_latents(latents, batch_size, num_channels_latents, height, width): latents = latents.view(batch_size, num_channels_latents, height // 2, 2, width // 2, 2) latents = latents.permute(0, 2, 4, 1, 3, 5) latents = latents.reshape(batch_size, (height // 2) * (width // 2), num_channels_latents * 4) return latents def _prepare_latent_image_ids(batch_size, height, width, device, dtype): latent_image_ids = torch.zeros(height, width, 3) latent_image_ids[..., 1] = latent_image_ids[..., 1] + torch.arange(height)[:, None] latent_image_ids[..., 2] = latent_image_ids[..., 2] + torch.arange(width)[None, :] latent_image_id_height, latent_image_id_width, latent_image_id_channels = latent_image_ids.shape latent_image_ids = latent_image_ids.reshape( latent_image_id_height * latent_image_id_width, latent_image_id_channels ) return latent_image_ids.to(device=device, dtype=dtype) # Cannot use "# Copied from" because it introduces weird indentation errors. def _encode_vae_image(vae, image: torch.Tensor, generator: torch.Generator): if isinstance(generator, list): image_latents = [ retrieve_latents(vae.encode(image[i : i + 1]), generator=generator[i]) for i in range(image.shape[0]) ] image_latents = torch.cat(image_latents, dim=0) else: image_latents = retrieve_latents(vae.encode(image), generator=generator) image_latents = (image_latents - vae.config.shift_factor) * vae.config.scaling_factor return image_latents def _get_initial_timesteps_and_optionals( transformer, scheduler, batch_size, height, width, vae_scale_factor, num_inference_steps, guidance_scale, sigmas, device, ): image_seq_len = (int(height) // vae_scale_factor // 2) * (int(width) // vae_scale_factor // 2) sigmas = np.linspace(1.0, 1 / num_inference_steps, num_inference_steps) if sigmas is None else sigmas if hasattr(scheduler.config, "use_flow_sigmas") and scheduler.config.use_flow_sigmas: sigmas = None mu = calculate_shift( image_seq_len, scheduler.config.get("base_image_seq_len", 256), scheduler.config.get("max_image_seq_len", 4096), scheduler.config.get("base_shift", 0.5), scheduler.config.get("max_shift", 1.15), ) timesteps, num_inference_steps = retrieve_timesteps(scheduler, num_inference_steps, device, sigmas=sigmas, mu=mu) if transformer.config.guidance_embeds: guidance = torch.full([1], guidance_scale, device=device, dtype=torch.float32) guidance = guidance.expand(batch_size) else: guidance = None return timesteps, num_inference_steps, sigmas, guidance class FluxInputStep(ModularPipelineBlocks): model_name = "flux" @property def description(self) -> str: return ( "Input processing step that:\n" " 1. 
Determines `batch_size` and `dtype` based on `prompt_embeds`\n" " 2. Adjusts input tensor shapes based on `batch_size` (number of prompts) and `num_images_per_prompt`\n\n" "All input tensors are expected to have either batch_size=1 or match the batch_size\n" "of prompt_embeds. The tensors will be duplicated across the batch dimension to\n" "have a final batch_size of batch_size * num_images_per_prompt." ) @property def inputs(self) -> List[InputParam]: return [ InputParam("num_images_per_prompt", default=1), InputParam( "prompt_embeds", required=True, type_hint=torch.Tensor, description="Pre-generated text embeddings. Can be generated from text_encoder step.", ), InputParam( "pooled_prompt_embeds", type_hint=torch.Tensor, description="Pre-generated pooled text embeddings. Can be generated from text_encoder step.", ), # TODO: support negative embeddings? ] @property def intermediate_outputs(self) -> List[str]: return [ OutputParam( "batch_size", type_hint=int, description="Number of prompts, the final batch size of model inputs should be batch_size * num_images_per_prompt", ), OutputParam( "dtype", type_hint=torch.dtype, description="Data type of model tensor inputs (determined by `prompt_embeds`)", ), OutputParam( "prompt_embeds", type_hint=torch.Tensor, description="text embeddings used to guide the image generation", ), OutputParam( "pooled_prompt_embeds", type_hint=torch.Tensor, description="pooled text embeddings used to guide the image generation", ), # TODO: support negative embeddings? ] def check_inputs(self, components, block_state): if block_state.prompt_embeds is not None and block_state.pooled_prompt_embeds is not None: if block_state.prompt_embeds.shape[0] != block_state.pooled_prompt_embeds.shape[0]: raise ValueError( "`prompt_embeds` and `pooled_prompt_embeds` must have the same batch size when passed directly, but" f" got: `prompt_embeds` {block_state.prompt_embeds.shape} != `pooled_prompt_embeds`" f" {block_state.pooled_prompt_embeds.shape}." ) @torch.no_grad() def __call__(self, components: FluxModularPipeline, state: PipelineState) -> PipelineState: # TODO: consider adding negative embeddings? block_state = self.get_block_state(state) self.check_inputs(components, block_state) block_state.batch_size = block_state.prompt_embeds.shape[0] block_state.dtype = block_state.prompt_embeds.dtype _, seq_len, _ = block_state.prompt_embeds.shape block_state.prompt_embeds = block_state.prompt_embeds.repeat(1, block_state.num_images_per_prompt, 1) block_state.prompt_embeds = block_state.prompt_embeds.view( block_state.batch_size * block_state.num_images_per_prompt, seq_len, -1 ) self.set_block_state(state, block_state) return components, state class FluxSetTimestepsStep(ModularPipelineBlocks): model_name = "flux" @property def expected_components(self) -> List[ComponentSpec]: return [ComponentSpec("scheduler", FlowMatchEulerDiscreteScheduler)] @property def description(self) -> str: return "Step that sets the scheduler's timesteps for inference" @property def inputs(self) -> List[InputParam]: return [ InputParam("num_inference_steps", default=50), InputParam("timesteps"), InputParam("sigmas"), InputParam("guidance_scale", default=3.5), InputParam("latents", type_hint=torch.Tensor), InputParam("num_images_per_prompt", default=1), InputParam("height", type_hint=int), InputParam("width", type_hint=int), InputParam( "batch_size", required=True, type_hint=int, description="Number of prompts, the final batch size of model inputs should be `batch_size * num_images_per_prompt`. 
Can be generated in input step.", ), ] @property def intermediate_outputs(self) -> List[OutputParam]: return [ OutputParam("timesteps", type_hint=torch.Tensor, description="The timesteps to use for inference"), OutputParam( "num_inference_steps", type_hint=int, description="The number of denoising steps to perform at inference time", ), OutputParam("guidance", type_hint=torch.Tensor, description="Optional guidance to be used."), ] @torch.no_grad() def __call__(self, components: FluxModularPipeline, state: PipelineState) -> PipelineState: block_state = self.get_block_state(state) block_state.device = components._execution_device scheduler = components.scheduler transformer = components.transformer batch_size = block_state.batch_size * block_state.num_images_per_prompt timesteps, num_inference_steps, sigmas, guidance = _get_initial_timesteps_and_optionals( transformer, scheduler, batch_size, block_state.height, block_state.width, components.vae_scale_factor, block_state.num_inference_steps, block_state.guidance_scale, block_state.sigmas, block_state.device, ) block_state.timesteps = timesteps block_state.num_inference_steps = num_inference_steps block_state.sigmas = sigmas block_state.guidance = guidance self.set_block_state(state, block_state) return components, state class FluxImg2ImgSetTimestepsStep(ModularPipelineBlocks): model_name = "flux" @property def expected_components(self) -> List[ComponentSpec]: return [ComponentSpec("scheduler", FlowMatchEulerDiscreteScheduler)] @property def description(self) -> str: return "Step that sets the scheduler's timesteps for inference" @property def inputs(self) -> List[InputParam]: return [ InputParam("num_inference_steps", default=50), InputParam("timesteps"), InputParam("sigmas"), InputParam("strength", default=0.6), InputParam("guidance_scale", default=3.5), InputParam("num_images_per_prompt", default=1), InputParam("height", type_hint=int), InputParam("width", type_hint=int), InputParam( "batch_size", required=True, type_hint=int, description="Number of prompts, the final batch size of model inputs should be `batch_size * num_images_per_prompt`. 
Can be generated in input step.", ), ] @property def intermediate_outputs(self) -> List[OutputParam]: return [ OutputParam("timesteps", type_hint=torch.Tensor, description="The timesteps to use for inference"), OutputParam( "num_inference_steps", type_hint=int, description="The number of denoising steps to perform at inference time", ), OutputParam( "latent_timestep", type_hint=torch.Tensor, description="The timestep that represents the initial noise level for image-to-image generation", ), OutputParam("guidance", type_hint=torch.Tensor, description="Optional guidance to be used."), ] @staticmethod # Copied from diffusers.pipelines.stable_diffusion_3.pipeline_stable_diffusion_3_img2img.StableDiffusion3Img2ImgPipeline.get_timesteps with self.scheduler->scheduler def get_timesteps(scheduler, num_inference_steps, strength, device): # get the original timestep using init_timestep init_timestep = min(num_inference_steps * strength, num_inference_steps) t_start = int(max(num_inference_steps - init_timestep, 0)) timesteps = scheduler.timesteps[t_start * scheduler.order :] if hasattr(scheduler, "set_begin_index"): scheduler.set_begin_index(t_start * scheduler.order) return timesteps, num_inference_steps - t_start @torch.no_grad() def __call__(self, components: FluxModularPipeline, state: PipelineState) -> PipelineState: block_state = self.get_block_state(state) block_state.device = components._execution_device scheduler = components.scheduler transformer = components.transformer batch_size = block_state.batch_size * block_state.num_images_per_prompt timesteps, num_inference_steps, sigmas, guidance = _get_initial_timesteps_and_optionals( transformer, scheduler, batch_size, block_state.height, block_state.width, components.vae_scale_factor, block_state.num_inference_steps, block_state.guidance_scale, block_state.sigmas, block_state.device, ) timesteps, num_inference_steps = self.get_timesteps( scheduler, num_inference_steps, block_state.strength, block_state.device ) block_state.timesteps = timesteps block_state.num_inference_steps = num_inference_steps block_state.sigmas = sigmas block_state.guidance = guidance block_state.latent_timestep = timesteps[:1].repeat(batch_size) self.set_block_state(state, block_state) return components, state class FluxPrepareLatentsStep(ModularPipelineBlocks): model_name = "flux" @property def expected_components(self) -> List[ComponentSpec]: return [] @property def description(self) -> str: return "Prepare latents step that prepares the latents for the text-to-image generation process" @property def inputs(self) -> List[InputParam]: return [ InputParam("height", type_hint=int), InputParam("width", type_hint=int), InputParam("latents", type_hint=Optional[torch.Tensor]), InputParam("num_images_per_prompt", type_hint=int, default=1), InputParam("generator"), InputParam( "batch_size", required=True, type_hint=int, description="Number of prompts, the final batch size of model inputs should be `batch_size * num_images_per_prompt`. 
Can be generated in input step.", ), InputParam("dtype", type_hint=torch.dtype, description="The dtype of the model inputs"), ] @property def intermediate_outputs(self) -> List[OutputParam]: return [ OutputParam( "latents", type_hint=torch.Tensor, description="The initial latents to use for the denoising process" ), OutputParam( "latent_image_ids", type_hint=torch.Tensor, description="IDs computed from the image sequence needed for RoPE", ), ] @staticmethod def check_inputs(components, block_state): if (block_state.height is not None and block_state.height % (components.vae_scale_factor * 2) != 0) or ( block_state.width is not None and block_state.width % (components.vae_scale_factor * 2) != 0 ): logger.warning( f"`height` and `width` have to be divisible by {components.vae_scale_factor} but are {block_state.height} and {block_state.width}." ) @staticmethod def prepare_latents( comp, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None, ): # Couldn't use the `prepare_latents` method directly from Flux because I decided to copy over # the packing methods here. So, for example, `comp._pack_latents()` won't work if we were # to go with the "# Copied from ..." approach. Or maybe there's a way? # VAE applies 8x compression on images but we must also account for packing which requires # latent height and width to be divisible by 2. height = 2 * (int(height) // (comp.vae_scale_factor * 2)) width = 2 * (int(width) // (comp.vae_scale_factor * 2)) shape = (batch_size, num_channels_latents, height, width) if latents is not None: latent_image_ids = _prepare_latent_image_ids(batch_size, height // 2, width // 2, device, dtype) return latents.to(device=device, dtype=dtype), latent_image_ids if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) latents = _pack_latents(latents, batch_size, num_channels_latents, height, width) latent_image_ids = _prepare_latent_image_ids(batch_size, height // 2, width // 2, device, dtype) return latents, latent_image_ids @torch.no_grad() def __call__(self, components: FluxModularPipeline, state: PipelineState) -> PipelineState: block_state = self.get_block_state(state) block_state.height = block_state.height or components.default_height block_state.width = block_state.width or components.default_width block_state.device = components._execution_device block_state.dtype = torch.bfloat16 # TODO: okay to hardcode this? 
block_state.num_channels_latents = components.num_channels_latents self.check_inputs(components, block_state) batch_size = block_state.batch_size * block_state.num_images_per_prompt block_state.latents, block_state.latent_image_ids = self.prepare_latents( components, batch_size, block_state.num_channels_latents, block_state.height, block_state.width, block_state.dtype, block_state.device, block_state.generator, block_state.latents, ) self.set_block_state(state, block_state) return components, state class FluxImg2ImgPrepareLatentsStep(ModularPipelineBlocks): model_name = "flux" @property def expected_components(self) -> List[ComponentSpec]: return [ComponentSpec("vae", AutoencoderKL), ComponentSpec("scheduler", FlowMatchEulerDiscreteScheduler)] @property def description(self) -> str: return "Step that prepares the latents for the image-to-image generation process" @property def inputs(self) -> List[Tuple[str, Any]]: return [ InputParam("height", type_hint=int), InputParam("width", type_hint=int), InputParam("latents", type_hint=Optional[torch.Tensor]), InputParam("num_images_per_prompt", type_hint=int, default=1), InputParam("generator"), InputParam( "image_latents", required=True, type_hint=torch.Tensor, description="The latents representing the reference image for image-to-image/inpainting generation. Can be generated in vae_encode step.", ), InputParam( "latent_timestep", required=True, type_hint=torch.Tensor, description="The timestep that represents the initial noise level for image-to-image/inpainting generation. Can be generated in set_timesteps step.", ), InputParam( "batch_size", required=True, type_hint=int, description="Number of prompts, the final batch size of model inputs should be batch_size * num_images_per_prompt. Can be generated in input step.", ), InputParam("dtype", required=True, type_hint=torch.dtype, description="The dtype of the model inputs"), ] @property def intermediate_outputs(self) -> List[OutputParam]: return [ OutputParam( "latents", type_hint=torch.Tensor, description="The initial latents to use for the denoising process" ), OutputParam( "latent_image_ids", type_hint=torch.Tensor, description="IDs computed from the image sequence needed for RoPE", ), ] @torch.no_grad() def __call__(self, components: FluxModularPipeline, state: PipelineState) -> PipelineState: block_state = self.get_block_state(state) block_state.height = block_state.height or components.default_height block_state.width = block_state.width or components.default_width block_state.device = components._execution_device block_state.dtype = torch.bfloat16 # TODO: okay to hardcode this? block_state.num_channels_latents = components.num_channels_latents block_state.dtype = block_state.dtype if block_state.dtype is not None else components.vae.dtype block_state.device = components._execution_device # TODO: implement `check_inputs` batch_size = block_state.batch_size * block_state.num_images_per_prompt if block_state.latents is None: block_state.latents, block_state.latent_image_ids = prepare_latents_img2img( components.vae, components.scheduler, block_state.image_latents, block_state.latent_timestep, batch_size, block_state.num_channels_latents, block_state.height, block_state.width, block_state.dtype, block_state.device, block_state.generator, ) self.set_block_state(state, block_state) return components, state
diffusers/src/diffusers/modular_pipelines/flux/before_denoise.py/0
{ "file_path": "diffusers/src/diffusers/modular_pipelines/flux/before_denoise.py", "repo_id": "diffusers", "token_count": 12386 }
167
from typing import TYPE_CHECKING from ...utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_torch_available, is_transformers_available, ) _dummy_objects = {} _import_structure = {} try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_torch_and_transformers_objects # noqa F403 _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: _import_structure["encoders"] = ["WanTextEncoderStep"] _import_structure["modular_blocks"] = [ "ALL_BLOCKS", "AUTO_BLOCKS", "TEXT2VIDEO_BLOCKS", "WanAutoBeforeDenoiseStep", "WanAutoBlocks", "WanAutoBlocks", "WanAutoDecodeStep", "WanAutoDenoiseStep", ] _import_structure["modular_pipeline"] = ["WanModularPipeline"] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .encoders import WanTextEncoderStep from .modular_blocks import ( ALL_BLOCKS, AUTO_BLOCKS, TEXT2VIDEO_BLOCKS, WanAutoBeforeDenoiseStep, WanAutoBlocks, WanAutoDecodeStep, WanAutoDenoiseStep, ) from .modular_pipeline import WanModularPipeline else: import sys sys.modules[__name__] = _LazyModule( __name__, globals()["__file__"], _import_structure, module_spec=__spec__, ) for name, value in _dummy_objects.items(): setattr(sys.modules[__name__], name, value)
diffusers/src/diffusers/modular_pipelines/wan/__init__.py/0
{ "file_path": "diffusers/src/diffusers/modular_pipelines/wan/__init__.py", "repo_id": "diffusers", "token_count": 853 }
168
# coding=utf-8 # Copyright 2025 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from collections import OrderedDict from huggingface_hub.utils import validate_hf_hub_args from ..configuration_utils import ConfigMixin from ..models.controlnets import ControlNetUnionModel from ..utils import is_sentencepiece_available from .aura_flow import AuraFlowPipeline from .chroma import ChromaPipeline from .cogview3 import CogView3PlusPipeline from .cogview4 import CogView4ControlPipeline, CogView4Pipeline from .controlnet import ( StableDiffusionControlNetImg2ImgPipeline, StableDiffusionControlNetInpaintPipeline, StableDiffusionControlNetPipeline, StableDiffusionXLControlNetImg2ImgPipeline, StableDiffusionXLControlNetInpaintPipeline, StableDiffusionXLControlNetPipeline, StableDiffusionXLControlNetUnionImg2ImgPipeline, StableDiffusionXLControlNetUnionInpaintPipeline, StableDiffusionXLControlNetUnionPipeline, ) from .controlnet_sd3 import ( StableDiffusion3ControlNetInpaintingPipeline, StableDiffusion3ControlNetPipeline, ) from .deepfloyd_if import IFImg2ImgPipeline, IFInpaintingPipeline, IFPipeline from .flux import ( FluxControlImg2ImgPipeline, FluxControlInpaintPipeline, FluxControlNetImg2ImgPipeline, FluxControlNetInpaintPipeline, FluxControlNetPipeline, FluxControlPipeline, FluxImg2ImgPipeline, FluxInpaintPipeline, FluxKontextPipeline, FluxPipeline, ) from .hunyuandit import HunyuanDiTPipeline from .kandinsky import ( KandinskyCombinedPipeline, KandinskyImg2ImgCombinedPipeline, KandinskyImg2ImgPipeline, KandinskyInpaintCombinedPipeline, KandinskyInpaintPipeline, KandinskyPipeline, ) from .kandinsky2_2 import ( KandinskyV22CombinedPipeline, KandinskyV22Img2ImgCombinedPipeline, KandinskyV22Img2ImgPipeline, KandinskyV22InpaintCombinedPipeline, KandinskyV22InpaintPipeline, KandinskyV22Pipeline, ) from .kandinsky3 import Kandinsky3Img2ImgPipeline, Kandinsky3Pipeline from .latent_consistency_models import LatentConsistencyModelImg2ImgPipeline, LatentConsistencyModelPipeline from .lumina import LuminaPipeline from .lumina2 import Lumina2Pipeline from .pag import ( HunyuanDiTPAGPipeline, PixArtSigmaPAGPipeline, SanaPAGPipeline, StableDiffusion3PAGImg2ImgPipeline, StableDiffusion3PAGPipeline, StableDiffusionControlNetPAGInpaintPipeline, StableDiffusionControlNetPAGPipeline, StableDiffusionPAGImg2ImgPipeline, StableDiffusionPAGInpaintPipeline, StableDiffusionPAGPipeline, StableDiffusionXLControlNetPAGImg2ImgPipeline, StableDiffusionXLControlNetPAGPipeline, StableDiffusionXLPAGImg2ImgPipeline, StableDiffusionXLPAGInpaintPipeline, StableDiffusionXLPAGPipeline, ) from .pixart_alpha import PixArtAlphaPipeline, PixArtSigmaPipeline from .sana import SanaPipeline from .stable_cascade import StableCascadeCombinedPipeline, StableCascadeDecoderPipeline from .stable_diffusion import ( StableDiffusionImg2ImgPipeline, StableDiffusionInpaintPipeline, StableDiffusionPipeline, ) from .stable_diffusion_3 import ( StableDiffusion3Img2ImgPipeline, StableDiffusion3InpaintPipeline, StableDiffusion3Pipeline, ) from 
.stable_diffusion_xl import ( StableDiffusionXLImg2ImgPipeline, StableDiffusionXLInpaintPipeline, StableDiffusionXLPipeline, ) from .wuerstchen import WuerstchenCombinedPipeline, WuerstchenDecoderPipeline AUTO_TEXT2IMAGE_PIPELINES_MAPPING = OrderedDict( [ ("stable-diffusion", StableDiffusionPipeline), ("stable-diffusion-xl", StableDiffusionXLPipeline), ("stable-diffusion-3", StableDiffusion3Pipeline), ("stable-diffusion-3-pag", StableDiffusion3PAGPipeline), ("if", IFPipeline), ("hunyuan", HunyuanDiTPipeline), ("hunyuan-pag", HunyuanDiTPAGPipeline), ("kandinsky", KandinskyCombinedPipeline), ("kandinsky22", KandinskyV22CombinedPipeline), ("kandinsky3", Kandinsky3Pipeline), ("stable-diffusion-controlnet", StableDiffusionControlNetPipeline), ("stable-diffusion-xl-controlnet", StableDiffusionXLControlNetPipeline), ("stable-diffusion-xl-controlnet-union", StableDiffusionXLControlNetUnionPipeline), ("stable-diffusion-3-controlnet", StableDiffusion3ControlNetPipeline), ("wuerstchen", WuerstchenCombinedPipeline), ("cascade", StableCascadeCombinedPipeline), ("lcm", LatentConsistencyModelPipeline), ("pixart-alpha", PixArtAlphaPipeline), ("pixart-sigma", PixArtSigmaPipeline), ("sana", SanaPipeline), ("sana-pag", SanaPAGPipeline), ("stable-diffusion-pag", StableDiffusionPAGPipeline), ("stable-diffusion-controlnet-pag", StableDiffusionControlNetPAGPipeline), ("stable-diffusion-xl-pag", StableDiffusionXLPAGPipeline), ("stable-diffusion-xl-controlnet-pag", StableDiffusionXLControlNetPAGPipeline), ("pixart-sigma-pag", PixArtSigmaPAGPipeline), ("auraflow", AuraFlowPipeline), ("flux", FluxPipeline), ("flux-control", FluxControlPipeline), ("flux-controlnet", FluxControlNetPipeline), ("flux-kontext", FluxKontextPipeline), ("lumina", LuminaPipeline), ("lumina2", Lumina2Pipeline), ("chroma", ChromaPipeline), ("cogview3", CogView3PlusPipeline), ("cogview4", CogView4Pipeline), ("cogview4-control", CogView4ControlPipeline), ] ) AUTO_IMAGE2IMAGE_PIPELINES_MAPPING = OrderedDict( [ ("stable-diffusion", StableDiffusionImg2ImgPipeline), ("stable-diffusion-xl", StableDiffusionXLImg2ImgPipeline), ("stable-diffusion-3", StableDiffusion3Img2ImgPipeline), ("stable-diffusion-3-pag", StableDiffusion3PAGImg2ImgPipeline), ("if", IFImg2ImgPipeline), ("kandinsky", KandinskyImg2ImgCombinedPipeline), ("kandinsky22", KandinskyV22Img2ImgCombinedPipeline), ("kandinsky3", Kandinsky3Img2ImgPipeline), ("stable-diffusion-controlnet", StableDiffusionControlNetImg2ImgPipeline), ("stable-diffusion-pag", StableDiffusionPAGImg2ImgPipeline), ("stable-diffusion-xl-controlnet", StableDiffusionXLControlNetImg2ImgPipeline), ("stable-diffusion-xl-controlnet-union", StableDiffusionXLControlNetUnionImg2ImgPipeline), ("stable-diffusion-xl-pag", StableDiffusionXLPAGImg2ImgPipeline), ("stable-diffusion-xl-controlnet-pag", StableDiffusionXLControlNetPAGImg2ImgPipeline), ("lcm", LatentConsistencyModelImg2ImgPipeline), ("flux", FluxImg2ImgPipeline), ("flux-controlnet", FluxControlNetImg2ImgPipeline), ("flux-control", FluxControlImg2ImgPipeline), ("flux-kontext", FluxKontextPipeline), ] ) AUTO_INPAINT_PIPELINES_MAPPING = OrderedDict( [ ("stable-diffusion", StableDiffusionInpaintPipeline), ("stable-diffusion-xl", StableDiffusionXLInpaintPipeline), ("stable-diffusion-3", StableDiffusion3InpaintPipeline), ("if", IFInpaintingPipeline), ("kandinsky", KandinskyInpaintCombinedPipeline), ("kandinsky22", KandinskyV22InpaintCombinedPipeline), ("stable-diffusion-controlnet", StableDiffusionControlNetInpaintPipeline), ("stable-diffusion-controlnet-pag", 
StableDiffusionControlNetPAGInpaintPipeline), ("stable-diffusion-xl-controlnet", StableDiffusionXLControlNetInpaintPipeline), ("stable-diffusion-xl-controlnet-union", StableDiffusionXLControlNetUnionInpaintPipeline), ("stable-diffusion-3-controlnet", StableDiffusion3ControlNetInpaintingPipeline), ("stable-diffusion-xl-pag", StableDiffusionXLPAGInpaintPipeline), ("flux", FluxInpaintPipeline), ("flux-controlnet", FluxControlNetInpaintPipeline), ("flux-control", FluxControlInpaintPipeline), ("stable-diffusion-pag", StableDiffusionPAGInpaintPipeline), ] ) _AUTO_TEXT2IMAGE_DECODER_PIPELINES_MAPPING = OrderedDict( [ ("kandinsky", KandinskyPipeline), ("kandinsky22", KandinskyV22Pipeline), ("wuerstchen", WuerstchenDecoderPipeline), ("cascade", StableCascadeDecoderPipeline), ] ) _AUTO_IMAGE2IMAGE_DECODER_PIPELINES_MAPPING = OrderedDict( [ ("kandinsky", KandinskyImg2ImgPipeline), ("kandinsky22", KandinskyV22Img2ImgPipeline), ] ) _AUTO_INPAINT_DECODER_PIPELINES_MAPPING = OrderedDict( [ ("kandinsky", KandinskyInpaintPipeline), ("kandinsky22", KandinskyV22InpaintPipeline), ] ) if is_sentencepiece_available(): from .kolors import KolorsImg2ImgPipeline, KolorsPipeline from .pag import KolorsPAGPipeline AUTO_TEXT2IMAGE_PIPELINES_MAPPING["kolors"] = KolorsPipeline AUTO_TEXT2IMAGE_PIPELINES_MAPPING["kolors-pag"] = KolorsPAGPipeline AUTO_IMAGE2IMAGE_PIPELINES_MAPPING["kolors"] = KolorsImg2ImgPipeline SUPPORTED_TASKS_MAPPINGS = [ AUTO_TEXT2IMAGE_PIPELINES_MAPPING, AUTO_IMAGE2IMAGE_PIPELINES_MAPPING, AUTO_INPAINT_PIPELINES_MAPPING, _AUTO_TEXT2IMAGE_DECODER_PIPELINES_MAPPING, _AUTO_IMAGE2IMAGE_DECODER_PIPELINES_MAPPING, _AUTO_INPAINT_DECODER_PIPELINES_MAPPING, ] def _get_connected_pipeline(pipeline_cls): # for now connected pipelines can only be loaded from decoder pipelines, such as kandinsky-community/kandinsky-2-2-decoder if pipeline_cls in _AUTO_TEXT2IMAGE_DECODER_PIPELINES_MAPPING.values(): return _get_task_class( AUTO_TEXT2IMAGE_PIPELINES_MAPPING, pipeline_cls.__name__, throw_error_if_not_exist=False ) if pipeline_cls in _AUTO_IMAGE2IMAGE_DECODER_PIPELINES_MAPPING.values(): return _get_task_class( AUTO_IMAGE2IMAGE_PIPELINES_MAPPING, pipeline_cls.__name__, throw_error_if_not_exist=False ) if pipeline_cls in _AUTO_INPAINT_DECODER_PIPELINES_MAPPING.values(): return _get_task_class(AUTO_INPAINT_PIPELINES_MAPPING, pipeline_cls.__name__, throw_error_if_not_exist=False) def _get_model(pipeline_class_name): for task_mapping in SUPPORTED_TASKS_MAPPINGS: for model_name, pipeline in task_mapping.items(): if pipeline.__name__ == pipeline_class_name: return model_name def _get_task_class(mapping, pipeline_class_name, throw_error_if_not_exist: bool = True): model_name = _get_model(pipeline_class_name) if model_name is not None: task_class = mapping.get(model_name, None) if task_class is not None: return task_class if throw_error_if_not_exist: raise ValueError(f"AutoPipeline can't find a pipeline linked to {pipeline_class_name} for {model_name}") class AutoPipelineForText2Image(ConfigMixin): r""" [`AutoPipelineForText2Image`] is a generic pipeline class that instantiates a text-to-image pipeline class. The specific underlying pipeline class is automatically selected from either the [`~AutoPipelineForText2Image.from_pretrained`] or [`~AutoPipelineForText2Image.from_pipe`] methods. This class cannot be instantiated using `__init__()` (throws an error). Class attributes: - **config_name** (`str`) -- The configuration filename that stores the class and module names of all the diffusion pipeline's components. 
""" config_name = "model_index.json" def __init__(self, *args, **kwargs): raise EnvironmentError( f"{self.__class__.__name__} is designed to be instantiated " f"using the `{self.__class__.__name__}.from_pretrained(pretrained_model_name_or_path)` or " f"`{self.__class__.__name__}.from_pipe(pipeline)` methods." ) @classmethod @validate_hf_hub_args def from_pretrained(cls, pretrained_model_or_path, **kwargs): r""" Instantiates a text-to-image Pytorch diffusion pipeline from pretrained pipeline weight. The from_pretrained() method takes care of returning the correct pipeline class instance by: 1. Detect the pipeline class of the pretrained_model_or_path based on the _class_name property of its config object 2. Find the text-to-image pipeline linked to the pipeline class using pattern matching on pipeline class name. If a `controlnet` argument is passed, it will instantiate a [`StableDiffusionControlNetPipeline`] object. The pipeline is set in evaluation mode (`model.eval()`) by default. If you get the error message below, you need to finetune the weights for your downstream task: ``` Some weights of UNet2DConditionModel were not initialized from the model checkpoint at stable-diffusion-v1-5/stable-diffusion-v1-5 and are newly initialized because the shapes did not match: - conv_in.weight: found shape torch.Size([320, 4, 3, 3]) in the checkpoint and torch.Size([320, 9, 3, 3]) in the model instantiated You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. ``` Parameters: pretrained_model_or_path (`str` or `os.PathLike`, *optional*): Can be either: - A string, the *repo id* (for example `CompVis/ldm-text2im-large-256`) of a pretrained pipeline hosted on the Hub. - A path to a *directory* (for example `./my_pipeline_directory/`) containing pipeline weights saved using [`~DiffusionPipeline.save_pretrained`]. torch_dtype (`torch.dtype`, *optional*): Override the default `torch.dtype` and load the model with another dtype. force_download (`bool`, *optional*, defaults to `False`): Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist. cache_dir (`Union[str, os.PathLike]`, *optional*): Path to a directory where a downloaded pretrained model configuration is cached if the standard cache is not used. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. output_loading_info(`bool`, *optional*, defaults to `False`): Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages. local_files_only (`bool`, *optional*, defaults to `False`): Whether to only load local model weights and configuration files or not. If set to `True`, the model won't be downloaded from the Hub. token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from `diffusers-cli login` (stored in `~/.huggingface`) is used. revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier allowed by Git. custom_revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, or a commit id similar to `revision` when loading a custom pipeline from the Hub. 
It can be a 🤗 Diffusers version when loading a custom pipeline from GitHub, otherwise it defaults to `"main"` when loading from the Hub. mirror (`str`, *optional*): Mirror source to resolve accessibility issues if you’re downloading a model in China. We do not guarantee the timeliness or safety of the source, and you should refer to the mirror site for more information. device_map (`str` or `Dict[str, Union[int, str, torch.device]]`, *optional*): A map that specifies where each submodule should go. It doesn’t need to be defined for each parameter/buffer name; once a given module name is inside, every submodule of it will be sent to the same device. Set `device_map="auto"` to have 🤗 Accelerate automatically compute the most optimized `device_map`. For more information about each option see [designing a device map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map). max_memory (`Dict`, *optional*): A dictionary device identifier for the maximum memory. Will default to the maximum memory available for each GPU and the available CPU RAM if unset. offload_folder (`str` or `os.PathLike`, *optional*): The path to offload weights if device_map contains the value `"disk"`. offload_state_dict (`bool`, *optional*): If `True`, temporarily offloads the CPU state dict to the hard drive to avoid running out of CPU RAM if the weight of the CPU state dict + the biggest shard of the checkpoint does not fit. Defaults to `True` when there is some disk offload. low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`): Speed up model loading only loading the pretrained weights and not initializing the weights. This also tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model. Only supported for PyTorch >= 1.9.0. If you are using an older version of PyTorch, setting this argument to `True` will raise an error. use_safetensors (`bool`, *optional*, defaults to `None`): If set to `None`, the safetensors weights are downloaded if they're available **and** if the safetensors library is installed. If set to `True`, the model is forcibly loaded from safetensors weights. If set to `False`, safetensors weights are not loaded. kwargs (remaining dictionary of keyword arguments, *optional*): Can be used to overwrite load and saveable variables (the pipeline components of the specific pipeline class). The overwritten components are passed directly to the pipelines `__init__` method. See example below for more information. variant (`str`, *optional*): Load weights from a specified variant filename such as `"fp16"` or `"ema"`. This is ignored when loading `from_flax`. <Tip> To use private or [gated](https://huggingface.co/docs/hub/models-gated#gated-models) models, log-in with `hf auth login`. 
</Tip> Examples: ```py >>> from diffusers import AutoPipelineForText2Image >>> pipeline = AutoPipelineForText2Image.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5") >>> image = pipeline(prompt).images[0] ``` """ cache_dir = kwargs.pop("cache_dir", None) force_download = kwargs.pop("force_download", False) proxies = kwargs.pop("proxies", None) token = kwargs.pop("token", None) local_files_only = kwargs.pop("local_files_only", False) revision = kwargs.pop("revision", None) load_config_kwargs = { "cache_dir": cache_dir, "force_download": force_download, "proxies": proxies, "token": token, "local_files_only": local_files_only, "revision": revision, } config = cls.load_config(pretrained_model_or_path, **load_config_kwargs) orig_class_name = config["_class_name"] if "ControlPipeline" in orig_class_name: to_replace = "ControlPipeline" else: to_replace = "Pipeline" if "controlnet" in kwargs: if isinstance(kwargs["controlnet"], ControlNetUnionModel): orig_class_name = config["_class_name"].replace(to_replace, "ControlNetUnionPipeline") else: orig_class_name = config["_class_name"].replace(to_replace, "ControlNetPipeline") if "enable_pag" in kwargs: enable_pag = kwargs.pop("enable_pag") if enable_pag: orig_class_name = orig_class_name.replace(to_replace, "PAGPipeline") text_2_image_cls = _get_task_class(AUTO_TEXT2IMAGE_PIPELINES_MAPPING, orig_class_name) kwargs = {**load_config_kwargs, **kwargs} return text_2_image_cls.from_pretrained(pretrained_model_or_path, **kwargs) @classmethod def from_pipe(cls, pipeline, **kwargs): r""" Instantiates a text-to-image Pytorch diffusion pipeline from another instantiated diffusion pipeline class. The from_pipe() method takes care of returning the correct pipeline class instance by finding the text-to-image pipeline linked to the pipeline class using pattern matching on pipeline class name. All the modules the pipeline contains will be used to initialize the new pipeline without reallocating additional memory. The pipeline is set in evaluation mode (`model.eval()`) by default. Parameters: pipeline (`DiffusionPipeline`): an instantiated `DiffusionPipeline` object ```py >>> from diffusers import AutoPipelineForText2Image, AutoPipelineForImage2Image >>> pipe_i2i = AutoPipelineForImage2Image.from_pretrained( ... "stable-diffusion-v1-5/stable-diffusion-v1-5", requires_safety_checker=False ... 
) >>> pipe_t2i = AutoPipelineForText2Image.from_pipe(pipe_i2i) >>> image = pipe_t2i(prompt).images[0] ``` """ original_config = dict(pipeline.config) original_cls_name = pipeline.__class__.__name__ # derive the pipeline class to instantiate text_2_image_cls = _get_task_class(AUTO_TEXT2IMAGE_PIPELINES_MAPPING, original_cls_name) if "controlnet" in kwargs: if kwargs["controlnet"] is not None: to_replace = "PAGPipeline" if "PAG" in text_2_image_cls.__name__ else "Pipeline" text_2_image_cls = _get_task_class( AUTO_TEXT2IMAGE_PIPELINES_MAPPING, text_2_image_cls.__name__.replace("ControlNet", "").replace(to_replace, "ControlNet" + to_replace), ) else: text_2_image_cls = _get_task_class( AUTO_TEXT2IMAGE_PIPELINES_MAPPING, text_2_image_cls.__name__.replace("ControlNet", ""), ) if "enable_pag" in kwargs: enable_pag = kwargs.pop("enable_pag") if enable_pag: text_2_image_cls = _get_task_class( AUTO_TEXT2IMAGE_PIPELINES_MAPPING, text_2_image_cls.__name__.replace("PAG", "").replace("Pipeline", "PAGPipeline"), ) else: text_2_image_cls = _get_task_class( AUTO_TEXT2IMAGE_PIPELINES_MAPPING, text_2_image_cls.__name__.replace("PAG", ""), ) # define expected module and optional kwargs given the pipeline signature expected_modules, optional_kwargs = text_2_image_cls._get_signature_keys(text_2_image_cls) pretrained_model_name_or_path = original_config.pop("_name_or_path", None) # allow users pass modules in `kwargs` to override the original pipeline's components passed_class_obj = {k: kwargs.pop(k) for k in expected_modules if k in kwargs} original_class_obj = { k: pipeline.components[k] for k, v in pipeline.components.items() if k in expected_modules and k not in passed_class_obj } # allow users pass optional kwargs to override the original pipelines config attribute passed_pipe_kwargs = {k: kwargs.pop(k) for k in optional_kwargs if k in kwargs} original_pipe_kwargs = { k: original_config[k] for k, v in original_config.items() if k in optional_kwargs and k not in passed_pipe_kwargs } # config that were not expected by original pipeline is stored as private attribute # we will pass them as optional arguments if they can be accepted by the pipeline additional_pipe_kwargs = [ k[1:] for k in original_config.keys() if k.startswith("_") and k[1:] in optional_kwargs and k[1:] not in passed_pipe_kwargs ] for k in additional_pipe_kwargs: original_pipe_kwargs[k] = original_config.pop(f"_{k}") text_2_image_kwargs = {**passed_class_obj, **original_class_obj, **passed_pipe_kwargs, **original_pipe_kwargs} # store unused config as private attribute unused_original_config = { f"{'' if k.startswith('_') else '_'}{k}": original_config[k] for k, v in original_config.items() if k not in text_2_image_kwargs } missing_modules = ( set(expected_modules) - set(text_2_image_cls._optional_components) - set(text_2_image_kwargs.keys()) ) if len(missing_modules) > 0: raise ValueError( f"Pipeline {text_2_image_cls} expected {expected_modules}, but only {set(list(passed_class_obj.keys()) + list(original_class_obj.keys()))} were passed" ) model = text_2_image_cls(**text_2_image_kwargs) model.register_to_config(_name_or_path=pretrained_model_name_or_path) model.register_to_config(**unused_original_config) return model class AutoPipelineForImage2Image(ConfigMixin): r""" [`AutoPipelineForImage2Image`] is a generic pipeline class that instantiates an image-to-image pipeline class. 
The specific underlying pipeline class is automatically selected from either the [`~AutoPipelineForImage2Image.from_pretrained`] or [`~AutoPipelineForImage2Image.from_pipe`] methods. This class cannot be instantiated using `__init__()` (throws an error). Class attributes: - **config_name** (`str`) -- The configuration filename that stores the class and module names of all the diffusion pipeline's components. """ config_name = "model_index.json" def __init__(self, *args, **kwargs): raise EnvironmentError( f"{self.__class__.__name__} is designed to be instantiated " f"using the `{self.__class__.__name__}.from_pretrained(pretrained_model_name_or_path)` or " f"`{self.__class__.__name__}.from_pipe(pipeline)` methods." ) @classmethod @validate_hf_hub_args def from_pretrained(cls, pretrained_model_or_path, **kwargs): r""" Instantiates a image-to-image Pytorch diffusion pipeline from pretrained pipeline weight. The from_pretrained() method takes care of returning the correct pipeline class instance by: 1. Detect the pipeline class of the pretrained_model_or_path based on the _class_name property of its config object 2. Find the image-to-image pipeline linked to the pipeline class using pattern matching on pipeline class name. If a `controlnet` argument is passed, it will instantiate a [`StableDiffusionControlNetImg2ImgPipeline`] object. The pipeline is set in evaluation mode (`model.eval()`) by default. If you get the error message below, you need to finetune the weights for your downstream task: ``` Some weights of UNet2DConditionModel were not initialized from the model checkpoint at stable-diffusion-v1-5/stable-diffusion-v1-5 and are newly initialized because the shapes did not match: - conv_in.weight: found shape torch.Size([320, 4, 3, 3]) in the checkpoint and torch.Size([320, 9, 3, 3]) in the model instantiated You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. ``` Parameters: pretrained_model_or_path (`str` or `os.PathLike`, *optional*): Can be either: - A string, the *repo id* (for example `CompVis/ldm-text2im-large-256`) of a pretrained pipeline hosted on the Hub. - A path to a *directory* (for example `./my_pipeline_directory/`) containing pipeline weights saved using [`~DiffusionPipeline.save_pretrained`]. torch_dtype (`str` or `torch.dtype`, *optional*): Override the default `torch.dtype` and load the model with another dtype. force_download (`bool`, *optional*, defaults to `False`): Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist. cache_dir (`Union[str, os.PathLike]`, *optional*): Path to a directory where a downloaded pretrained model configuration is cached if the standard cache is not used. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. output_loading_info(`bool`, *optional*, defaults to `False`): Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages. local_files_only (`bool`, *optional*, defaults to `False`): Whether to only load local model weights and configuration files or not. If set to `True`, the model won't be downloaded from the Hub. token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. 
If `True`, the token generated from `diffusers-cli login` (stored in `~/.huggingface`) is used. revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier allowed by Git. custom_revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, or a commit id similar to `revision` when loading a custom pipeline from the Hub. It can be a 🤗 Diffusers version when loading a custom pipeline from GitHub, otherwise it defaults to `"main"` when loading from the Hub. mirror (`str`, *optional*): Mirror source to resolve accessibility issues if you’re downloading a model in China. We do not guarantee the timeliness or safety of the source, and you should refer to the mirror site for more information. device_map (`str` or `Dict[str, Union[int, str, torch.device]]`, *optional*): A map that specifies where each submodule should go. It doesn’t need to be defined for each parameter/buffer name; once a given module name is inside, every submodule of it will be sent to the same device. Set `device_map="auto"` to have 🤗 Accelerate automatically compute the most optimized `device_map`. For more information about each option see [designing a device map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map). max_memory (`Dict`, *optional*): A dictionary device identifier for the maximum memory. Will default to the maximum memory available for each GPU and the available CPU RAM if unset. offload_folder (`str` or `os.PathLike`, *optional*): The path to offload weights if device_map contains the value `"disk"`. offload_state_dict (`bool`, *optional*): If `True`, temporarily offloads the CPU state dict to the hard drive to avoid running out of CPU RAM if the weight of the CPU state dict + the biggest shard of the checkpoint does not fit. Defaults to `True` when there is some disk offload. low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`): Speed up model loading only loading the pretrained weights and not initializing the weights. This also tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model. Only supported for PyTorch >= 1.9.0. If you are using an older version of PyTorch, setting this argument to `True` will raise an error. use_safetensors (`bool`, *optional*, defaults to `None`): If set to `None`, the safetensors weights are downloaded if they're available **and** if the safetensors library is installed. If set to `True`, the model is forcibly loaded from safetensors weights. If set to `False`, safetensors weights are not loaded. kwargs (remaining dictionary of keyword arguments, *optional*): Can be used to overwrite load and saveable variables (the pipeline components of the specific pipeline class). The overwritten components are passed directly to the pipelines `__init__` method. See example below for more information. variant (`str`, *optional*): Load weights from a specified variant filename such as `"fp16"` or `"ema"`. This is ignored when loading `from_flax`. <Tip> To use private or [gated](https://huggingface.co/docs/hub/models-gated#gated-models) models, log-in with `hf auth login`. 
</Tip> Examples: ```py >>> from diffusers import AutoPipelineForImage2Image >>> pipeline = AutoPipelineForImage2Image.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5") >>> image = pipeline(prompt, image).images[0] ``` """ cache_dir = kwargs.pop("cache_dir", None) force_download = kwargs.pop("force_download", False) proxies = kwargs.pop("proxies", None) token = kwargs.pop("token", None) local_files_only = kwargs.pop("local_files_only", False) revision = kwargs.pop("revision", None) load_config_kwargs = { "cache_dir": cache_dir, "force_download": force_download, "proxies": proxies, "token": token, "local_files_only": local_files_only, "revision": revision, } config = cls.load_config(pretrained_model_or_path, **load_config_kwargs) orig_class_name = config["_class_name"] # the `orig_class_name` can be: # `- *Pipeline` (for regular text-to-image checkpoint) # - `*ControlPipeline` (for Flux tools specific checkpoint) # `- *Img2ImgPipeline` (for refiner checkpoint) if "Img2Img" in orig_class_name: to_replace = "Img2ImgPipeline" elif "ControlPipeline" in orig_class_name: to_replace = "ControlPipeline" else: to_replace = "Pipeline" if "controlnet" in kwargs: if isinstance(kwargs["controlnet"], ControlNetUnionModel): orig_class_name = orig_class_name.replace(to_replace, "ControlNetUnion" + to_replace) else: orig_class_name = orig_class_name.replace(to_replace, "ControlNet" + to_replace) if "enable_pag" in kwargs: enable_pag = kwargs.pop("enable_pag") if enable_pag: orig_class_name = orig_class_name.replace(to_replace, "PAG" + to_replace) if to_replace == "ControlPipeline": orig_class_name = orig_class_name.replace(to_replace, "ControlImg2ImgPipeline") image_2_image_cls = _get_task_class(AUTO_IMAGE2IMAGE_PIPELINES_MAPPING, orig_class_name) kwargs = {**load_config_kwargs, **kwargs} return image_2_image_cls.from_pretrained(pretrained_model_or_path, **kwargs) @classmethod def from_pipe(cls, pipeline, **kwargs): r""" Instantiates a image-to-image Pytorch diffusion pipeline from another instantiated diffusion pipeline class. The from_pipe() method takes care of returning the correct pipeline class instance by finding the image-to-image pipeline linked to the pipeline class using pattern matching on pipeline class name. All the modules the pipeline contains will be used to initialize the new pipeline without reallocating additional memory. The pipeline is set in evaluation mode (`model.eval()`) by default. Parameters: pipeline (`DiffusionPipeline`): an instantiated `DiffusionPipeline` object Examples: ```py >>> from diffusers import AutoPipelineForText2Image, AutoPipelineForImage2Image >>> pipe_t2i = AutoPipelineForText2Image.from_pretrained( ... "stable-diffusion-v1-5/stable-diffusion-v1-5", requires_safety_checker=False ... 
) >>> pipe_i2i = AutoPipelineForImage2Image.from_pipe(pipe_t2i) >>> image = pipe_i2i(prompt, image).images[0] ``` """ original_config = dict(pipeline.config) original_cls_name = pipeline.__class__.__name__ # derive the pipeline class to instantiate image_2_image_cls = _get_task_class(AUTO_IMAGE2IMAGE_PIPELINES_MAPPING, original_cls_name) if "controlnet" in kwargs: if kwargs["controlnet"] is not None: to_replace = "Img2ImgPipeline" if "PAG" in image_2_image_cls.__name__: to_replace = "PAG" + to_replace image_2_image_cls = _get_task_class( AUTO_IMAGE2IMAGE_PIPELINES_MAPPING, image_2_image_cls.__name__.replace("ControlNet", "").replace( to_replace, "ControlNet" + to_replace ), ) else: image_2_image_cls = _get_task_class( AUTO_IMAGE2IMAGE_PIPELINES_MAPPING, image_2_image_cls.__name__.replace("ControlNet", ""), ) if "enable_pag" in kwargs: enable_pag = kwargs.pop("enable_pag") if enable_pag: image_2_image_cls = _get_task_class( AUTO_IMAGE2IMAGE_PIPELINES_MAPPING, image_2_image_cls.__name__.replace("PAG", "").replace("Img2ImgPipeline", "PAGImg2ImgPipeline"), ) else: image_2_image_cls = _get_task_class( AUTO_IMAGE2IMAGE_PIPELINES_MAPPING, image_2_image_cls.__name__.replace("PAG", ""), ) # define expected module and optional kwargs given the pipeline signature expected_modules, optional_kwargs = image_2_image_cls._get_signature_keys(image_2_image_cls) pretrained_model_name_or_path = original_config.pop("_name_or_path", None) # allow users pass modules in `kwargs` to override the original pipeline's components passed_class_obj = {k: kwargs.pop(k) for k in expected_modules if k in kwargs} original_class_obj = { k: pipeline.components[k] for k, v in pipeline.components.items() if k in expected_modules and k not in passed_class_obj } # allow users pass optional kwargs to override the original pipelines config attribute passed_pipe_kwargs = {k: kwargs.pop(k) for k in optional_kwargs if k in kwargs} original_pipe_kwargs = { k: original_config[k] for k, v in original_config.items() if k in optional_kwargs and k not in passed_pipe_kwargs } # config attribute that were not expected by original pipeline is stored as its private attribute # we will pass them as optional arguments if they can be accepted by the pipeline additional_pipe_kwargs = [ k[1:] for k in original_config.keys() if k.startswith("_") and k[1:] in optional_kwargs and k[1:] not in passed_pipe_kwargs ] for k in additional_pipe_kwargs: original_pipe_kwargs[k] = original_config.pop(f"_{k}") image_2_image_kwargs = {**passed_class_obj, **original_class_obj, **passed_pipe_kwargs, **original_pipe_kwargs} # store unused config as private attribute unused_original_config = { f"{'' if k.startswith('_') else '_'}{k}": original_config[k] for k, v in original_config.items() if k not in image_2_image_kwargs } missing_modules = ( set(expected_modules) - set(image_2_image_cls._optional_components) - set(image_2_image_kwargs.keys()) ) if len(missing_modules) > 0: raise ValueError( f"Pipeline {image_2_image_cls} expected {expected_modules}, but only {set(list(passed_class_obj.keys()) + list(original_class_obj.keys()))} were passed" ) model = image_2_image_cls(**image_2_image_kwargs) model.register_to_config(_name_or_path=pretrained_model_name_or_path) model.register_to_config(**unused_original_config) return model class AutoPipelineForInpainting(ConfigMixin): r""" [`AutoPipelineForInpainting`] is a generic pipeline class that instantiates an inpainting pipeline class. 
The specific underlying pipeline class is automatically selected from either the [`~AutoPipelineForInpainting.from_pretrained`] or [`~AutoPipelineForInpainting.from_pipe`] methods. This class cannot be instantiated using `__init__()` (throws an error). Class attributes: - **config_name** (`str`) -- The configuration filename that stores the class and module names of all the diffusion pipeline's components. """ config_name = "model_index.json" def __init__(self, *args, **kwargs): raise EnvironmentError( f"{self.__class__.__name__} is designed to be instantiated " f"using the `{self.__class__.__name__}.from_pretrained(pretrained_model_name_or_path)` or " f"`{self.__class__.__name__}.from_pipe(pipeline)` methods." ) @classmethod @validate_hf_hub_args def from_pretrained(cls, pretrained_model_or_path, **kwargs): r""" Instantiates a inpainting Pytorch diffusion pipeline from pretrained pipeline weight. The from_pretrained() method takes care of returning the correct pipeline class instance by: 1. Detect the pipeline class of the pretrained_model_or_path based on the _class_name property of its config object 2. Find the inpainting pipeline linked to the pipeline class using pattern matching on pipeline class name. If a `controlnet` argument is passed, it will instantiate a [`StableDiffusionControlNetInpaintPipeline`] object. The pipeline is set in evaluation mode (`model.eval()`) by default. If you get the error message below, you need to finetune the weights for your downstream task: ``` Some weights of UNet2DConditionModel were not initialized from the model checkpoint at stable-diffusion-v1-5/stable-diffusion-v1-5 and are newly initialized because the shapes did not match: - conv_in.weight: found shape torch.Size([320, 4, 3, 3]) in the checkpoint and torch.Size([320, 9, 3, 3]) in the model instantiated You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. ``` Parameters: pretrained_model_or_path (`str` or `os.PathLike`, *optional*): Can be either: - A string, the *repo id* (for example `CompVis/ldm-text2im-large-256`) of a pretrained pipeline hosted on the Hub. - A path to a *directory* (for example `./my_pipeline_directory/`) containing pipeline weights saved using [`~DiffusionPipeline.save_pretrained`]. torch_dtype (`str` or `torch.dtype`, *optional*): Override the default `torch.dtype` and load the model with another dtype. force_download (`bool`, *optional*, defaults to `False`): Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist. cache_dir (`Union[str, os.PathLike]`, *optional*): Path to a directory where a downloaded pretrained model configuration is cached if the standard cache is not used. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. output_loading_info(`bool`, *optional*, defaults to `False`): Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages. local_files_only (`bool`, *optional*, defaults to `False`): Whether to only load local model weights and configuration files or not. If set to `True`, the model won't be downloaded from the Hub. token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from `diffusers-cli login` (stored in `~/.huggingface`) is used. 
revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier allowed by Git. custom_revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, or a commit id similar to `revision` when loading a custom pipeline from the Hub. It can be a 🤗 Diffusers version when loading a custom pipeline from GitHub, otherwise it defaults to `"main"` when loading from the Hub. mirror (`str`, *optional*): Mirror source to resolve accessibility issues if you’re downloading a model in China. We do not guarantee the timeliness or safety of the source, and you should refer to the mirror site for more information. device_map (`str` or `Dict[str, Union[int, str, torch.device]]`, *optional*): A map that specifies where each submodule should go. It doesn’t need to be defined for each parameter/buffer name; once a given module name is inside, every submodule of it will be sent to the same device. Set `device_map="auto"` to have 🤗 Accelerate automatically compute the most optimized `device_map`. For more information about each option see [designing a device map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map). max_memory (`Dict`, *optional*): A dictionary device identifier for the maximum memory. Will default to the maximum memory available for each GPU and the available CPU RAM if unset. offload_folder (`str` or `os.PathLike`, *optional*): The path to offload weights if device_map contains the value `"disk"`. offload_state_dict (`bool`, *optional*): If `True`, temporarily offloads the CPU state dict to the hard drive to avoid running out of CPU RAM if the weight of the CPU state dict + the biggest shard of the checkpoint does not fit. Defaults to `True` when there is some disk offload. low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`): Speed up model loading only loading the pretrained weights and not initializing the weights. This also tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model. Only supported for PyTorch >= 1.9.0. If you are using an older version of PyTorch, setting this argument to `True` will raise an error. use_safetensors (`bool`, *optional*, defaults to `None`): If set to `None`, the safetensors weights are downloaded if they're available **and** if the safetensors library is installed. If set to `True`, the model is forcibly loaded from safetensors weights. If set to `False`, safetensors weights are not loaded. kwargs (remaining dictionary of keyword arguments, *optional*): Can be used to overwrite load and saveable variables (the pipeline components of the specific pipeline class). The overwritten components are passed directly to the pipelines `__init__` method. See example below for more information. variant (`str`, *optional*): Load weights from a specified variant filename such as `"fp16"` or `"ema"`. This is ignored when loading `from_flax`. <Tip> To use private or [gated](https://huggingface.co/docs/hub/models-gated#gated-models) models, log-in with `hf auth login`. 
</Tip> Examples: ```py >>> from diffusers import AutoPipelineForInpainting >>> pipeline = AutoPipelineForInpainting.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5") >>> image = pipeline(prompt, image=init_image, mask_image=mask_image).images[0] ``` """ cache_dir = kwargs.pop("cache_dir", None) force_download = kwargs.pop("force_download", False) proxies = kwargs.pop("proxies", None) token = kwargs.pop("token", None) local_files_only = kwargs.pop("local_files_only", False) revision = kwargs.pop("revision", None) load_config_kwargs = { "cache_dir": cache_dir, "force_download": force_download, "proxies": proxies, "token": token, "local_files_only": local_files_only, "revision": revision, } config = cls.load_config(pretrained_model_or_path, **load_config_kwargs) orig_class_name = config["_class_name"] # The `orig_class_name`` can be: # `- *InpaintPipeline` (for inpaint-specific checkpoint) # - `*ControlPipeline` (for Flux tools specific checkpoint) # - or *Pipeline (for regular text-to-image checkpoint) if "Inpaint" in orig_class_name: to_replace = "InpaintPipeline" elif "ControlPipeline" in orig_class_name: to_replace = "ControlPipeline" else: to_replace = "Pipeline" if "controlnet" in kwargs: if isinstance(kwargs["controlnet"], ControlNetUnionModel): orig_class_name = orig_class_name.replace(to_replace, "ControlNetUnion" + to_replace) else: orig_class_name = orig_class_name.replace(to_replace, "ControlNet" + to_replace) if "enable_pag" in kwargs: enable_pag = kwargs.pop("enable_pag") if enable_pag: orig_class_name = orig_class_name.replace(to_replace, "PAG" + to_replace) if to_replace == "ControlPipeline": orig_class_name = orig_class_name.replace(to_replace, "ControlInpaintPipeline") inpainting_cls = _get_task_class(AUTO_INPAINT_PIPELINES_MAPPING, orig_class_name) kwargs = {**load_config_kwargs, **kwargs} return inpainting_cls.from_pretrained(pretrained_model_or_path, **kwargs) @classmethod def from_pipe(cls, pipeline, **kwargs): r""" Instantiates a inpainting Pytorch diffusion pipeline from another instantiated diffusion pipeline class. The from_pipe() method takes care of returning the correct pipeline class instance by finding the inpainting pipeline linked to the pipeline class using pattern matching on pipeline class name. All the modules the pipeline class contain will be used to initialize the new pipeline without reallocating additional memory. The pipeline is set in evaluation mode (`model.eval()`) by default. Parameters: pipeline (`DiffusionPipeline`): an instantiated `DiffusionPipeline` object Examples: ```py >>> from diffusers import AutoPipelineForText2Image, AutoPipelineForInpainting >>> pipe_t2i = AutoPipelineForText2Image.from_pretrained( ... "DeepFloyd/IF-I-XL-v1.0", requires_safety_checker=False ... 
) >>> pipe_inpaint = AutoPipelineForInpainting.from_pipe(pipe_t2i) >>> image = pipe_inpaint(prompt, image=init_image, mask_image=mask_image).images[0] ``` """ original_config = dict(pipeline.config) original_cls_name = pipeline.__class__.__name__ # derive the pipeline class to instantiate inpainting_cls = _get_task_class(AUTO_INPAINT_PIPELINES_MAPPING, original_cls_name) if "controlnet" in kwargs: if kwargs["controlnet"] is not None: inpainting_cls = _get_task_class( AUTO_INPAINT_PIPELINES_MAPPING, inpainting_cls.__name__.replace("ControlNet", "").replace( "InpaintPipeline", "ControlNetInpaintPipeline" ), ) else: inpainting_cls = _get_task_class( AUTO_INPAINT_PIPELINES_MAPPING, inpainting_cls.__name__.replace("ControlNetInpaintPipeline", "InpaintPipeline"), ) if "enable_pag" in kwargs: enable_pag = kwargs.pop("enable_pag") if enable_pag: inpainting_cls = _get_task_class( AUTO_INPAINT_PIPELINES_MAPPING, inpainting_cls.__name__.replace("PAG", "").replace("InpaintPipeline", "PAGInpaintPipeline"), ) else: inpainting_cls = _get_task_class( AUTO_INPAINT_PIPELINES_MAPPING, inpainting_cls.__name__.replace("PAGInpaintPipeline", "InpaintPipeline"), ) # define expected module and optional kwargs given the pipeline signature expected_modules, optional_kwargs = inpainting_cls._get_signature_keys(inpainting_cls) pretrained_model_name_or_path = original_config.pop("_name_or_path", None) # allow users pass modules in `kwargs` to override the original pipeline's components passed_class_obj = {k: kwargs.pop(k) for k in expected_modules if k in kwargs} original_class_obj = { k: pipeline.components[k] for k, v in pipeline.components.items() if k in expected_modules and k not in passed_class_obj } # allow users pass optional kwargs to override the original pipelines config attribute passed_pipe_kwargs = {k: kwargs.pop(k) for k in optional_kwargs if k in kwargs} original_pipe_kwargs = { k: original_config[k] for k, v in original_config.items() if k in optional_kwargs and k not in passed_pipe_kwargs } # config that were not expected by original pipeline is stored as private attribute # we will pass them as optional arguments if they can be accepted by the pipeline additional_pipe_kwargs = [ k[1:] for k in original_config.keys() if k.startswith("_") and k[1:] in optional_kwargs and k[1:] not in passed_pipe_kwargs ] for k in additional_pipe_kwargs: original_pipe_kwargs[k] = original_config.pop(f"_{k}") inpainting_kwargs = {**passed_class_obj, **original_class_obj, **passed_pipe_kwargs, **original_pipe_kwargs} # store unused config as private attribute unused_original_config = { f"{'' if k.startswith('_') else '_'}{k}": original_config[k] for k, v in original_config.items() if k not in inpainting_kwargs } missing_modules = ( set(expected_modules) - set(inpainting_cls._optional_components) - set(inpainting_kwargs.keys()) ) if len(missing_modules) > 0: raise ValueError( f"Pipeline {inpainting_cls} expected {expected_modules}, but only {set(list(passed_class_obj.keys()) + list(original_class_obj.keys()))} were passed" ) model = inpainting_cls(**inpainting_kwargs) model.register_to_config(_name_or_path=pretrained_model_name_or_path) model.register_to_config(**unused_original_config) return model
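# Usage sketch (illustrative, not part of the file above): how the class-name
# pattern matching in `from_pretrained`/`from_pipe` resolves a task-specific
# pipeline. The checkpoint and ControlNet ids below are assumptions chosen for
# the example.
import torch

from diffusers import AutoPipelineForInpainting, AutoPipelineForText2Image, ControlNetModel

# The checkpoint's config stores `_class_name = "StableDiffusionPipeline"`; the
# trailing "Pipeline" is swapped for "InpaintPipeline", so a
# StableDiffusionInpaintPipeline instance is returned.
pipe_inpaint = AutoPipelineForInpainting.from_pretrained(
    "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16
)

# Passing a `controlnet` kwarg additionally inserts "ControlNet" into the matched
# name, yielding StableDiffusionControlNetInpaintPipeline instead.
controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)
pipe_cn_inpaint = AutoPipelineForInpainting.from_pretrained(
    "stable-diffusion-v1-5/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
)

# `from_pipe` re-routes to another task while reusing the already-loaded modules,
# so no extra memory is allocated for the shared components.
pipe_t2i = AutoPipelineForText2Image.from_pipe(pipe_inpaint)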
diffusers/src/diffusers/pipelines/auto_pipeline.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/auto_pipeline.py", "repo_id": "diffusers", "token_count": 24679 }
169
from typing import TYPE_CHECKING from ...utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_flax_available, is_torch_available, is_transformers_available, ) _dummy_objects = {} _import_structure = {} try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_torch_and_transformers_objects # noqa F403 _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: _import_structure["multicontrolnet"] = ["MultiControlNetModel"] _import_structure["pipeline_controlnet"] = ["StableDiffusionControlNetPipeline"] _import_structure["pipeline_controlnet_blip_diffusion"] = ["BlipDiffusionControlNetPipeline"] _import_structure["pipeline_controlnet_img2img"] = ["StableDiffusionControlNetImg2ImgPipeline"] _import_structure["pipeline_controlnet_inpaint"] = ["StableDiffusionControlNetInpaintPipeline"] _import_structure["pipeline_controlnet_inpaint_sd_xl"] = ["StableDiffusionXLControlNetInpaintPipeline"] _import_structure["pipeline_controlnet_sd_xl"] = ["StableDiffusionXLControlNetPipeline"] _import_structure["pipeline_controlnet_sd_xl_img2img"] = ["StableDiffusionXLControlNetImg2ImgPipeline"] _import_structure["pipeline_controlnet_union_inpaint_sd_xl"] = ["StableDiffusionXLControlNetUnionInpaintPipeline"] _import_structure["pipeline_controlnet_union_sd_xl"] = ["StableDiffusionXLControlNetUnionPipeline"] _import_structure["pipeline_controlnet_union_sd_xl_img2img"] = ["StableDiffusionXLControlNetUnionImg2ImgPipeline"] try: if not (is_transformers_available() and is_flax_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_flax_and_transformers_objects # noqa F403 _dummy_objects.update(get_objects_from_module(dummy_flax_and_transformers_objects)) else: _import_structure["pipeline_flax_controlnet"] = ["FlaxStableDiffusionControlNetPipeline"] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * else: from .multicontrolnet import MultiControlNetModel from .pipeline_controlnet import StableDiffusionControlNetPipeline from .pipeline_controlnet_blip_diffusion import BlipDiffusionControlNetPipeline from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline from .pipeline_controlnet_inpaint_sd_xl import StableDiffusionXLControlNetInpaintPipeline from .pipeline_controlnet_sd_xl import StableDiffusionXLControlNetPipeline from .pipeline_controlnet_sd_xl_img2img import StableDiffusionXLControlNetImg2ImgPipeline from .pipeline_controlnet_union_inpaint_sd_xl import StableDiffusionXLControlNetUnionInpaintPipeline from .pipeline_controlnet_union_sd_xl import StableDiffusionXLControlNetUnionPipeline from .pipeline_controlnet_union_sd_xl_img2img import StableDiffusionXLControlNetUnionImg2ImgPipeline try: if not (is_transformers_available() and is_flax_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_flax_and_transformers_objects import * # noqa F403 else: from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline else: import sys sys.modules[__name__] = _LazyModule( __name__, 
globals()["__file__"], _import_structure, module_spec=__spec__, ) for name, value in _dummy_objects.items(): setattr(sys.modules[__name__], name, value)
diffusers/src/diffusers/pipelines/controlnet/__init__.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/controlnet/__init__.py", "repo_id": "diffusers", "token_count": 1545 }
170
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import List, Optional, Tuple, Union import torch from ...models import UNet2DModel from ...schedulers import DDPMScheduler from ...utils import is_torch_xla_available from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False class DDPMPipeline(DiffusionPipeline): r""" Pipeline for image generation. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). Parameters: unet ([`UNet2DModel`]): A `UNet2DModel` to denoise the encoded image latents. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image. Can be one of [`DDPMScheduler`], or [`DDIMScheduler`]. """ model_cpu_offload_seq = "unet" def __init__(self, unet: UNet2DModel, scheduler: DDPMScheduler): super().__init__() self.register_modules(unet=unet, scheduler=scheduler) @torch.no_grad() def __call__( self, batch_size: int = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, num_inference_steps: int = 1000, output_type: Optional[str] = "pil", return_dict: bool = True, ) -> Union[ImagePipelineOutput, Tuple]: r""" The call function to the pipeline for generation. Args: batch_size (`int`, *optional*, defaults to 1): The number of images to generate. generator (`torch.Generator`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. num_inference_steps (`int`, *optional*, defaults to 1000): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generated image. Choose between `PIL.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. 
Example: ```py >>> from diffusers import DDPMPipeline >>> # load model and scheduler >>> pipe = DDPMPipeline.from_pretrained("google/ddpm-cat-256") >>> # run pipeline in inference (sample random noise and denoise) >>> image = pipe().images[0] >>> # save image >>> image.save("ddpm_generated_image.png") ``` Returns: [`~pipelines.ImagePipelineOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated images """ # Sample gaussian noise to begin loop if isinstance(self.unet.config.sample_size, int): image_shape = ( batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size, ) else: image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size) if self.device.type == "mps": # randn does not work reproducibly on mps image = randn_tensor(image_shape, generator=generator, dtype=self.unet.dtype) image = image.to(self.device) else: image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype) # set step values self.scheduler.set_timesteps(num_inference_steps) for t in self.progress_bar(self.scheduler.timesteps): # 1. predict noise model_output model_output = self.unet(image, t).sample # 2. compute previous image: x_t -> x_t-1 image = self.scheduler.step(model_output, t, image, generator=generator).prev_sample if XLA_AVAILABLE: xm.mark_step() image = (image / 2 + 0.5).clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).numpy() if output_type == "pil": image = self.numpy_to_pil(image) if not return_dict: return (image,) return ImagePipelineOutput(images=image)
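# Usage sketch: reproducible sampling with fewer denoising steps and a NumPy
# output, using only arguments documented in `__call__` above. The checkpoint id
# matches the docstring example; the device and step count are illustrative.
import torch

from diffusers import DDPMPipeline

pipe = DDPMPipeline.from_pretrained("google/ddpm-cat-256").to("cuda")
generator = torch.Generator(device="cuda").manual_seed(0)

# Fewer steps trade sample quality for speed; output_type="np" skips the PIL
# conversion and returns a float array of shape (batch, height, width, 3) in [0, 1].
images = pipe(batch_size=2, num_inference_steps=250, generator=generator, output_type="np").images
print(images.shape)  # (2, 256, 256, 3)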
diffusers/src/diffusers/pipelines/ddpm/pipeline_ddpm.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/ddpm/pipeline_ddpm.py", "repo_id": "diffusers", "token_count": 2235 }
171
# Copyright 2022 The Music Spectrogram Diffusion Authors. # Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import dataclasses import math import os from typing import Any, Callable, List, Mapping, MutableMapping, Optional, Sequence, Tuple, Union import numpy as np import torch import torch.nn.functional as F from ....utils import is_note_seq_available from .pipeline_spectrogram_diffusion import TARGET_FEATURE_LENGTH if is_note_seq_available(): import note_seq else: raise ImportError("Please install note-seq via `pip install note-seq`") INPUT_FEATURE_LENGTH = 2048 SAMPLE_RATE = 16000 HOP_SIZE = 320 FRAME_RATE = int(SAMPLE_RATE // HOP_SIZE) DEFAULT_STEPS_PER_SECOND = 100 DEFAULT_MAX_SHIFT_SECONDS = 10 DEFAULT_NUM_VELOCITY_BINS = 1 SLAKH_CLASS_PROGRAMS = { "Acoustic Piano": 0, "Electric Piano": 4, "Chromatic Percussion": 8, "Organ": 16, "Acoustic Guitar": 24, "Clean Electric Guitar": 26, "Distorted Electric Guitar": 29, "Acoustic Bass": 32, "Electric Bass": 33, "Violin": 40, "Viola": 41, "Cello": 42, "Contrabass": 43, "Orchestral Harp": 46, "Timpani": 47, "String Ensemble": 48, "Synth Strings": 50, "Choir and Voice": 52, "Orchestral Hit": 55, "Trumpet": 56, "Trombone": 57, "Tuba": 58, "French Horn": 60, "Brass Section": 61, "Soprano/Alto Sax": 64, "Tenor Sax": 66, "Baritone Sax": 67, "Oboe": 68, "English Horn": 69, "Bassoon": 70, "Clarinet": 71, "Pipe": 73, "Synth Lead": 80, "Synth Pad": 88, } @dataclasses.dataclass class NoteRepresentationConfig: """Configuration note representations.""" onsets_only: bool include_ties: bool @dataclasses.dataclass class NoteEventData: pitch: int velocity: Optional[int] = None program: Optional[int] = None is_drum: Optional[bool] = None instrument: Optional[int] = None @dataclasses.dataclass class NoteEncodingState: """Encoding state for note transcription, keeping track of active pitches.""" # velocity bin for active pitches and programs active_pitches: MutableMapping[Tuple[int, int], int] = dataclasses.field(default_factory=dict) @dataclasses.dataclass class EventRange: type: str min_value: int max_value: int @dataclasses.dataclass class Event: type: str value: int class Tokenizer: def __init__(self, regular_ids: int): # The special tokens: 0=PAD, 1=EOS, and 2=UNK self._num_special_tokens = 3 self._num_regular_tokens = regular_ids def encode(self, token_ids): encoded = [] for token_id in token_ids: if not 0 <= token_id < self._num_regular_tokens: raise ValueError( f"token_id {token_id} does not fall within valid range of [0, {self._num_regular_tokens})" ) encoded.append(token_id + self._num_special_tokens) # Add EOS token encoded.append(1) # Pad to till INPUT_FEATURE_LENGTH encoded = encoded + [0] * (INPUT_FEATURE_LENGTH - len(encoded)) return encoded class Codec: """Encode and decode events. Useful for declaring what certain ranges of a vocabulary should be used for. This is intended to be used from Python before encoding or after decoding with GenericTokenVocabulary. 
This class is more lightweight and does not include things like EOS or UNK token handling. To ensure that 'shift' events are always the first block of the vocab and start at 0, that event type is required and specified separately. """ def __init__(self, max_shift_steps: int, steps_per_second: float, event_ranges: List[EventRange]): """Define Codec. Args: max_shift_steps: Maximum number of shift steps that can be encoded. steps_per_second: Shift steps will be interpreted as having a duration of 1 / steps_per_second. event_ranges: Other supported event types and their ranges. """ self.steps_per_second = steps_per_second self._shift_range = EventRange(type="shift", min_value=0, max_value=max_shift_steps) self._event_ranges = [self._shift_range] + event_ranges # Ensure all event types have unique names. assert len(self._event_ranges) == len({er.type for er in self._event_ranges}) @property def num_classes(self) -> int: return sum(er.max_value - er.min_value + 1 for er in self._event_ranges) # The next couple methods are simplified special case methods just for shift # events that are intended to be used from within autograph functions. def is_shift_event_index(self, index: int) -> bool: return (self._shift_range.min_value <= index) and (index <= self._shift_range.max_value) @property def max_shift_steps(self) -> int: return self._shift_range.max_value def encode_event(self, event: Event) -> int: """Encode an event to an index.""" offset = 0 for er in self._event_ranges: if event.type == er.type: if not er.min_value <= event.value <= er.max_value: raise ValueError( f"Event value {event.value} is not within valid range " f"[{er.min_value}, {er.max_value}] for type {event.type}" ) return offset + event.value - er.min_value offset += er.max_value - er.min_value + 1 raise ValueError(f"Unknown event type: {event.type}") def event_type_range(self, event_type: str) -> Tuple[int, int]: """Return [min_id, max_id] for an event type.""" offset = 0 for er in self._event_ranges: if event_type == er.type: return offset, offset + (er.max_value - er.min_value) offset += er.max_value - er.min_value + 1 raise ValueError(f"Unknown event type: {event_type}") def decode_event_index(self, index: int) -> Event: """Decode an event index to an Event.""" offset = 0 for er in self._event_ranges: if offset <= index <= offset + er.max_value - er.min_value: return Event(type=er.type, value=er.min_value + index - offset) offset += er.max_value - er.min_value + 1 raise ValueError(f"Unknown event index: {index}") @dataclasses.dataclass class ProgramGranularity: # both tokens_map_fn and program_map_fn should be idempotent tokens_map_fn: Callable[[Sequence[int], Codec], Sequence[int]] program_map_fn: Callable[[int], int] def drop_programs(tokens, codec: Codec): """Drops program change events from a token sequence.""" min_program_id, max_program_id = codec.event_type_range("program") return tokens[(tokens < min_program_id) | (tokens > max_program_id)] def programs_to_midi_classes(tokens, codec): """Modifies program events to be the first program in the MIDI class.""" min_program_id, max_program_id = codec.event_type_range("program") is_program = (tokens >= min_program_id) & (tokens <= max_program_id) return np.where(is_program, min_program_id + 8 * ((tokens - min_program_id) // 8), tokens) PROGRAM_GRANULARITIES = { # "flat" granularity; drop program change tokens and set NoteSequence # programs to zero "flat": ProgramGranularity(tokens_map_fn=drop_programs, program_map_fn=lambda program: 0), # map each program to the first 
program in its MIDI class "midi_class": ProgramGranularity( tokens_map_fn=programs_to_midi_classes, program_map_fn=lambda program: 8 * (program // 8) ), # leave programs as is "full": ProgramGranularity(tokens_map_fn=lambda tokens, codec: tokens, program_map_fn=lambda program: program), } def frame(signal, frame_length, frame_step, pad_end=False, pad_value=0, axis=-1): """ equivalent of tf.signal.frame """ signal_length = signal.shape[axis] if pad_end: frames_overlap = frame_length - frame_step rest_samples = np.abs(signal_length - frames_overlap) % np.abs(frame_length - frames_overlap) pad_size = int(frame_length - rest_samples) if pad_size != 0: pad_axis = [0] * signal.ndim pad_axis[axis] = pad_size signal = F.pad(signal, pad_axis, "constant", pad_value) frames = signal.unfold(axis, frame_length, frame_step) return frames def program_to_slakh_program(program): # this is done very hackily, probably should use a custom mapping for slakh_program in sorted(SLAKH_CLASS_PROGRAMS.values(), reverse=True): if program >= slakh_program: return slakh_program def audio_to_frames( samples, hop_size: int, frame_rate: int, ) -> Tuple[Sequence[Sequence[int]], torch.Tensor]: """Convert audio samples to non-overlapping frames and frame times.""" frame_size = hop_size samples = np.pad(samples, [0, frame_size - len(samples) % frame_size], mode="constant") # Split audio into frames. frames = frame( torch.Tensor(samples).unsqueeze(0), frame_length=frame_size, frame_step=frame_size, pad_end=False, # TODO check why its off by 1 here when True ) num_frames = len(samples) // frame_size times = np.arange(num_frames) / frame_rate return frames, times def note_sequence_to_onsets_and_offsets_and_programs( ns: note_seq.NoteSequence, ) -> Tuple[Sequence[float], Sequence[NoteEventData]]: """Extract onset & offset times and pitches & programs from a NoteSequence. The onset & offset times will not necessarily be in sorted order. Args: ns: NoteSequence from which to extract onsets and offsets. Returns: times: A list of note onset and offset times. values: A list of NoteEventData objects where velocity is zero for note offsets. """ # Sort by program and pitch and put offsets before onsets as a tiebreaker for # subsequent stable sort. 
notes = sorted(ns.notes, key=lambda note: (note.is_drum, note.program, note.pitch)) times = [note.end_time for note in notes if not note.is_drum] + [note.start_time for note in notes] values = [ NoteEventData(pitch=note.pitch, velocity=0, program=note.program, is_drum=False) for note in notes if not note.is_drum ] + [ NoteEventData(pitch=note.pitch, velocity=note.velocity, program=note.program, is_drum=note.is_drum) for note in notes ] return times, values def num_velocity_bins_from_codec(codec: Codec): """Get number of velocity bins from event codec.""" lo, hi = codec.event_type_range("velocity") return hi - lo # segment an array into segments of length n def segment(a, n): return [a[i : i + n] for i in range(0, len(a), n)] def velocity_to_bin(velocity, num_velocity_bins): if velocity == 0: return 0 else: return math.ceil(num_velocity_bins * velocity / note_seq.MAX_MIDI_VELOCITY) def note_event_data_to_events( state: Optional[NoteEncodingState], value: NoteEventData, codec: Codec, ) -> Sequence[Event]: """Convert note event data to a sequence of events.""" if value.velocity is None: # onsets only, no program or velocity return [Event("pitch", value.pitch)] else: num_velocity_bins = num_velocity_bins_from_codec(codec) velocity_bin = velocity_to_bin(value.velocity, num_velocity_bins) if value.program is None: # onsets + offsets + velocities only, no programs if state is not None: state.active_pitches[(value.pitch, 0)] = velocity_bin return [Event("velocity", velocity_bin), Event("pitch", value.pitch)] else: if value.is_drum: # drum events use a separate vocabulary return [Event("velocity", velocity_bin), Event("drum", value.pitch)] else: # program + velocity + pitch if state is not None: state.active_pitches[(value.pitch, value.program)] = velocity_bin return [ Event("program", value.program), Event("velocity", velocity_bin), Event("pitch", value.pitch), ] def note_encoding_state_to_events(state: NoteEncodingState) -> Sequence[Event]: """Output program and pitch events for active notes plus a final tie event.""" events = [] for pitch, program in sorted(state.active_pitches.keys(), key=lambda k: k[::-1]): if state.active_pitches[(pitch, program)]: events += [Event("program", program), Event("pitch", pitch)] events.append(Event("tie", 0)) return events def encode_and_index_events( state, event_times, event_values, codec, frame_times, encode_event_fn, encoding_state_to_events_fn=None ): """Encode a sequence of timed events and index to audio frame times. Encodes time shifts as repeated single step shifts for later run length encoding. Optionally, also encodes a sequence of "state events", keeping track of the current encoding state at each audio frame. This can be used e.g. to prepend events representing the current state to a targets segment. Args: state: Initial event encoding state. event_times: Sequence of event times. event_values: Sequence of event values. encode_event_fn: Function that transforms event value into a sequence of one or more Event objects. codec: An Codec object that maps Event objects to indices. frame_times: Time for every audio frame. encoding_state_to_events_fn: Function that transforms encoding state into a sequence of one or more Event objects. Returns: events: Encoded events and shifts. event_start_indices: Corresponding start event index for every audio frame. Note: one event can correspond to multiple audio indices due to sampling rate differences. 
This makes splitting sequences tricky because the same event can appear at the end of one sequence and the beginning of another. event_end_indices: Corresponding end event index for every audio frame. Used to ensure when slicing that one chunk ends where the next begins. Should always be true that event_end_indices[i] = event_start_indices[i + 1]. state_events: Encoded "state" events representing the encoding state before each event. state_event_indices: Corresponding state event index for every audio frame. """ indices = np.argsort(event_times, kind="stable") event_steps = [round(event_times[i] * codec.steps_per_second) for i in indices] event_values = [event_values[i] for i in indices] events = [] state_events = [] event_start_indices = [] state_event_indices = [] cur_step = 0 cur_event_idx = 0 cur_state_event_idx = 0 def fill_event_start_indices_to_cur_step(): while ( len(event_start_indices) < len(frame_times) and frame_times[len(event_start_indices)] < cur_step / codec.steps_per_second ): event_start_indices.append(cur_event_idx) state_event_indices.append(cur_state_event_idx) for event_step, event_value in zip(event_steps, event_values): while event_step > cur_step: events.append(codec.encode_event(Event(type="shift", value=1))) cur_step += 1 fill_event_start_indices_to_cur_step() cur_event_idx = len(events) cur_state_event_idx = len(state_events) if encoding_state_to_events_fn: # Dump state to state events *before* processing the next event, because # we want to capture the state prior to the occurrence of the event. for e in encoding_state_to_events_fn(state): state_events.append(codec.encode_event(e)) for e in encode_event_fn(state, event_value, codec): events.append(codec.encode_event(e)) # After the last event, continue filling out the event_start_indices array. # The inequality is not strict because if our current step lines up exactly # with (the start of) an audio frame, we need to add an additional shift event # to "cover" that frame. while cur_step / codec.steps_per_second <= frame_times[-1]: events.append(codec.encode_event(Event(type="shift", value=1))) cur_step += 1 fill_event_start_indices_to_cur_step() cur_event_idx = len(events) # Now fill in event_end_indices. We need this extra array to make sure that # when we slice events, each slice ends exactly where the subsequent slice # begins. 
event_end_indices = event_start_indices[1:] + [len(events)] events = np.array(events).astype(np.int32) state_events = np.array(state_events).astype(np.int32) event_start_indices = segment(np.array(event_start_indices).astype(np.int32), TARGET_FEATURE_LENGTH) event_end_indices = segment(np.array(event_end_indices).astype(np.int32), TARGET_FEATURE_LENGTH) state_event_indices = segment(np.array(state_event_indices).astype(np.int32), TARGET_FEATURE_LENGTH) outputs = [] for start_indices, end_indices, event_indices in zip(event_start_indices, event_end_indices, state_event_indices): outputs.append( { "inputs": events, "event_start_indices": start_indices, "event_end_indices": end_indices, "state_events": state_events, "state_event_indices": event_indices, } ) return outputs def extract_sequence_with_indices(features, state_events_end_token=None, feature_key="inputs"): """Extract target sequence corresponding to audio token segment.""" features = features.copy() start_idx = features["event_start_indices"][0] end_idx = features["event_end_indices"][-1] features[feature_key] = features[feature_key][start_idx:end_idx] if state_events_end_token is not None: # Extract the state events corresponding to the audio start token, and # prepend them to the targets array. state_event_start_idx = features["state_event_indices"][0] state_event_end_idx = state_event_start_idx + 1 while features["state_events"][state_event_end_idx - 1] != state_events_end_token: state_event_end_idx += 1 features[feature_key] = np.concatenate( [ features["state_events"][state_event_start_idx:state_event_end_idx], features[feature_key], ], axis=0, ) return features def map_midi_programs( feature, codec: Codec, granularity_type: str = "full", feature_key: str = "inputs" ) -> Mapping[str, Any]: """Apply MIDI program map to token sequences.""" granularity = PROGRAM_GRANULARITIES[granularity_type] feature[feature_key] = granularity.tokens_map_fn(feature[feature_key], codec) return feature def run_length_encode_shifts_fn( features, codec: Codec, feature_key: str = "inputs", state_change_event_types: Sequence[str] = (), ) -> Callable[[Mapping[str, Any]], Mapping[str, Any]]: """Return a function that run-length encodes shifts for a given codec. Args: codec: The Codec to use for shift events. feature_key: The feature key for which to run-length encode shifts. state_change_event_types: A list of event types that represent state changes; tokens corresponding to these event types will be interpreted as state changes and redundant ones will be removed. Returns: A preprocessing function that run-length encodes single-step shifts. """ state_change_event_ranges = [codec.event_type_range(event_type) for event_type in state_change_event_types] def run_length_encode_shifts(features: MutableMapping[str, Any]) -> Mapping[str, Any]: """Combine leading/interior shifts, trim trailing shifts. Args: features: Dict of features to process. Returns: A dict of features. """ events = features[feature_key] shift_steps = 0 total_shift_steps = 0 output = np.array([], dtype=np.int32) current_state = np.zeros(len(state_change_event_ranges), dtype=np.int32) for event in events: if codec.is_shift_event_index(event): shift_steps += 1 total_shift_steps += 1 else: # If this event is a state change and has the same value as the current # state, we can skip it entirely. 
is_redundant = False for i, (min_index, max_index) in enumerate(state_change_event_ranges): if (min_index <= event) and (event <= max_index): if current_state[i] == event: is_redundant = True current_state[i] = event if is_redundant: continue # Once we've reached a non-shift event, RLE all previous shift events # before outputting the non-shift event. if shift_steps > 0: shift_steps = total_shift_steps while shift_steps > 0: output_steps = np.minimum(codec.max_shift_steps, shift_steps) output = np.concatenate([output, [output_steps]], axis=0) shift_steps -= output_steps output = np.concatenate([output, [event]], axis=0) features[feature_key] = output return features return run_length_encode_shifts(features) def note_representation_processor_chain(features, codec: Codec, note_representation_config: NoteRepresentationConfig): tie_token = codec.encode_event(Event("tie", 0)) state_events_end_token = tie_token if note_representation_config.include_ties else None features = extract_sequence_with_indices( features, state_events_end_token=state_events_end_token, feature_key="inputs" ) features = map_midi_programs(features, codec) features = run_length_encode_shifts_fn(features, codec, state_change_event_types=["velocity", "program"]) return features class MidiProcessor: def __init__(self): self.codec = Codec( max_shift_steps=DEFAULT_MAX_SHIFT_SECONDS * DEFAULT_STEPS_PER_SECOND, steps_per_second=DEFAULT_STEPS_PER_SECOND, event_ranges=[ EventRange("pitch", note_seq.MIN_MIDI_PITCH, note_seq.MAX_MIDI_PITCH), EventRange("velocity", 0, DEFAULT_NUM_VELOCITY_BINS), EventRange("tie", 0, 0), EventRange("program", note_seq.MIN_MIDI_PROGRAM, note_seq.MAX_MIDI_PROGRAM), EventRange("drum", note_seq.MIN_MIDI_PITCH, note_seq.MAX_MIDI_PITCH), ], ) self.tokenizer = Tokenizer(self.codec.num_classes) self.note_representation_config = NoteRepresentationConfig(onsets_only=False, include_ties=True) def __call__(self, midi: Union[bytes, os.PathLike, str]): if not isinstance(midi, bytes): with open(midi, "rb") as f: midi = f.read() ns = note_seq.midi_to_note_sequence(midi) ns_sus = note_seq.apply_sustain_control_changes(ns) for note in ns_sus.notes: if not note.is_drum: note.program = program_to_slakh_program(note.program) samples = np.zeros(int(ns_sus.total_time * SAMPLE_RATE)) _, frame_times = audio_to_frames(samples, HOP_SIZE, FRAME_RATE) times, values = note_sequence_to_onsets_and_offsets_and_programs(ns_sus) events = encode_and_index_events( state=NoteEncodingState(), event_times=times, event_values=values, frame_times=frame_times, codec=self.codec, encode_event_fn=note_event_data_to_events, encoding_state_to_events_fn=note_encoding_state_to_events, ) events = [ note_representation_processor_chain(event, self.codec, self.note_representation_config) for event in events ] input_tokens = [self.tokenizer.encode(event["inputs"]) for event in events] return input_tokens
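# Minimal sketch of how the `Codec` above maps events to flat indices and back.
# The ranges mirror a subset of what `MidiProcessor.__init__` builds; the pitch
# bounds (0-127) and the shift budget (10 s * 100 steps/s = 1000 steps) are
# written out literally here for clarity.
codec_example = Codec(
    max_shift_steps=1000,  # DEFAULT_MAX_SHIFT_SECONDS * DEFAULT_STEPS_PER_SECOND
    steps_per_second=100,  # DEFAULT_STEPS_PER_SECOND
    event_ranges=[
        EventRange("pitch", 0, 127),   # MIDI pitch range
        EventRange("velocity", 0, 1),  # note-off bin plus DEFAULT_NUM_VELOCITY_BINS on-bin
    ],
)

# "shift" events always occupy the first block of indices [0, max_shift_steps];
# the remaining event types are stacked after it in declaration order.
index = codec_example.encode_event(Event(type="pitch", value=60))  # middle C
assert index == 1001 + 60  # 1001 shift ids (0..1000) come first
assert codec_example.decode_event_index(index) == Event(type="pitch", value=60)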
diffusers/src/diffusers/pipelines/deprecated/spectrogram_diffusion/midi_utils.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/deprecated/spectrogram_diffusion/midi_utils.py", "repo_id": "diffusers", "token_count": 10185 }
172
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect from typing import Callable, List, Optional, Union import numpy as np import PIL.Image import torch import torch.utils.checkpoint from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection from ....image_processor import VaeImageProcessor from ....models import AutoencoderKL, UNet2DConditionModel from ....schedulers import KarrasDiffusionSchedulers from ....utils import deprecate, logging from ....utils.torch_utils import randn_tensor from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput logger = logging.get_logger(__name__) # pylint: disable=invalid-name class VersatileDiffusionImageVariationPipeline(DiffusionPipeline): r""" Pipeline for image variation using Versatile Diffusion. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). Parameters: vqvae ([`VQModel`]): Vector-quantized (VQ) model to encode and decode images to and from latent representations. bert ([`LDMBertModel`]): Text-encoder model based on [`~transformers.BERT`]. tokenizer ([`~transformers.BertTokenizer`]): A `BertTokenizer` to tokenize text. unet ([`UNet2DConditionModel`]): A `UNet2DConditionModel` to denoise the encoded image latents. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. """ model_cpu_offload_seq = "bert->unet->vqvae" image_feature_extractor: CLIPImageProcessor image_encoder: CLIPVisionModelWithProjection image_unet: UNet2DConditionModel vae: AutoencoderKL scheduler: KarrasDiffusionSchedulers def __init__( self, image_feature_extractor: CLIPImageProcessor, image_encoder: CLIPVisionModelWithProjection, image_unet: UNet2DConditionModel, vae: AutoencoderKL, scheduler: KarrasDiffusionSchedulers, ): super().__init__() self.register_modules( image_feature_extractor=image_feature_extractor, image_encoder=image_encoder, image_unet=image_unet, vae=vae, scheduler=scheduler, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8 self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `List[str]`): prompt to be encoded device: (`torch.device`): torch device num_images_per_prompt (`int`): number of images that should be generated per prompt do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not negative_prompt (`str` or `List[str]`): The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). 
""" def normalize_embeddings(encoder_output): embeds = self.image_encoder.vision_model.post_layernorm(encoder_output.last_hidden_state) embeds = self.image_encoder.visual_projection(embeds) embeds_pooled = embeds[:, 0:1] embeds = embeds / torch.norm(embeds_pooled, dim=-1, keepdim=True) return embeds if isinstance(prompt, torch.Tensor) and len(prompt.shape) == 4: prompt = list(prompt) batch_size = len(prompt) if isinstance(prompt, list) else 1 # get prompt text embeddings image_input = self.image_feature_extractor(images=prompt, return_tensors="pt") pixel_values = image_input.pixel_values.to(device).to(self.image_encoder.dtype) image_embeddings = self.image_encoder(pixel_values) image_embeddings = normalize_embeddings(image_embeddings) # duplicate image embeddings for each generation per prompt, using mps friendly method bs_embed, seq_len, _ = image_embeddings.shape image_embeddings = image_embeddings.repeat(1, num_images_per_prompt, 1) image_embeddings = image_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: uncond_images: List[str] if negative_prompt is None: uncond_images = [np.zeros((512, 512, 3)) + 0.5] * batch_size elif type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif isinstance(negative_prompt, PIL.Image.Image): uncond_images = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_images = negative_prompt uncond_images = self.image_feature_extractor(images=uncond_images, return_tensors="pt") pixel_values = uncond_images.pixel_values.to(device).to(self.image_encoder.dtype) negative_prompt_embeds = self.image_encoder(pixel_values) negative_prompt_embeds = normalize_embeddings(negative_prompt_embeds) # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and conditional embeddings into a single batch # to avoid doing two forward passes image_embeddings = torch.cat([negative_prompt_embeds, image_embeddings]) return image_embeddings # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents def decode_latents(self, latents): deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) 
instead" deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.decode(latents, return_dict=False)[0] image = (image / 2 + 0.5).clamp(0, 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 image = image.cpu().permute(0, 2, 3, 1).float().numpy() return image # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://huggingface.co/papers/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_image_variation.StableDiffusionImageVariationPipeline.check_inputs def check_inputs(self, image, height, width, callback_steps): if ( not isinstance(image, torch.Tensor) and not isinstance(image, PIL.Image.Image) and not isinstance(image, list) ): raise ValueError( "`image` has to be of type `torch.Tensor` or `PIL.Image.Image` or `List[PIL.Image.Image]` but is" f" {type(image)}" ) if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if (callback_steps is None) or ( callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = ( batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor, ) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
) if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents @torch.no_grad() def __call__( self, image: Union[PIL.Image.Image, List[PIL.Image.Image], torch.Tensor], height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, callback_steps: int = 1, **kwargs, ): r""" The call function to the pipeline for generation. Args: image (`PIL.Image.Image`, `List[PIL.Image.Image]` or `torch.Tensor`): The image prompt or prompts to guide the image generation. height (`int`, *optional*, defaults to `self.image_unet.config.sample_size * self.vae_scale_factor`): The height in pixels of the generated image. width (`int`, *optional*, defaults to `self.image_unet.config.sample_size * self.vae_scale_factor`): The width in pixels of the generated image. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. guidance_scale (`float`, *optional*, defaults to 7.5): A higher guidance scale value encourages the model to generate images closely linked to the text `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide what to not include in image generation. If not defined, you need to pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) from the [DDIM](https://huggingface.co/papers/2010.02502) paper. Only applies to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. generator (`torch.Generator`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.Tensor`, *optional*): Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor is generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generated image. Choose between `PIL.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a plain tuple. callback (`Callable`, *optional*): A function that calls every `callback_steps` steps during inference. The function is called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function is called. 
If not specified, the callback is called at every step. Examples: ```py >>> from diffusers import VersatileDiffusionImageVariationPipeline >>> import torch >>> import requests >>> from io import BytesIO >>> from PIL import Image >>> # let's download an initial image >>> url = "https://huggingface.co/datasets/diffusers/images/resolve/main/benz.jpg" >>> response = requests.get(url) >>> image = Image.open(BytesIO(response.content)).convert("RGB") >>> pipe = VersatileDiffusionImageVariationPipeline.from_pretrained( ... "shi-labs/versatile-diffusion", torch_dtype=torch.float16 ... ) >>> pipe = pipe.to("cuda") >>> generator = torch.Generator(device="cuda").manual_seed(0) >>> image = pipe(image, generator=generator).images[0] >>> image.save("./car_variation.png") ``` Returns: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated images. """ # 0. Default height and width to unet height = height or self.image_unet.config.sample_size * self.vae_scale_factor width = width or self.image_unet.config.sample_size * self.vae_scale_factor # 1. Check inputs. Raise error if not correct self.check_inputs(image, height, width, callback_steps) # 2. Define call parameters batch_size = 1 if isinstance(image, PIL.Image.Image) else len(image) device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1` # corresponds to doing no classifier free guidance. do_classifier_free_guidance = guidance_scale > 1.0 # 3. Encode input prompt image_embeddings = self._encode_prompt( image, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt ) # 4. Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps # 5. Prepare latent variables num_channels_latents = self.image_unet.config.in_channels latents = self.prepare_latents( batch_size * num_images_per_prompt, num_channels_latents, height, width, image_embeddings.dtype, device, generator, latents, ) # 6. Prepare extra step kwargs. extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # 7. 
Denoising loop for i, t in enumerate(self.progress_bar(timesteps)): # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) # predict the noise residual noise_pred = self.image_unet(latent_model_input, t, encoder_hidden_states=image_embeddings).sample # perform guidance if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, "order", 1) callback(step_idx, t, latents) if not output_type == "latent": image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] else: image = latents image = self.image_processor.postprocess(image, output_type=output_type) if not return_dict: return (image,) return ImagePipelineOutput(images=image)
diffusers/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_image_variation.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_image_variation.py", "repo_id": "diffusers", "token_count": 8245 }
173
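The classifier-free guidance step inside the denoising loop above is easy to gloss over. Below is a minimal, self-contained sketch of just that arithmetic; the tensor shapes and the `guidance_scale` value are illustrative placeholders, not values taken from the pipeline.

```py
import torch

# Stand-in for the UNet output on the doubled (unconditional + conditional) batch.
guidance_scale = 7.5
noise_pred = torch.randn(2, 4, 64, 64)

# Same split-and-recombine the loop performs when `do_classifier_free_guidance` is True.
noise_pred_uncond, noise_pred_cond = noise_pred.chunk(2)
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_cond - noise_pred_uncond)
print(noise_pred.shape)  # torch.Size([1, 4, 64, 64])
```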
from typing import TYPE_CHECKING from ...utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_torch_available, is_transformers_available, ) _dummy_objects = {} _import_structure = {} try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_torch_and_transformers_objects # noqa F403 _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: _import_structure["pipeline_hunyuan_skyreels_image2video"] = ["HunyuanSkyreelsImageToVideoPipeline"] _import_structure["pipeline_hunyuan_video"] = ["HunyuanVideoPipeline"] _import_structure["pipeline_hunyuan_video_framepack"] = ["HunyuanVideoFramepackPipeline"] _import_structure["pipeline_hunyuan_video_image2video"] = ["HunyuanVideoImageToVideoPipeline"] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * else: from .pipeline_hunyuan_skyreels_image2video import HunyuanSkyreelsImageToVideoPipeline from .pipeline_hunyuan_video import HunyuanVideoPipeline from .pipeline_hunyuan_video_framepack import HunyuanVideoFramepackPipeline from .pipeline_hunyuan_video_image2video import HunyuanVideoImageToVideoPipeline else: import sys sys.modules[__name__] = _LazyModule( __name__, globals()["__file__"], _import_structure, module_spec=__spec__, ) for name, value in _dummy_objects.items(): setattr(sys.modules[__name__], name, value)
diffusers/src/diffusers/pipelines/hunyuan_video/__init__.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/hunyuan_video/__init__.py", "repo_id": "diffusers", "token_count": 737 }
174
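A short sketch of how the lazy-import scaffolding above behaves from the caller's side (assuming `torch` and `transformers` are installed): importing the subpackage stays cheap, and the heavy pipeline module is only imported on first attribute access.

```py
import diffusers.pipelines.hunyuan_video as hunyuan_video

# The subpackage was replaced with the _LazyModule registered above.
print(type(hunyuan_video))

# First attribute access resolves the `_import_structure` entry and actually
# imports `pipeline_hunyuan_video`.
HunyuanVideoPipeline = hunyuan_video.HunyuanVideoPipeline
print(HunyuanVideoPipeline.__name__)
```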
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel


class MCLIPConfig(XLMRobertaConfig):
    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        # Hidden width of the underlying XLM-R encoder (input to the projection).
        self.transformerDimensions = transformerDimSize
        # Target dimensionality of the projected (CLIP image space) embedding.
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MultilingualCLIP(PreTrainedModel):
    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        # Projects the pooled text representation into the image-embedding dimension.
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, input_ids, attention_mask):
        # Per-token hidden states from XLM-R.
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        # Mask-weighted mean pooling over the sequence dimension (padding tokens are ignored).
        embs2 = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        # Return both the projected pooled embedding and the raw per-token states.
        return self.LinearTransformation(embs2), embs
diffusers/src/diffusers/pipelines/kandinsky/text_encoder.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/kandinsky/text_encoder.py", "repo_id": "diffusers", "token_count": 405 }
175
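A minimal sketch of what `MultilingualCLIP.forward` computes: a mask-weighted mean over the token embeddings followed by a linear projection into the image-embedding space. The tiny configuration below is arbitrary (chosen only so a randomly initialized model runs instantly) and does not reflect the shipped M-CLIP checkpoint sizes.

```py
import torch

from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP

# `transformerDimSize` must match `hidden_size` so the projection lines up with
# the XLM-R output width; all sizes here are illustrative.
config = MCLIPConfig(
    transformerDimSize=32,
    imageDimSize=16,
    vocab_size=100,
    hidden_size=32,
    num_hidden_layers=1,
    num_attention_heads=2,
    intermediate_size=64,
    max_position_embeddings=64,
)
model = MultilingualCLIP(config)

input_ids = torch.randint(0, 100, (2, 10))
attention_mask = torch.ones(2, 10, dtype=torch.long)

projected, token_embeddings = model(input_ids, attention_mask)
print(projected.shape)         # (2, 16): pooled and projected to the image dimension
print(token_embeddings.shape)  # (2, 10, 32): raw per-token hidden states
```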
from typing import TYPE_CHECKING from ...utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_torch_available, is_transformers_available, ) _dummy_objects = {} _import_structure = {} try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_torch_and_transformers_objects # noqa F403 _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: _import_structure["modeling_latent_upsampler"] = ["LTXLatentUpsamplerModel"] _import_structure["pipeline_ltx"] = ["LTXPipeline"] _import_structure["pipeline_ltx_condition"] = ["LTXConditionPipeline"] _import_structure["pipeline_ltx_image2video"] = ["LTXImageToVideoPipeline"] _import_structure["pipeline_ltx_latent_upsample"] = ["LTXLatentUpsamplePipeline"] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * else: from .modeling_latent_upsampler import LTXLatentUpsamplerModel from .pipeline_ltx import LTXPipeline from .pipeline_ltx_condition import LTXConditionPipeline from .pipeline_ltx_image2video import LTXImageToVideoPipeline from .pipeline_ltx_latent_upsample import LTXLatentUpsamplePipeline else: import sys sys.modules[__name__] = _LazyModule( __name__, globals()["__file__"], _import_structure, module_spec=__spec__, ) for name, value in _dummy_objects.items(): setattr(sys.modules[__name__], name, value)
diffusers/src/diffusers/pipelines/ltx/__init__.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/ltx/__init__.py", "repo_id": "diffusers", "token_count": 752 }
176
# Copyright 2025 PixArt-Sigma Authors and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import html import inspect import re import urllib.parse as ul import warnings from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch from transformers import Gemma2PreTrainedModel, GemmaTokenizer, GemmaTokenizerFast from ...callbacks import MultiPipelineCallbacks, PipelineCallback from ...image_processor import PixArtImageProcessor from ...loaders import SanaLoraLoaderMixin from ...models import AutoencoderDC, SanaTransformer2DModel from ...schedulers import DPMSolverMultistepScheduler from ...utils import ( BACKENDS_MAPPING, USE_PEFT_BACKEND, is_bs4_available, is_ftfy_available, is_torch_xla_available, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers, ) from ...utils.torch_utils import get_device, is_torch_version, randn_tensor from ..pipeline_utils import DiffusionPipeline from ..pixart_alpha.pipeline_pixart_alpha import ( ASPECT_RATIO_512_BIN, ASPECT_RATIO_1024_BIN, ) from ..pixart_alpha.pipeline_pixart_sigma import ASPECT_RATIO_2048_BIN from .pipeline_output import SanaPipelineOutput if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) # pylint: disable=invalid-name if is_bs4_available(): from bs4 import BeautifulSoup if is_ftfy_available(): import ftfy ASPECT_RATIO_4096_BIN = { "0.25": [2048.0, 8192.0], "0.26": [2048.0, 7936.0], "0.27": [2048.0, 7680.0], "0.28": [2048.0, 7424.0], "0.32": [2304.0, 7168.0], "0.33": [2304.0, 6912.0], "0.35": [2304.0, 6656.0], "0.4": [2560.0, 6400.0], "0.42": [2560.0, 6144.0], "0.48": [2816.0, 5888.0], "0.5": [2816.0, 5632.0], "0.52": [2816.0, 5376.0], "0.57": [3072.0, 5376.0], "0.6": [3072.0, 5120.0], "0.68": [3328.0, 4864.0], "0.72": [3328.0, 4608.0], "0.78": [3584.0, 4608.0], "0.82": [3584.0, 4352.0], "0.88": [3840.0, 4352.0], "0.94": [3840.0, 4096.0], "1.0": [4096.0, 4096.0], "1.07": [4096.0, 3840.0], "1.13": [4352.0, 3840.0], "1.21": [4352.0, 3584.0], "1.29": [4608.0, 3584.0], "1.38": [4608.0, 3328.0], "1.46": [4864.0, 3328.0], "1.67": [5120.0, 3072.0], "1.75": [5376.0, 3072.0], "2.0": [5632.0, 2816.0], "2.09": [5888.0, 2816.0], "2.4": [6144.0, 2560.0], "2.5": [6400.0, 2560.0], "2.89": [6656.0, 2304.0], "3.0": [6912.0, 2304.0], "3.11": [7168.0, 2304.0], "3.62": [7424.0, 2048.0], "3.75": [7680.0, 2048.0], "3.88": [7936.0, 2048.0], "4.0": [8192.0, 2048.0], } EXAMPLE_DOC_STRING = """ Examples: ```py >>> import torch >>> from diffusers import SanaPipeline >>> pipe = SanaPipeline.from_pretrained( ... "Efficient-Large-Model/Sana_1600M_1024px_BF16_diffusers", torch_dtype=torch.float32 ... 
) >>> pipe.to("cuda") >>> pipe.text_encoder.to(torch.bfloat16) >>> pipe.transformer = pipe.transformer.to(torch.bfloat16) >>> image = pipe(prompt='a cyberpunk cat with a neon sign that says "Sana"')[0] >>> image[0].save("output.png") ``` """ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps def retrieve_timesteps( scheduler, num_inference_steps: Optional[int] = None, device: Optional[Union[str, torch.device]] = None, timesteps: Optional[List[int]] = None, sigmas: Optional[List[float]] = None, **kwargs, ): r""" Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. Args: scheduler (`SchedulerMixin`): The scheduler to get timesteps from. num_inference_steps (`int`): The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` must be `None`. device (`str` or `torch.device`, *optional*): The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. timesteps (`List[int]`, *optional*): Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, `num_inference_steps` and `sigmas` must be `None`. sigmas (`List[float]`, *optional*): Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, `num_inference_steps` and `timesteps` must be `None`. Returns: `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the second element is the number of inference steps. """ if timesteps is not None and sigmas is not None: raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") if timesteps is not None: accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" timestep schedules. Please check whether you are using the correct scheduler." ) scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif sigmas is not None: accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accept_sigmas: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" sigmas schedules. Please check whether you are using the correct scheduler." ) scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return timesteps, num_inference_steps class SanaPipeline(DiffusionPipeline, SanaLoraLoaderMixin): r""" Pipeline for text-to-image generation using [Sana](https://huggingface.co/papers/2410.10629). 
""" # fmt: off bad_punct_regex = re.compile(r"[" + "#®•©™&@·º½¾¿¡§~" + r"\)" + r"\(" + r"\]" + r"\[" + r"\}" + r"\{" + r"\|" + "\\" + r"\/" + r"\*" + r"]{1,}") # fmt: on model_cpu_offload_seq = "text_encoder->transformer->vae" _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"] def __init__( self, tokenizer: Union[GemmaTokenizer, GemmaTokenizerFast], text_encoder: Gemma2PreTrainedModel, vae: AutoencoderDC, transformer: SanaTransformer2DModel, scheduler: DPMSolverMultistepScheduler, ): super().__init__() self.register_modules( tokenizer=tokenizer, text_encoder=text_encoder, vae=vae, transformer=transformer, scheduler=scheduler ) self.vae_scale_factor = ( 2 ** (len(self.vae.config.encoder_block_out_channels) - 1) if hasattr(self, "vae") and self.vae is not None else 32 ) self.image_processor = PixArtImageProcessor(vae_scale_factor=self.vae_scale_factor) def enable_vae_slicing(self): r""" Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. """ self.vae.enable_slicing() def disable_vae_slicing(self): r""" Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to computing decoding in one step. """ self.vae.disable_slicing() def enable_vae_tiling(self): r""" Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow processing larger images. """ self.vae.enable_tiling() def disable_vae_tiling(self): r""" Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to computing decoding in one step. """ self.vae.disable_tiling() def _get_gemma_prompt_embeds( self, prompt: Union[str, List[str]], device: torch.device, dtype: torch.dtype, clean_caption: bool = False, max_sequence_length: int = 300, complex_human_instruction: Optional[List[str]] = None, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded device: (`torch.device`, *optional*): torch device to place the resulting embeddings on clean_caption (`bool`, defaults to `False`): If `True`, the function will preprocess and clean the provided caption before encoding. max_sequence_length (`int`, defaults to 300): Maximum sequence length to use for the prompt. complex_human_instruction (`list[str]`, defaults to `complex_human_instruction`): If `complex_human_instruction` is not empty, the function will use the complex Human instruction for the prompt. 
""" prompt = [prompt] if isinstance(prompt, str) else prompt if getattr(self, "tokenizer", None) is not None: self.tokenizer.padding_side = "right" prompt = self._text_preprocessing(prompt, clean_caption=clean_caption) # prepare complex human instruction if not complex_human_instruction: max_length_all = max_sequence_length else: chi_prompt = "\n".join(complex_human_instruction) prompt = [chi_prompt + p for p in prompt] num_chi_prompt_tokens = len(self.tokenizer.encode(chi_prompt)) max_length_all = num_chi_prompt_tokens + max_sequence_length - 2 text_inputs = self.tokenizer( prompt, padding="max_length", max_length=max_length_all, truncation=True, add_special_tokens=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids prompt_attention_mask = text_inputs.attention_mask prompt_attention_mask = prompt_attention_mask.to(device) prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=prompt_attention_mask) prompt_embeds = prompt_embeds[0].to(dtype=dtype, device=device) return prompt_embeds, prompt_attention_mask def encode_prompt( self, prompt: Union[str, List[str]], do_classifier_free_guidance: bool = True, negative_prompt: str = "", num_images_per_prompt: int = 1, device: Optional[torch.device] = None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, prompt_attention_mask: Optional[torch.Tensor] = None, negative_prompt_attention_mask: Optional[torch.Tensor] = None, clean_caption: bool = False, max_sequence_length: int = 300, complex_human_instruction: Optional[List[str]] = None, lora_scale: Optional[float] = None, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded negative_prompt (`str` or `List[str]`, *optional*): The prompt not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). For PixArt-Alpha, this should be "". do_classifier_free_guidance (`bool`, *optional*, defaults to `True`): whether to use classifier free guidance or not num_images_per_prompt (`int`, *optional*, defaults to 1): number of images that should be generated per prompt device: (`torch.device`, *optional*): torch device to place the resulting embeddings on prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. For Sana, it's should be the embeddings of the "" string. clean_caption (`bool`, defaults to `False`): If `True`, the function will preprocess and clean the provided caption before encoding. max_sequence_length (`int`, defaults to 300): Maximum sequence length to use for the prompt. complex_human_instruction (`list[str]`, defaults to `complex_human_instruction`): If `complex_human_instruction` is not empty, the function will use the complex Human instruction for the prompt. 
""" if device is None: device = self._execution_device if self.text_encoder is not None: dtype = self.text_encoder.dtype else: dtype = None # set lora scale so that monkey patched LoRA # function of text encoder can correctly access it if lora_scale is not None and isinstance(self, SanaLoraLoaderMixin): self._lora_scale = lora_scale # dynamically adjust the LoRA scale if self.text_encoder is not None and USE_PEFT_BACKEND: scale_lora_layers(self.text_encoder, lora_scale) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if getattr(self, "tokenizer", None) is not None: self.tokenizer.padding_side = "right" # See Section 3.1. of the paper. max_length = max_sequence_length select_index = [0] + list(range(-max_length + 1, 0)) if prompt_embeds is None: prompt_embeds, prompt_attention_mask = self._get_gemma_prompt_embeds( prompt=prompt, device=device, dtype=dtype, clean_caption=clean_caption, max_sequence_length=max_sequence_length, complex_human_instruction=complex_human_instruction, ) prompt_embeds = prompt_embeds[:, select_index] prompt_attention_mask = prompt_attention_mask[:, select_index] bs_embed, seq_len, _ = prompt_embeds.shape # duplicate text embeddings and attention mask for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) prompt_attention_mask = prompt_attention_mask.view(bs_embed, -1) prompt_attention_mask = prompt_attention_mask.repeat(num_images_per_prompt, 1) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance and negative_prompt_embeds is None: negative_prompt = [negative_prompt] * batch_size if isinstance(negative_prompt, str) else negative_prompt negative_prompt_embeds, negative_prompt_attention_mask = self._get_gemma_prompt_embeds( prompt=negative_prompt, device=device, dtype=dtype, clean_caption=clean_caption, max_sequence_length=max_sequence_length, complex_human_instruction=False, ) if do_classifier_free_guidance: # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) negative_prompt_attention_mask = negative_prompt_attention_mask.view(bs_embed, -1) negative_prompt_attention_mask = negative_prompt_attention_mask.repeat(num_images_per_prompt, 1) else: negative_prompt_embeds = None negative_prompt_attention_mask = None if self.text_encoder is not None: if isinstance(self, SanaLoraLoaderMixin) and USE_PEFT_BACKEND: # Retrieve the original scale by scaling back the LoRA layers unscale_lora_layers(self.text_encoder, lora_scale) return prompt_embeds, prompt_attention_mask, negative_prompt_embeds, negative_prompt_attention_mask # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://huggingface.co/papers/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs def check_inputs( self, prompt, height, width, callback_on_step_end_tensor_inputs=None, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, prompt_attention_mask=None, negative_prompt_attention_mask=None, ): if height % 32 != 0 or width % 32 != 0: raise ValueError(f"`height` and `width` have to be divisible by 32 but are {height} and {width}.") if callback_on_step_end_tensor_inputs is not None and not all( k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs ): raise ValueError( f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" ) if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." ) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) if prompt_embeds is not None and prompt_attention_mask is None: raise ValueError("Must provide `prompt_attention_mask` when specifying `prompt_embeds`.") if negative_prompt_embeds is not None and negative_prompt_attention_mask is None: raise ValueError("Must provide `negative_prompt_attention_mask` when specifying `negative_prompt_embeds`.") if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." ) if prompt_attention_mask.shape != negative_prompt_attention_mask.shape: raise ValueError( "`prompt_attention_mask` and `negative_prompt_attention_mask` must have the same shape when passed directly, but" f" got: `prompt_attention_mask` {prompt_attention_mask.shape} != `negative_prompt_attention_mask`" f" {negative_prompt_attention_mask.shape}." 
) # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline._text_preprocessing def _text_preprocessing(self, text, clean_caption=False): if clean_caption and not is_bs4_available(): logger.warning(BACKENDS_MAPPING["bs4"][-1].format("Setting `clean_caption=True`")) logger.warning("Setting `clean_caption` to False...") clean_caption = False if clean_caption and not is_ftfy_available(): logger.warning(BACKENDS_MAPPING["ftfy"][-1].format("Setting `clean_caption=True`")) logger.warning("Setting `clean_caption` to False...") clean_caption = False if not isinstance(text, (tuple, list)): text = [text] def process(text: str): if clean_caption: text = self._clean_caption(text) text = self._clean_caption(text) else: text = text.lower().strip() return text return [process(t) for t in text] # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline._clean_caption def _clean_caption(self, caption): caption = str(caption) caption = ul.unquote_plus(caption) caption = caption.strip().lower() caption = re.sub("<person>", "person", caption) # urls: caption = re.sub( r"\b((?:https?:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))", # noqa "", caption, ) # regex for urls caption = re.sub( r"\b((?:www:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))", # noqa "", caption, ) # regex for urls # html: caption = BeautifulSoup(caption, features="html.parser").text # @<nickname> caption = re.sub(r"@[\w\d]+\b", "", caption) # 31C0—31EF CJK Strokes # 31F0—31FF Katakana Phonetic Extensions # 3200—32FF Enclosed CJK Letters and Months # 3300—33FF CJK Compatibility # 3400—4DBF CJK Unified Ideographs Extension A # 4DC0—4DFF Yijing Hexagram Symbols # 4E00—9FFF CJK Unified Ideographs caption = re.sub(r"[\u31c0-\u31ef]+", "", caption) caption = re.sub(r"[\u31f0-\u31ff]+", "", caption) caption = re.sub(r"[\u3200-\u32ff]+", "", caption) caption = re.sub(r"[\u3300-\u33ff]+", "", caption) caption = re.sub(r"[\u3400-\u4dbf]+", "", caption) caption = re.sub(r"[\u4dc0-\u4dff]+", "", caption) caption = re.sub(r"[\u4e00-\u9fff]+", "", caption) ####################################################### # все виды тире / all types of dash --> "-" caption = re.sub( r"[\u002D\u058A\u05BE\u1400\u1806\u2010-\u2015\u2E17\u2E1A\u2E3A\u2E3B\u2E40\u301C\u3030\u30A0\uFE31\uFE32\uFE58\uFE63\uFF0D]+", # noqa "-", caption, ) # кавычки к одному стандарту caption = re.sub(r"[`´«»“”¨]", '"', caption) caption = re.sub(r"[‘’]", "'", caption) # &quot; caption = re.sub(r"&quot;?", "", caption) # &amp caption = re.sub(r"&amp", "", caption) # ip addresses: caption = re.sub(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}", " ", caption) # article ids: caption = re.sub(r"\d:\d\d\s+$", "", caption) # \n caption = re.sub(r"\\n", " ", caption) # "#123" caption = re.sub(r"#\d{1,3}\b", "", caption) # "#12345.." caption = re.sub(r"#\d{5,}\b", "", caption) # "123456.." caption = re.sub(r"\b\d{6,}\b", "", caption) # filenames: caption = re.sub(r"[\S]+\.(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)", "", caption) # caption = re.sub(r"[\"\']{2,}", r'"', caption) # """AUSVERKAUFT""" caption = re.sub(r"[\.]{2,}", r" ", caption) # """AUSVERKAUFT""" caption = re.sub(self.bad_punct_regex, r" ", caption) # ***AUSVERKAUFT***, #AUSVERKAUFT caption = re.sub(r"\s+\.\s+", r" ", caption) # " . 
" # this-is-my-cute-cat / this_is_my_cute_cat regex2 = re.compile(r"(?:\-|\_)") if len(re.findall(regex2, caption)) > 3: caption = re.sub(regex2, " ", caption) caption = ftfy.fix_text(caption) caption = html.unescape(html.unescape(caption)) caption = re.sub(r"\b[a-zA-Z]{1,3}\d{3,15}\b", "", caption) # jc6640 caption = re.sub(r"\b[a-zA-Z]+\d+[a-zA-Z]+\b", "", caption) # jc6640vc caption = re.sub(r"\b\d+[a-zA-Z]+\d+\b", "", caption) # 6640vc231 caption = re.sub(r"(worldwide\s+)?(free\s+)?shipping", "", caption) caption = re.sub(r"(free\s)?download(\sfree)?", "", caption) caption = re.sub(r"\bclick\b\s(?:for|on)\s\w+", "", caption) caption = re.sub(r"\b(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)(\simage[s]?)?", "", caption) caption = re.sub(r"\bpage\s+\d+\b", "", caption) caption = re.sub(r"\b\d*[a-zA-Z]+\d+[a-zA-Z]+\d+[a-zA-Z\d]*\b", r" ", caption) # j2d1a2a... caption = re.sub(r"\b\d+\.?\d*[xх×]\d+\.?\d*\b", "", caption) caption = re.sub(r"\b\s+\:\s+", r": ", caption) caption = re.sub(r"(\D[,\./])\b", r"\1 ", caption) caption = re.sub(r"\s+", " ", caption) caption.strip() caption = re.sub(r"^[\"\']([\w\W]+)[\"\']$", r"\1", caption) caption = re.sub(r"^[\'\_,\-\:;]", r"", caption) caption = re.sub(r"[\'\_,\-\:\-\+]$", r"", caption) caption = re.sub(r"^\.\S+$", "", caption) return caption.strip() def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): if latents is not None: return latents.to(device=device, dtype=dtype) shape = ( batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor, ) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) return latents @property def guidance_scale(self): return self._guidance_scale @property def attention_kwargs(self): return self._attention_kwargs @property def do_classifier_free_guidance(self): return self._guidance_scale > 1.0 @property def num_timesteps(self): return self._num_timesteps @property def interrupt(self): return self._interrupt @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, prompt: Union[str, List[str]] = None, negative_prompt: str = "", num_inference_steps: int = 20, timesteps: List[int] = None, sigmas: List[float] = None, guidance_scale: float = 4.5, num_images_per_prompt: Optional[int] = 1, height: int = 1024, width: int = 1024, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, prompt_embeds: Optional[torch.Tensor] = None, prompt_attention_mask: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_attention_mask: Optional[torch.Tensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, clean_caption: bool = False, use_resolution_binning: bool = True, attention_kwargs: Optional[Dict[str, Any]] = None, callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, callback_on_step_end_tensor_inputs: List[str] = ["latents"], max_sequence_length: int = 300, complex_human_instruction: List[str] = [ "Given a user prompt, generate an 'Enhanced prompt' that provides detailed visual descriptions suitable for image generation. 
Evaluate the level of detail in the user prompt:", "- If the prompt is simple, focus on adding specifics about colors, shapes, sizes, textures, and spatial relationships to create vivid and concrete scenes.", "- If the prompt is already detailed, refine and enhance the existing details slightly without overcomplicating.", "Here are examples of how to transform or refine prompts:", "- User Prompt: A cat sleeping -> Enhanced: A small, fluffy white cat curled up in a round shape, sleeping peacefully on a warm sunny windowsill, surrounded by pots of blooming red flowers.", "- User Prompt: A busy city street -> Enhanced: A bustling city street scene at dusk, featuring glowing street lamps, a diverse crowd of people in colorful clothing, and a double-decker bus passing by towering glass skyscrapers.", "Please generate only the enhanced description for the prompt below and avoid including any additional commentary or evaluations:", "User Prompt: ", ], ) -> Union[SanaPipelineOutput, Tuple]: """ Function invoked when calling the pipeline for generation. Args: prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. instead. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). num_inference_steps (`int`, *optional*, defaults to 20): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. timesteps (`List[int]`, *optional*): Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed will be used. Must be in descending order. sigmas (`List[float]`, *optional*): Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed will be used. guidance_scale (`float`, *optional*, defaults to 4.5): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. height (`int`, *optional*, defaults to self.unet.config.sample_size): The height in pixels of the generated image. width (`int`, *optional*, defaults to self.unet.config.sample_size): The width in pixels of the generated image. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) in the DDIM paper: https://huggingface.co/papers/2010.02502. Only applies to [`schedulers.DDIMScheduler`], will be ignored for others. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. 
latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. prompt_attention_mask (`torch.Tensor`, *optional*): Pre-generated attention mask for text embeddings. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. For PixArt-Sigma this negative prompt should be "". If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. negative_prompt_attention_mask (`torch.Tensor`, *optional*): Pre-generated attention mask for negative text embeddings. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generated image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.sana.pipeline_output.SanaPipelineOutput`] instead of a plain tuple. attention_kwargs: A kwargs dictionary that, if specified, is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). clean_caption (`bool`, *optional*, defaults to `False`): Whether or not to clean the caption before creating embeddings. Requires `beautifulsoup4` and `ftfy` to be installed. If the dependencies are not installed, the embeddings will be created from the raw prompt. use_resolution_binning (`bool` defaults to `True`): If set to `True`, the requested height and width are first mapped to the closest resolutions using `ASPECT_RATIO_1024_BIN`. After the produced latents are decoded into images, they are resized back to the requested resolution. Useful for generating non-square images. callback_on_step_end (`Callable`, *optional*): A function that is called at the end of each denoising step during inference. The function is called with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by `callback_on_step_end_tensor_inputs`. callback_on_step_end_tensor_inputs (`List`, *optional*): The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the `._callback_tensor_inputs` attribute of your pipeline class. max_sequence_length (`int` defaults to `300`): Maximum sequence length to use with the `prompt`. complex_human_instruction (`List[str]`, *optional*): Instructions for complex human attention: https://github.com/NVlabs/Sana/blob/main/configs/sana_app_config/Sana_1600M_app.yaml#L55. 
Examples: Returns: [`~pipelines.sana.pipeline_output.SanaPipelineOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.sana.pipeline_output.SanaPipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated images """ if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs # 1. Check inputs. Raise error if not correct if use_resolution_binning: if self.transformer.config.sample_size == 128: aspect_ratio_bin = ASPECT_RATIO_4096_BIN elif self.transformer.config.sample_size == 64: aspect_ratio_bin = ASPECT_RATIO_2048_BIN elif self.transformer.config.sample_size == 32: aspect_ratio_bin = ASPECT_RATIO_1024_BIN elif self.transformer.config.sample_size == 16: aspect_ratio_bin = ASPECT_RATIO_512_BIN else: raise ValueError("Invalid sample size") orig_height, orig_width = height, width height, width = self.image_processor.classify_height_width_bin(height, width, ratios=aspect_ratio_bin) self.check_inputs( prompt, height, width, callback_on_step_end_tensor_inputs, negative_prompt, prompt_embeds, negative_prompt_embeds, prompt_attention_mask, negative_prompt_attention_mask, ) self._guidance_scale = guidance_scale self._attention_kwargs = attention_kwargs self._interrupt = False # 2. Default height and width to transformer if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device lora_scale = self.attention_kwargs.get("scale", None) if self.attention_kwargs is not None else None # 3. Encode input prompt ( prompt_embeds, prompt_attention_mask, negative_prompt_embeds, negative_prompt_attention_mask, ) = self.encode_prompt( prompt, self.do_classifier_free_guidance, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, device=device, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, prompt_attention_mask=prompt_attention_mask, negative_prompt_attention_mask=negative_prompt_attention_mask, clean_caption=clean_caption, max_sequence_length=max_sequence_length, complex_human_instruction=complex_human_instruction, lora_scale=lora_scale, ) if self.do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) prompt_attention_mask = torch.cat([negative_prompt_attention_mask, prompt_attention_mask], dim=0) # 4. Prepare timesteps timesteps, num_inference_steps = retrieve_timesteps( self.scheduler, num_inference_steps, device, timesteps, sigmas ) # 5. Prepare latents. latent_channels = self.transformer.config.in_channels latents = self.prepare_latents( batch_size * num_images_per_prompt, latent_channels, height, width, torch.float32, device, generator, latents, ) # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # 7. 
Denoising loop num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) self._num_timesteps = len(timesteps) transformer_dtype = self.transformer.dtype with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): if self.interrupt: continue latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents # broadcast to batch dimension in a way that's compatible with ONNX/Core ML timestep = t.expand(latent_model_input.shape[0]) timestep = timestep * self.transformer.config.timestep_scale # predict noise model_output noise_pred = self.transformer( latent_model_input.to(dtype=transformer_dtype), encoder_hidden_states=prompt_embeds.to(dtype=transformer_dtype), encoder_attention_mask=prompt_attention_mask, timestep=timestep, return_dict=False, attention_kwargs=self.attention_kwargs, )[0] noise_pred = noise_pred.float() # perform guidance if self.do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # learned sigma if self.transformer.config.out_channels // 2 == latent_channels: noise_pred = noise_pred.chunk(2, dim=1)[0] # compute previous image: x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop("latents", latents) prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if XLA_AVAILABLE: xm.mark_step() if output_type == "latent": image = latents else: latents = latents.to(self.vae.dtype) torch_accelerator_module = getattr(torch, get_device(), torch.cuda) oom_error = ( torch.OutOfMemoryError if is_torch_version(">=", "2.5.0") else torch_accelerator_module.OutOfMemoryError ) try: image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] except oom_error as e: warnings.warn( f"{e}. \n" f"Try to use VAE tiling for large images. For example: \n" f"pipe.vae.enable_tiling(tile_sample_min_width=512, tile_sample_min_height=512)" ) if use_resolution_binning: image = self.image_processor.resize_and_crop_tensor(image, orig_width, orig_height) if not output_type == "latent": image = self.image_processor.postprocess(image, output_type=output_type) # Offload all models self.maybe_free_model_hooks() if not return_dict: return (image,) return SanaPipelineOutput(images=image)
diffusers/src/diffusers/pipelines/sana/pipeline_sana.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/sana/pipeline_sana.py", "repo_id": "diffusers", "token_count": 21253 }
177
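The `use_resolution_binning` path above snaps the requested resolution to the closest aspect-ratio bin before denoising and resizes the decoded image back afterwards. The helper below is a simplified illustration of that lookup, not the actual `PixArtImageProcessor.classify_height_width_bin` implementation; it only picks the bin whose height/width ratio is closest to the request.

```py
from diffusers.pipelines.pixart_alpha.pipeline_pixart_alpha import ASPECT_RATIO_1024_BIN


def closest_bin(height: int, width: int, ratios: dict) -> tuple:
    """Return the (height, width) bin whose aspect ratio best matches the request."""
    target = height / width
    best_key = min(ratios, key=lambda r: abs(float(r) - target))
    bin_height, bin_width = ratios[best_key]
    return int(bin_height), int(bin_width)


# A 900x700 request gets mapped to the nearest 1024px-scale bin; the original size
# is only restored after decoding, via `resize_and_crop_tensor` in the pipeline above.
print(closest_bin(900, 700, ASPECT_RATIO_1024_BIN))
```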
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import importlib import inspect from typing import Callable, List, Optional, Union import torch from k_diffusion.external import CompVisDenoiser, CompVisVDenoiser from k_diffusion.sampling import BrownianTreeNoiseSampler, get_sigmas_karras from transformers import ( CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPTokenizerFast, ) from ...image_processor import VaeImageProcessor from ...loaders import ( StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin, ) from ...models import AutoencoderKL, UNet2DConditionModel from ...models.lora import adjust_lora_scale_text_encoder from ...schedulers import KarrasDiffusionSchedulers, LMSDiscreteScheduler from ...utils import ( USE_PEFT_BACKEND, deprecate, logging, scale_lora_layers, unscale_lora_layers, ) from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DeprecatedPipelineMixin, DiffusionPipeline, StableDiffusionMixin from ..stable_diffusion import StableDiffusionPipelineOutput, StableDiffusionSafetyChecker logger = logging.get_logger(__name__) # pylint: disable=invalid-name class ModelWrapper: def __init__(self, model, alphas_cumprod): self.model = model self.alphas_cumprod = alphas_cumprod def apply_model(self, *args, **kwargs): if len(args) == 3: encoder_hidden_states = args[-1] args = args[:2] if kwargs.get("cond", None) is not None: encoder_hidden_states = kwargs.pop("cond") return self.model(*args, encoder_hidden_states=encoder_hidden_states, **kwargs).sample class StableDiffusionKDiffusionPipeline( DeprecatedPipelineMixin, DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, StableDiffusionLoraLoaderMixin, ): r""" Pipeline for text-to-image generation using Stable Diffusion. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) The pipeline also inherits the following loading methods: - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings - [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for loading LoRA weights - [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for saving LoRA weights <Tip warning={true}> This is an experimental pipeline and is likely to change in the future. </Tip> Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. text_encoder ([`CLIPTextModel`]): Frozen text-encoder. Stable Diffusion uses the text portion of [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. 
tokenizer (`CLIPTokenizer`): Tokenizer of class [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. safety_checker ([`StableDiffusionSafetyChecker`]): Classification module that estimates whether generated images could be considered offensive or harmful. Please, refer to the [model card](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5) for details. feature_extractor ([`CLIPImageProcessor`]): Model that extracts features from generated images to be used as inputs for the `safety_checker`. """ _last_supported_version = "0.33.1" model_cpu_offload_seq = "text_encoder->unet->vae" _optional_components = ["safety_checker", "feature_extractor"] _exclude_from_cpu_offload = ["safety_checker"] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: Union[CLIPTokenizer, CLIPTokenizerFast], unet: UNet2DConditionModel, scheduler: KarrasDiffusionSchedulers, safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, requires_safety_checker: bool = True, ): super().__init__() logger.info( f"{self.__class__} is an experimental pipeline and is likely to change in the future. We recommend to use" " this pipeline for fast experimentation / iteration if needed, but advice to rely on existing pipelines" " as defined in https://huggingface.co/docs/diffusers/api/schedulers#implemented-schedulers for" " production settings." ) # get correct sigmas from LMS scheduler = LMSDiscreteScheduler.from_config(scheduler.config) self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, ) self.register_to_config(requires_safety_checker=requires_safety_checker) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8 self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) model = ModelWrapper(unet, scheduler.alphas_cumprod) if scheduler.config.prediction_type == "v_prediction": self.k_diffusion_model = CompVisVDenoiser(model) else: self.k_diffusion_model = CompVisDenoiser(model) def set_scheduler(self, scheduler_type: str): library = importlib.import_module("k_diffusion") sampling = getattr(library, "sampling") try: self.sampler = getattr(sampling, scheduler_type) except Exception: valid_samplers = [] for s in dir(sampling): if "sample_" in s: valid_samplers.append(s) raise ValueError(f"Invalid scheduler type {scheduler_type}. Please choose one of {valid_samplers}.") # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt def _encode_prompt( self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, lora_scale: Optional[float] = None, **kwargs, ): deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple." 
deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False) prompt_embeds_tuple = self.encode_prompt( prompt=prompt, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=do_classifier_free_guidance, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=lora_scale, **kwargs, ) # concatenate for backwards comp prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) return prompt_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt def encode_prompt( self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, lora_scale: Optional[float] = None, clip_skip: Optional[int] = None, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded device: (`torch.device`): torch device num_images_per_prompt (`int`): number of images that should be generated per prompt do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. lora_scale (`float`, *optional*): A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. 
""" # set lora scale so that monkey patched LoRA # function of text encoder can correctly access it if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): self._lora_scale = lora_scale # dynamically adjust the LoRA scale if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: # textual inversion: process multi-vector tokens if necessary if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, self.tokenizer) text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( text_input_ids, untruncated_ids ): removed_text = self.tokenizer.batch_decode( untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None if clip_skip is None: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) prompt_embeds = prompt_embeds[0] else: prompt_embeds = self.text_encoder( text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True ) # Access the `hidden_states` first, that contains a tuple of # all the hidden states from the encoder layers. Then index into # the tuple to access the hidden states from the desired layer. prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] # We also need to apply the final LayerNorm here to not mess with the # representations. The `last_hidden_states` that we typically use for # obtaining the final prompt representations passes through the LayerNorm # layer. prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) if self.text_encoder is not None: prompt_embeds_dtype = self.text_encoder.dtype elif self.unet is not None: prompt_embeds_dtype = self.unet.dtype else: prompt_embeds_dtype = prompt_embeds.dtype prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) bs_embed, seq_len, _ = prompt_embeds.shape # duplicate text embeddings for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." 
) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = negative_prompt # textual inversion: process multi-vector tokens if necessary if isinstance(self, TextualInversionLoaderMixin): uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder( uncond_input.input_ids.to(device), attention_mask=attention_mask, ) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) if self.text_encoder is not None: if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: # Retrieve the original scale by scaling back the LoRA layers unscale_lora_layers(self.text_encoder, lora_scale) return prompt_embeds, negative_prompt_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker def run_safety_checker(self, image, device, dtype): if self.safety_checker is None: has_nsfw_concept = None else: if torch.is_tensor(image): feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") else: feature_extractor_input = self.image_processor.numpy_to_pil(image) safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) image, has_nsfw_concept = self.safety_checker( images=image, clip_input=safety_checker_input.pixel_values.to(dtype) ) return image, has_nsfw_concept # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents def decode_latents(self, latents): deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) 
instead" deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.decode(latents, return_dict=False)[0] image = (image / 2 + 0.5).clamp(0, 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 image = image.cpu().permute(0, 2, 3, 1).float().numpy() return image def check_inputs( self, prompt, height, width, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, callback_on_step_end_tensor_inputs=None, ): if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) if callback_on_step_end_tensor_inputs is not None and not all( k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs ): raise ValueError( f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" ) if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." ) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." 
) def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = ( batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor, ) if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: if latents.shape != shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler return latents @torch.no_grad() def __call__( self, prompt: Union[str, List[str]] = None, height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, callback_steps: int = 1, use_karras_sigmas: Optional[bool] = False, noise_sampler_seed: Optional[int] = None, clip_skip: int = None, ): r""" Function invoked when calling the pipeline for generation. Args: prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. instead. height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): The height in pixels of the generated image. width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): The width in pixels of the generated image. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. guidance_scale (`float`, *optional*, defaults to 7.5): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds`. instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) in the DDIM paper: https://huggingface.co/papers/2010.02502. Only applies to [`schedulers.DDIMScheduler`], will be ignored for others. generator (`torch.Generator`, *optional*): One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. 
If not provided, a latents tensor will ge generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a plain tuple. callback (`Callable`, *optional*): A function that will be called every `callback_steps` steps during inference. The function will be called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function will be called. If not specified, the callback will be called at every step. use_karras_sigmas (`bool`, *optional*, defaults to `False`): Use karras sigmas. For example, specifying `sample_dpmpp_2m` to `set_scheduler` will be equivalent to `DPM++2M` in stable-diffusion-webui. On top of that, setting this option to True will make it `DPM++2M Karras`. noise_sampler_seed (`int`, *optional*, defaults to `None`): The random seed to use for the noise sampler. If `None`, a random seed will be generated. clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. Returns: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple. When returning a tuple, the first element is a list with the generated images, and the second element is a list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" (nsfw) content, according to the `safety_checker`. """ # 0. Default height and width to unet height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor # 1. Check inputs. Raise error if not correct self.check_inputs( prompt, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds ) # 2. Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1` # corresponds to doing no classifier free guidance. do_classifier_free_guidance = True if guidance_scale <= 1.0: raise ValueError("has to use guidance_scale") # 3. 
Encode input prompt prompt_embeds, negative_prompt_embeds = self.encode_prompt( prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, clip_skip=clip_skip, ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes if do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) # 4. Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=prompt_embeds.device) # 5. Prepare sigmas if use_karras_sigmas: sigma_min: float = self.k_diffusion_model.sigmas[0].item() sigma_max: float = self.k_diffusion_model.sigmas[-1].item() sigmas = get_sigmas_karras(n=num_inference_steps, sigma_min=sigma_min, sigma_max=sigma_max) else: sigmas = self.scheduler.sigmas sigmas = sigmas.to(device) sigmas = sigmas.to(prompt_embeds.dtype) # 6. Prepare latent variables num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents( batch_size * num_images_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents, ) latents = latents * sigmas[0] self.k_diffusion_model.sigmas = self.k_diffusion_model.sigmas.to(latents.device) self.k_diffusion_model.log_sigmas = self.k_diffusion_model.log_sigmas.to(latents.device) # 7. Define model function def model_fn(x, t): latent_model_input = torch.cat([x] * 2) t = torch.cat([t] * 2) noise_pred = self.k_diffusion_model(latent_model_input, t, cond=prompt_embeds) noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) return noise_pred # 8. Run k-diffusion solver sampler_kwargs = {} if "noise_sampler" in inspect.signature(self.sampler).parameters: min_sigma, max_sigma = sigmas[sigmas > 0].min(), sigmas.max() noise_sampler = BrownianTreeNoiseSampler(latents, min_sigma, max_sigma, noise_sampler_seed) sampler_kwargs["noise_sampler"] = noise_sampler if "generator" in inspect.signature(self.sampler).parameters: sampler_kwargs["generator"] = generator latents = self.sampler(model_fn, latents, sigmas, **sampler_kwargs) if not output_type == "latent": image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) else: image = latents has_nsfw_concept = None if has_nsfw_concept is None: do_denormalize = [True] * image.shape[0] else: do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) # Offload all models self.maybe_free_model_hooks() if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
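# --- Hedged usage sketch (added for illustration; not part of the original pipeline file) ---
# Shows how the k-diffusion sampler selection and the `use_karras_sigmas` branch above are
# typically exercised. The checkpoint id is an assumption; the sampler name follows the
# `set_scheduler` example given in this file's own docstring.
if __name__ == "__main__":
    import torch

    from diffusers import StableDiffusionKDiffusionPipeline

    pipe = StableDiffusionKDiffusionPipeline.from_pretrained(
        "stable-diffusion-v1-5/stable-diffusion-v1-5",  # assumed checkpoint id
        torch_dtype=torch.float16,
    ).to("cuda")  # assumes a CUDA device is available
    # `set_scheduler` selects the k-diffusion sampling function used as `self.sampler` in __call__;
    # "sample_dpmpp_2m" corresponds to DPM++ 2M (the Karras variant when use_karras_sigmas=True).
    pipe.set_scheduler("sample_dpmpp_2m")
    image = pipe(
        "an astronaut riding a horse on the moon",
        num_inference_steps=25,
        guidance_scale=7.5,
        use_karras_sigmas=True,
        noise_sampler_seed=0,
    ).images[0]
    image.save("sd_k_diffusion_example.png")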
# File: diffusers/src/diffusers/pipelines/stable_diffusion_k_diffusion/pipeline_stable_diffusion_k_diffusion.py
from typing import TYPE_CHECKING from ...utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_transformers_available, is_transformers_version, ) _dummy_objects = {} _import_structure = {} try: if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline _dummy_objects.update( {"UnCLIPImageVariationPipeline": UnCLIPImageVariationPipeline, "UnCLIPPipeline": UnCLIPPipeline} ) else: _import_structure["pipeline_unclip"] = ["UnCLIPPipeline"] _import_structure["pipeline_unclip_image_variation"] = ["UnCLIPImageVariationPipeline"] _import_structure["text_proj"] = ["UnCLIPTextProjModel"] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_unclip import UnCLIPPipeline from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline from .text_proj import UnCLIPTextProjModel else: import sys sys.modules[__name__] = _LazyModule( __name__, globals()["__file__"], _import_structure, module_spec=__spec__, ) for name, value in _dummy_objects.items(): setattr(sys.modules[__name__], name, value)
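# --- Hedged usage sketch (added for illustration; not part of the original __init__.py) ---
# The _LazyModule indirection above means these pipelines are only materialized on first
# attribute access rather than at import time. The checkpoint id below is an assumption.
if __name__ == "__main__":
    from diffusers import UnCLIPPipeline  # resolved lazily via _LazyModule

    pipe = UnCLIPPipeline.from_pretrained("kakaobrain/karlo-v1-alpha")  # assumed checkpoint id
    image = pipe("a photo of a red panda", num_images_per_prompt=1).images[0]
    image.save("unclip_example.png")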
# File: diffusers/src/diffusers/pipelines/unclip/__init__.py
# Copyright 2025 The Wan Team and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import html from typing import Any, Callable, Dict, List, Optional, Union import PIL.Image import regex as re import torch from transformers import AutoTokenizer, UMT5EncoderModel from ...callbacks import MultiPipelineCallbacks, PipelineCallback from ...image_processor import PipelineImageInput from ...loaders import WanLoraLoaderMixin from ...models import AutoencoderKLWan, WanVACETransformer3DModel from ...schedulers import FlowMatchEulerDiscreteScheduler from ...utils import is_ftfy_available, is_torch_xla_available, logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ...video_processor import VideoProcessor from ..pipeline_utils import DiffusionPipeline from .pipeline_output import WanPipelineOutput if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) # pylint: disable=invalid-name if is_ftfy_available(): import ftfy EXAMPLE_DOC_STRING = """ Examples: ```python >>> import torch >>> import PIL.Image >>> from diffusers import AutoencoderKLWan, WanVACEPipeline >>> from diffusers.schedulers.scheduling_unipc_multistep import UniPCMultistepScheduler >>> from diffusers.utils import export_to_video, load_image def prepare_video_and_mask(first_img: PIL.Image.Image, last_img: PIL.Image.Image, height: int, width: int, num_frames: int): first_img = first_img.resize((width, height)) last_img = last_img.resize((width, height)) frames = [] frames.append(first_img) # Ideally, this should be 127.5 to match original code, but they perform computation on numpy arrays # whereas we are passing PIL images. If you choose to pass numpy arrays, you can set it to 127.5 to # match the original code. frames.extend([PIL.Image.new("RGB", (width, height), (128, 128, 128))] * (num_frames - 2)) frames.append(last_img) mask_black = PIL.Image.new("L", (width, height), 0) mask_white = PIL.Image.new("L", (width, height), 255) mask = [mask_black, *[mask_white] * (num_frames - 2), mask_black] return frames, mask >>> # Available checkpoints: Wan-AI/Wan2.1-VACE-1.3B-diffusers, Wan-AI/Wan2.1-VACE-14B-diffusers >>> model_id = "Wan-AI/Wan2.1-VACE-1.3B-diffusers" >>> vae = AutoencoderKLWan.from_pretrained(model_id, subfolder="vae", torch_dtype=torch.float32) >>> pipe = WanVACEPipeline.from_pretrained(model_id, vae=vae, torch_dtype=torch.bfloat16) >>> flow_shift = 3.0 # 5.0 for 720P, 3.0 for 480P >>> pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config, flow_shift=flow_shift) >>> pipe.to("cuda") >>> prompt = "CG animation style, a small blue bird takes off from the ground, flapping its wings. The bird's feathers are delicate, with a unique pattern on its chest. The background shows a blue sky with white clouds under bright sunshine. The camera follows the bird upward, capturing its flight and the vastness of the sky from a close-up, low-angle perspective." 
>>> negative_prompt = "Bright tones, overexposed, static, blurred details, subtitles, style, works, paintings, images, static, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, misshapen limbs, fused fingers, still picture, messy background, three legs, many people in the background, walking backwards" >>> first_frame = load_image( ... "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/flf2v_input_first_frame.png" ... ) >>> last_frame = load_image( ... "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/flf2v_input_last_frame.png>>> " ... ) >>> height = 512 >>> width = 512 >>> num_frames = 81 >>> video, mask = prepare_video_and_mask(first_frame, last_frame, height, width, num_frames) >>> output = pipe( ... video=video, ... mask=mask, ... prompt=prompt, ... negative_prompt=negative_prompt, ... height=height, ... width=width, ... num_frames=num_frames, ... num_inference_steps=30, ... guidance_scale=5.0, ... generator=torch.Generator().manual_seed(42), ... ).frames[0] >>> export_to_video(output, "output.mp4", fps=16) ``` """ def basic_clean(text): text = ftfy.fix_text(text) text = html.unescape(html.unescape(text)) return text.strip() def whitespace_clean(text): text = re.sub(r"\s+", " ", text) text = text.strip() return text def prompt_clean(text): text = whitespace_clean(basic_clean(text)) return text # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents def retrieve_latents( encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample" ): if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": return encoder_output.latent_dist.sample(generator) elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": return encoder_output.latent_dist.mode() elif hasattr(encoder_output, "latents"): return encoder_output.latents else: raise AttributeError("Could not access latents of provided encoder_output") class WanVACEPipeline(DiffusionPipeline, WanLoraLoaderMixin): r""" Pipeline for controllable generation using Wan. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). Args: tokenizer ([`T5Tokenizer`]): Tokenizer from [T5](https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5Tokenizer), specifically the [google/umt5-xxl](https://huggingface.co/google/umt5-xxl) variant. text_encoder ([`T5EncoderModel`]): [T5](https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5EncoderModel), specifically the [google/umt5-xxl](https://huggingface.co/google/umt5-xxl) variant. transformer ([`WanTransformer3DModel`]): Conditional Transformer to denoise the input latents. scheduler ([`UniPCMultistepScheduler`]): A scheduler to be used in combination with `transformer` to denoise the encoded image latents. vae ([`AutoencoderKLWan`]): Variational Auto-Encoder (VAE) Model to encode and decode videos to and from latent representations. 
""" model_cpu_offload_seq = "text_encoder->transformer->vae" _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"] def __init__( self, tokenizer: AutoTokenizer, text_encoder: UMT5EncoderModel, transformer: WanVACETransformer3DModel, vae: AutoencoderKLWan, scheduler: FlowMatchEulerDiscreteScheduler, ): super().__init__() self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, transformer=transformer, scheduler=scheduler, ) self.vae_scale_factor_temporal = 2 ** sum(self.vae.temperal_downsample) if getattr(self, "vae", None) else 4 self.vae_scale_factor_spatial = 2 ** len(self.vae.temperal_downsample) if getattr(self, "vae", None) else 8 self.video_processor = VideoProcessor(vae_scale_factor=self.vae_scale_factor_spatial) # Copied from diffusers.pipelines.wan.pipeline_wan.WanPipeline._get_t5_prompt_embeds def _get_t5_prompt_embeds( self, prompt: Union[str, List[str]] = None, num_videos_per_prompt: int = 1, max_sequence_length: int = 226, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None, ): device = device or self._execution_device dtype = dtype or self.text_encoder.dtype prompt = [prompt] if isinstance(prompt, str) else prompt prompt = [prompt_clean(u) for u in prompt] batch_size = len(prompt) text_inputs = self.tokenizer( prompt, padding="max_length", max_length=max_sequence_length, truncation=True, add_special_tokens=True, return_attention_mask=True, return_tensors="pt", ) text_input_ids, mask = text_inputs.input_ids, text_inputs.attention_mask seq_lens = mask.gt(0).sum(dim=1).long() prompt_embeds = self.text_encoder(text_input_ids.to(device), mask.to(device)).last_hidden_state prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) prompt_embeds = [u[:v] for u, v in zip(prompt_embeds, seq_lens)] prompt_embeds = torch.stack( [torch.cat([u, u.new_zeros(max_sequence_length - u.size(0), u.size(1))]) for u in prompt_embeds], dim=0 ) # duplicate text embeddings for each generation per prompt, using mps friendly method _, seq_len, _ = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_videos_per_prompt, 1) prompt_embeds = prompt_embeds.view(batch_size * num_videos_per_prompt, seq_len, -1) return prompt_embeds # Copied from diffusers.pipelines.wan.pipeline_wan.WanPipeline.encode_prompt def encode_prompt( self, prompt: Union[str, List[str]], negative_prompt: Optional[Union[str, List[str]]] = None, do_classifier_free_guidance: bool = True, num_videos_per_prompt: int = 1, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, max_sequence_length: int = 226, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). do_classifier_free_guidance (`bool`, *optional*, defaults to `True`): Whether to use classifier free guidance or not. num_videos_per_prompt (`int`, *optional*, defaults to 1): Number of videos that should be generated per prompt. torch device to place the resulting embeddings on prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. 
If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. device: (`torch.device`, *optional*): torch device dtype: (`torch.dtype`, *optional*): torch dtype """ device = device or self._execution_device prompt = [prompt] if isinstance(prompt, str) else prompt if prompt is not None: batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: prompt_embeds = self._get_t5_prompt_embeds( prompt=prompt, num_videos_per_prompt=num_videos_per_prompt, max_sequence_length=max_sequence_length, device=device, dtype=dtype, ) if do_classifier_free_guidance and negative_prompt_embeds is None: negative_prompt = negative_prompt or "" negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt if prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) negative_prompt_embeds = self._get_t5_prompt_embeds( prompt=negative_prompt, num_videos_per_prompt=num_videos_per_prompt, max_sequence_length=max_sequence_length, device=device, dtype=dtype, ) return prompt_embeds, negative_prompt_embeds def check_inputs( self, prompt, negative_prompt, height, width, prompt_embeds=None, negative_prompt_embeds=None, callback_on_step_end_tensor_inputs=None, video=None, mask=None, reference_images=None, ): base = self.vae_scale_factor_spatial * self.transformer.config.patch_size[1] if height % base != 0 or width % base != 0: raise ValueError(f"`height` and `width` have to be divisible by {base} but are {height} and {width}.") if callback_on_step_end_tensor_inputs is not None and not all( k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs ): raise ValueError( f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" ) if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." 
) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") elif negative_prompt is not None and ( not isinstance(negative_prompt, str) and not isinstance(negative_prompt, list) ): raise ValueError(f"`negative_prompt` has to be of type `str` or `list` but is {type(negative_prompt)}") if video is not None: if mask is not None: if len(video) != len(mask): raise ValueError( f"Length of `video` {len(video)} and `mask` {len(mask)} do not match. Please make sure that" " they have the same length." ) if reference_images is not None: is_pil_image = isinstance(reference_images, PIL.Image.Image) is_list_of_pil_images = isinstance(reference_images, list) and all( isinstance(ref_img, PIL.Image.Image) for ref_img in reference_images ) is_list_of_list_of_pil_images = isinstance(reference_images, list) and all( isinstance(ref_img, list) and all(isinstance(ref_img_, PIL.Image.Image) for ref_img_ in ref_img) for ref_img in reference_images ) if not (is_pil_image or is_list_of_pil_images or is_list_of_list_of_pil_images): raise ValueError( "`reference_images` has to be of type `PIL.Image.Image` or `list` of `PIL.Image.Image`, or " "`list` of `list` of `PIL.Image.Image`, but is {type(reference_images)}" ) if is_list_of_list_of_pil_images and len(reference_images) != 1: raise ValueError( "The pipeline only supports generating one video at a time at the moment. When passing a list " "of list of reference images, where the outer list corresponds to the batch size and the inner " "list corresponds to list of conditioning images per video, please make sure to only pass " "one inner list of reference images (i.e., `[[<image1>, <image2>, ...]]`" ) elif mask is not None: raise ValueError("`mask` can only be passed if `video` is passed as well.") def preprocess_conditions( self, video: Optional[List[PipelineImageInput]] = None, mask: Optional[List[PipelineImageInput]] = None, reference_images: Optional[Union[PIL.Image.Image, List[PIL.Image.Image], List[List[PIL.Image.Image]]]] = None, batch_size: int = 1, height: int = 480, width: int = 832, num_frames: int = 81, dtype: Optional[torch.dtype] = None, device: Optional[torch.device] = None, ): if video is not None: base = self.vae_scale_factor_spatial * self.transformer.config.patch_size[1] video_height, video_width = self.video_processor.get_default_height_width(video[0]) if video_height * video_width > height * width: scale = min(width / video_width, height / video_height) video_height, video_width = int(video_height * scale), int(video_width * scale) if video_height % base != 0 or video_width % base != 0: logger.warning( f"Video height and width should be divisible by {base}, but got {video_height} and {video_width}. 
" ) video_height = (video_height // base) * base video_width = (video_width // base) * base assert video_height * video_width <= height * width video = self.video_processor.preprocess_video(video, video_height, video_width) image_size = (video_height, video_width) # Use the height/width of video (with possible rescaling) else: video = torch.zeros(batch_size, 3, num_frames, height, width, dtype=dtype, device=device) image_size = (height, width) # Use the height/width provider by user if mask is not None: mask = self.video_processor.preprocess_video(mask, image_size[0], image_size[1]) mask = torch.clamp((mask + 1) / 2, min=0, max=1) else: mask = torch.ones_like(video) video = video.to(dtype=dtype, device=device) mask = mask.to(dtype=dtype, device=device) # Make a list of list of images where the outer list corresponds to video batch size and the inner list # corresponds to list of conditioning images per video if reference_images is None or isinstance(reference_images, PIL.Image.Image): reference_images = [[reference_images] for _ in range(video.shape[0])] elif isinstance(reference_images, (list, tuple)) and isinstance(next(iter(reference_images)), PIL.Image.Image): reference_images = [reference_images] elif ( isinstance(reference_images, (list, tuple)) and isinstance(next(iter(reference_images)), list) and isinstance(next(iter(reference_images[0])), PIL.Image.Image) ): reference_images = reference_images else: raise ValueError( "`reference_images` has to be of type `PIL.Image.Image` or `list` of `PIL.Image.Image`, or " "`list` of `list` of `PIL.Image.Image`, but is {type(reference_images)}" ) if video.shape[0] != len(reference_images): raise ValueError( f"Batch size of `video` {video.shape[0]} and length of `reference_images` {len(reference_images)} does not match." ) ref_images_lengths = [len(reference_images_batch) for reference_images_batch in reference_images] if any(l != ref_images_lengths[0] for l in ref_images_lengths): raise ValueError( f"All batches of `reference_images` should have the same length, but got {ref_images_lengths}. Support for this " "may be added in the future." ) reference_images_preprocessed = [] for i, reference_images_batch in enumerate(reference_images): preprocessed_images = [] for j, image in enumerate(reference_images_batch): if image is None: continue image = self.video_processor.preprocess(image, None, None) img_height, img_width = image.shape[-2:] scale = min(image_size[0] / img_height, image_size[1] / img_width) new_height, new_width = int(img_height * scale), int(img_width * scale) resized_image = torch.nn.functional.interpolate( image, size=(new_height, new_width), mode="bilinear", align_corners=False ).squeeze(0) # [C, H, W] top = (image_size[0] - new_height) // 2 left = (image_size[1] - new_width) // 2 canvas = torch.ones(3, *image_size, device=device, dtype=dtype) canvas[:, top : top + new_height, left : left + new_width] = resized_image preprocessed_images.append(canvas) reference_images_preprocessed.append(preprocessed_images) return video, mask, reference_images_preprocessed def prepare_video_latents( self, video: torch.Tensor, mask: torch.Tensor, reference_images: Optional[List[List[torch.Tensor]]] = None, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, device: Optional[torch.device] = None, ) -> torch.Tensor: device = device or self._execution_device if isinstance(generator, list): # TODO: support this raise ValueError("Passing a list of generators is not yet supported. 
This may be supported in the future.") if reference_images is None: # For each batch of video, we set no re # ference image (as one or more can be passed by user) reference_images = [[None] for _ in range(video.shape[0])] else: if video.shape[0] != len(reference_images): raise ValueError( f"Batch size of `video` {video.shape[0]} and length of `reference_images` {len(reference_images)} does not match." ) if video.shape[0] != 1: # TODO: support this raise ValueError( "Generating with more than one video is not yet supported. This may be supported in the future." ) vae_dtype = self.vae.dtype video = video.to(dtype=vae_dtype) latents_mean = torch.tensor(self.vae.config.latents_mean, device=device, dtype=torch.float32).view( 1, self.vae.config.z_dim, 1, 1, 1 ) latents_std = 1.0 / torch.tensor(self.vae.config.latents_std, device=device, dtype=torch.float32).view( 1, self.vae.config.z_dim, 1, 1, 1 ) if mask is None: latents = retrieve_latents(self.vae.encode(video), generator, sample_mode="argmax").unbind(0) latents = ((latents.float() - latents_mean) * latents_std).to(vae_dtype) else: mask = torch.where(mask > 0.5, 1.0, 0.0).to(dtype=vae_dtype) inactive = video * (1 - mask) reactive = video * mask inactive = retrieve_latents(self.vae.encode(inactive), generator, sample_mode="argmax") reactive = retrieve_latents(self.vae.encode(reactive), generator, sample_mode="argmax") inactive = ((inactive.float() - latents_mean) * latents_std).to(vae_dtype) reactive = ((reactive.float() - latents_mean) * latents_std).to(vae_dtype) latents = torch.cat([inactive, reactive], dim=1) latent_list = [] for latent, reference_images_batch in zip(latents, reference_images): for reference_image in reference_images_batch: assert reference_image.ndim == 3 reference_image = reference_image.to(dtype=vae_dtype) reference_image = reference_image[None, :, None, :, :] # [1, C, 1, H, W] reference_latent = retrieve_latents(self.vae.encode(reference_image), generator, sample_mode="argmax") reference_latent = ((reference_latent.float() - latents_mean) * latents_std).to(vae_dtype) reference_latent = reference_latent.squeeze(0) # [C, 1, H, W] reference_latent = torch.cat([reference_latent, torch.zeros_like(reference_latent)], dim=0) latent = torch.cat([reference_latent.squeeze(0), latent], dim=1) latent_list.append(latent) return torch.stack(latent_list) def prepare_masks( self, mask: torch.Tensor, reference_images: Optional[List[torch.Tensor]] = None, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, ) -> torch.Tensor: if isinstance(generator, list): # TODO: support this raise ValueError("Passing a list of generators is not yet supported. This may be supported in the future.") if reference_images is None: # For each batch of video, we set no reference image (as one or more can be passed by user) reference_images = [[None] for _ in range(mask.shape[0])] else: if mask.shape[0] != len(reference_images): raise ValueError( f"Batch size of `mask` {mask.shape[0]} and length of `reference_images` {len(reference_images)} does not match." ) if mask.shape[0] != 1: # TODO: support this raise ValueError( "Generating with more than one video is not yet supported. This may be supported in the future." 
) transformer_patch_size = self.transformer.config.patch_size[1] mask_list = [] for mask_, reference_images_batch in zip(mask, reference_images): num_channels, num_frames, height, width = mask_.shape new_num_frames = (num_frames + self.vae_scale_factor_temporal - 1) // self.vae_scale_factor_temporal new_height = height // (self.vae_scale_factor_spatial * transformer_patch_size) * transformer_patch_size new_width = width // (self.vae_scale_factor_spatial * transformer_patch_size) * transformer_patch_size mask_ = mask_[0, :, :, :] mask_ = mask_.view( num_frames, new_height, self.vae_scale_factor_spatial, new_width, self.vae_scale_factor_spatial ) mask_ = mask_.permute(2, 4, 0, 1, 3).flatten(0, 1) # [8x8, num_frames, new_height, new_width] mask_ = torch.nn.functional.interpolate( mask_.unsqueeze(0), size=(new_num_frames, new_height, new_width), mode="nearest-exact" ).squeeze(0) num_ref_images = len(reference_images_batch) if num_ref_images > 0: mask_padding = torch.zeros_like(mask_[:, :num_ref_images, :, :]) mask_ = torch.cat([mask_padding, mask_], dim=1) mask_list.append(mask_) return torch.stack(mask_list) def prepare_latents( self, batch_size: int, num_channels_latents: int = 16, height: int = 480, width: int = 832, num_frames: int = 81, dtype: Optional[torch.dtype] = None, device: Optional[torch.device] = None, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, ) -> torch.Tensor: if latents is not None: return latents.to(device=device, dtype=dtype) num_latent_frames = (num_frames - 1) // self.vae_scale_factor_temporal + 1 shape = ( batch_size, num_channels_latents, num_latent_frames, int(height) // self.vae_scale_factor_spatial, int(width) // self.vae_scale_factor_spatial, ) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
) latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) return latents @property def guidance_scale(self): return self._guidance_scale @property def do_classifier_free_guidance(self): return self._guidance_scale > 1.0 @property def num_timesteps(self): return self._num_timesteps @property def current_timestep(self): return self._current_timestep @property def interrupt(self): return self._interrupt @property def attention_kwargs(self): return self._attention_kwargs @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, prompt: Union[str, List[str]] = None, negative_prompt: Union[str, List[str]] = None, video: Optional[List[PipelineImageInput]] = None, mask: Optional[List[PipelineImageInput]] = None, reference_images: Optional[List[PipelineImageInput]] = None, conditioning_scale: Union[float, List[float], torch.Tensor] = 1.0, height: int = 480, width: int = 832, num_frames: int = 81, num_inference_steps: int = 50, guidance_scale: float = 5.0, num_videos_per_prompt: Optional[int] = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, output_type: Optional[str] = "np", return_dict: bool = True, attention_kwargs: Optional[Dict[str, Any]] = None, callback_on_step_end: Optional[ Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks] ] = None, callback_on_step_end_tensor_inputs: List[str] = ["latents"], max_sequence_length: int = 512, ): r""" The call function to the pipeline for generation. Args: prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds` instead. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). video (`List[PIL.Image.Image]`, *optional*): The input video or videos to be used as a starting point for the generation. The video should be a list of PIL images, a numpy array, or a torch tensor. Currently, the pipeline only supports generating one video at a time. mask (`List[PIL.Image.Image]`, *optional*): The input mask defines which video regions to condition on and which to generate. Black areas in the mask indicate conditioning regions, while white areas indicate regions for generation. The mask should be a list of PIL images, a numpy array, or a torch tensor. Currently supports generating a single video at a time. reference_images (`List[PIL.Image.Image]`, *optional*): A list of one or more reference images as extra conditioning for the generation. For example, if you are trying to inpaint a video to change the character, you can pass reference images of the new character here. Refer to the Diffusers [examples](https://github.com/huggingface/diffusers/pull/11582) and original [user guide](https://github.com/ali-vilab/VACE/blob/0897c6d055d7d9ea9e191dce763006664d9780f8/UserGuide.md) for a full list of supported tasks and use cases. conditioning_scale (`float`, `List[float]`, `torch.Tensor`, defaults to `1.0`): The conditioning scale to be applied when adding the control conditioning latent stream to the denoising latent stream in each control layer of the model. If a float is provided, it will be applied uniformly to all layers. 
If a list or tensor is provided, it should have the same length as the number of control layers in the model (`len(transformer.config.vace_layers)`). height (`int`, defaults to `480`): The height in pixels of the generated image. width (`int`, defaults to `832`): The width in pixels of the generated image. num_frames (`int`, defaults to `81`): The number of frames in the generated video. num_inference_steps (`int`, defaults to `50`): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. guidance_scale (`float`, defaults to `5.0`): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. num_videos_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.Tensor`, *optional*): Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor is generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, text embeddings are generated from the `prompt` input argument. output_type (`str`, *optional*, defaults to `"np"`): The output format of the generated image. Choose between `PIL.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`WanPipelineOutput`] instead of a plain tuple. attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*): A function or a subclass of `PipelineCallback` or `MultiPipelineCallbacks` that is called at the end of each denoising step during the inference. with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by `callback_on_step_end_tensor_inputs`. callback_on_step_end_tensor_inputs (`List`, *optional*): The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the `._callback_tensor_inputs` attribute of your pipeline class. max_sequence_length (`int`, defaults to `512`): The maximum sequence length of the text encoder. If the prompt is longer than this, it will be truncated. If the prompt is shorter, it will be padded to this length. 
Examples: Returns: [`~WanPipelineOutput`] or `tuple`: If `return_dict` is `True`, [`WanPipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated images and the second element is a list of `bool`s indicating whether the corresponding generated image contains "not-safe-for-work" (nsfw) content. """ if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs # Simplification of implementation for now if not isinstance(prompt, str): raise ValueError("Passing a list of prompts is not yet supported. This may be supported in the future.") if num_videos_per_prompt != 1: raise ValueError( "Generating multiple videos per prompt is not yet supported. This may be supported in the future." ) # 1. Check inputs. Raise error if not correct self.check_inputs( prompt, negative_prompt, height, width, prompt_embeds, negative_prompt_embeds, callback_on_step_end_tensor_inputs, video, mask, reference_images, ) if num_frames % self.vae_scale_factor_temporal != 1: logger.warning( f"`num_frames - 1` has to be divisible by {self.vae_scale_factor_temporal}. Rounding to the nearest number." ) num_frames = num_frames // self.vae_scale_factor_temporal * self.vae_scale_factor_temporal + 1 num_frames = max(num_frames, 1) self._guidance_scale = guidance_scale self._attention_kwargs = attention_kwargs self._current_timestep = None self._interrupt = False device = self._execution_device # 2. Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] vae_dtype = self.vae.dtype transformer_dtype = self.transformer.dtype if isinstance(conditioning_scale, (int, float)): conditioning_scale = [conditioning_scale] * len(self.transformer.config.vace_layers) if isinstance(conditioning_scale, list): if len(conditioning_scale) != len(self.transformer.config.vace_layers): raise ValueError( f"Length of `conditioning_scale` {len(conditioning_scale)} does not match number of layers {len(self.transformer.config.vace_layers)}." ) conditioning_scale = torch.tensor(conditioning_scale) if isinstance(conditioning_scale, torch.Tensor): if conditioning_scale.size(0) != len(self.transformer.config.vace_layers): raise ValueError( f"Length of `conditioning_scale` {conditioning_scale.size(0)} does not match number of layers {len(self.transformer.config.vace_layers)}." ) conditioning_scale = conditioning_scale.to(device=device, dtype=transformer_dtype) # 3. Encode input prompt prompt_embeds, negative_prompt_embeds = self.encode_prompt( prompt=prompt, negative_prompt=negative_prompt, do_classifier_free_guidance=self.do_classifier_free_guidance, num_videos_per_prompt=num_videos_per_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, max_sequence_length=max_sequence_length, device=device, ) prompt_embeds = prompt_embeds.to(transformer_dtype) if negative_prompt_embeds is not None: negative_prompt_embeds = negative_prompt_embeds.to(transformer_dtype) # 4. Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps # 5. 
Prepare latent variables video, mask, reference_images = self.preprocess_conditions( video, mask, reference_images, batch_size, height, width, num_frames, torch.float32, device, ) num_reference_images = len(reference_images[0]) conditioning_latents = self.prepare_video_latents(video, mask, reference_images, generator, device) mask = self.prepare_masks(mask, reference_images, generator) conditioning_latents = torch.cat([conditioning_latents, mask], dim=1) conditioning_latents = conditioning_latents.to(transformer_dtype) num_channels_latents = self.transformer.config.in_channels latents = self.prepare_latents( batch_size * num_videos_per_prompt, num_channels_latents, height, width, num_frames + num_reference_images * self.vae_scale_factor_temporal, torch.float32, device, generator, latents, ) if conditioning_latents.shape[2] != latents.shape[2]: logger.warning( "The number of frames in the conditioning latents does not match the number of frames to be generated. Generation quality may be affected." ) # 6. Denoising loop num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order self._num_timesteps = len(timesteps) with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): if self.interrupt: continue self._current_timestep = t latent_model_input = latents.to(transformer_dtype) timestep = t.expand(latents.shape[0]) noise_pred = self.transformer( hidden_states=latent_model_input, timestep=timestep, encoder_hidden_states=prompt_embeds, control_hidden_states=conditioning_latents, control_hidden_states_scale=conditioning_scale, attention_kwargs=attention_kwargs, return_dict=False, )[0] if self.do_classifier_free_guidance: noise_uncond = self.transformer( hidden_states=latent_model_input, timestep=timestep, encoder_hidden_states=negative_prompt_embeds, control_hidden_states=conditioning_latents, control_hidden_states_scale=conditioning_scale, attention_kwargs=attention_kwargs, return_dict=False, )[0] noise_pred = noise_uncond + guidance_scale * (noise_pred - noise_uncond) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0] if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop("latents", latents) prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if XLA_AVAILABLE: xm.mark_step() self._current_timestep = None if not output_type == "latent": latents = latents[:, :, num_reference_images:] latents = latents.to(vae_dtype) latents_mean = ( torch.tensor(self.vae.config.latents_mean) .view(1, self.vae.config.z_dim, 1, 1, 1) .to(latents.device, latents.dtype) ) latents_std = 1.0 / torch.tensor(self.vae.config.latents_std).view(1, self.vae.config.z_dim, 1, 1, 1).to( latents.device, latents.dtype ) latents = latents / latents_std + latents_mean video = self.vae.decode(latents, return_dict=False)[0] video = self.video_processor.postprocess_video(video, output_type=output_type) else: video = latents # Offload all models self.maybe_free_model_hooks() if not return_dict: return (video,) return WanPipelineOutput(frames=video)
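# --- Hedged usage sketch (added for illustration; not part of the original pipeline file) ---
# Demonstrates the per-layer form of `conditioning_scale` accepted by __call__ above: either a
# single float, or one value per VACE control layer (len(transformer.config.vace_layers)).
# The `pipe`, `video`, `mask` and `prompt` arguments are assumed to be prepared as in the
# EXAMPLE_DOC_STRING at the top of this file.
def _per_layer_conditioning_example(pipe, video, mask, prompt):
    num_vace_layers = len(pipe.transformer.config.vace_layers)
    # Apply slightly weaker control conditioning, uniformly across all control layers.
    per_layer_scale = [0.8] * num_vace_layers
    frames = pipe(
        video=video,
        mask=mask,
        prompt=prompt,
        conditioning_scale=per_layer_scale,
        height=480,
        width=832,
        num_frames=81,
        num_inference_steps=30,
        guidance_scale=5.0,
    ).frames[0]
    return frames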
# File: diffusers/src/diffusers/pipelines/wan/pipeline_wan_vace.py
# Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Adapted from https://github.com/huggingface/transformers/blob/c409cd81777fb27aadc043ed3d8339dbc020fb3b/src/transformers/integrations/bitsandbytes.py """ import inspect from inspect import signature from typing import Union from ...utils import is_accelerate_available, is_bitsandbytes_available, is_torch_available, logging from ..quantization_config import QuantizationMethod if is_torch_available(): import torch import torch.nn as nn if is_bitsandbytes_available(): import bitsandbytes as bnb if is_accelerate_available(): import accelerate from accelerate import init_empty_weights from accelerate.hooks import add_hook_to_module, remove_hook_from_module logger = logging.get_logger(__name__) def _replace_with_bnb_linear( model, modules_to_not_convert=None, current_key_name=None, quantization_config=None, has_been_replaced=False, ): """ Private method that wraps the recursion for module replacement. Returns the converted model and a boolean that indicates if the conversion has been successful or not. """ for name, module in model.named_children(): if current_key_name is None: current_key_name = [] current_key_name.append(name) if isinstance(module, nn.Linear) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` current_key_name_str = ".".join(current_key_name) if not any( (key + "." 
in current_key_name_str) or (key == current_key_name_str) for key in modules_to_not_convert ): with init_empty_weights(): in_features = module.in_features out_features = module.out_features if quantization_config.quantization_method() == "llm_int8": model._modules[name] = bnb.nn.Linear8bitLt( in_features, out_features, module.bias is not None, has_fp16_weights=quantization_config.llm_int8_has_fp16_weight, threshold=quantization_config.llm_int8_threshold, ) has_been_replaced = True else: if ( quantization_config.llm_int8_skip_modules is not None and name in quantization_config.llm_int8_skip_modules ): pass else: extra_kwargs = ( {"quant_storage": quantization_config.bnb_4bit_quant_storage} if "quant_storage" in list(signature(bnb.nn.Linear4bit).parameters) else {} ) model._modules[name] = bnb.nn.Linear4bit( in_features, out_features, module.bias is not None, quantization_config.bnb_4bit_compute_dtype, compress_statistics=quantization_config.bnb_4bit_use_double_quant, quant_type=quantization_config.bnb_4bit_quant_type, **extra_kwargs, ) has_been_replaced = True # Store the module class in case we need to transpose the weight later model._modules[name].source_cls = type(module) # Force requires grad to False to avoid unexpected errors model._modules[name].requires_grad_(False) if len(list(module.children())) > 0: _, has_been_replaced = _replace_with_bnb_linear( module, modules_to_not_convert, current_key_name, quantization_config, has_been_replaced=has_been_replaced, ) # Remove the last key for recursion current_key_name.pop(-1) return model, has_been_replaced def replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None): """ Helper function to replace the `nn.Linear` layers within `model` with either `bnb.nn.Linear8bit` or `bnb.nn.Linear4bit` using the `bitsandbytes` library. References: * `bnb.nn.Linear8bit`: [LLM.int8(): 8-bit Matrix Multiplication for Transformers at Scale](https://huggingface.co/papers/2208.07339) * `bnb.nn.Linear4bit`: [QLoRA: Efficient Finetuning of Quantized LLMs](https://huggingface.co/papers/2305.14314) Parameters: model (`torch.nn.Module`): Input model or `torch.nn.Module` as the function is run recursively. modules_to_not_convert (`List[`str`]`, *optional*, defaults to `[]`): Names of the modules to not convert in `Linear8bitLt`. In practice we keep the `modules_to_not_convert` in full precision for numerical stability reasons. current_key_name (`List[`str`]`, *optional*): An array to track the current key of the recursion. This is used to check whether the current key (part of it) is not in the list of modules to not convert (for instances modules that are offloaded to `cpu` or `disk`). quantization_config ('transformers.utils.quantization_config.BitsAndBytesConfig'): To configure and manage settings related to quantization, a technique used to compress neural network models by reducing the precision of the weights and activations, thus making models more efficient in terms of both storage and computation. """ model, _ = _replace_with_bnb_linear(model, modules_to_not_convert, current_key_name, quantization_config) has_been_replaced = any( isinstance(replaced_module, (bnb.nn.Linear4bit, bnb.nn.Linear8bitLt)) for _, replaced_module in model.named_modules() ) if not has_been_replaced: logger.warning( "You are loading your model in 8bit or 4bit but no linear modules were found in your model." " Please double check your model architecture, or submit an issue on github if you think this is" " a bug." 
) return model # Adapted from PEFT: https://github.com/huggingface/peft/blob/6d458b300fc2ed82e19f796b53af4c97d03ea604/src/peft/utils/integrations.py#L81 def dequantize_bnb_weight(weight: "torch.nn.Parameter", state=None, dtype: "torch.dtype" = None): """ Helper function to dequantize 4bit or 8bit bnb weights. If the weight is not a bnb quantized weight, it will be returned as is. """ if not isinstance(weight, torch.nn.Parameter): raise TypeError(f"Input weight should be of type nn.Parameter, got {type(weight)} instead") cls_name = weight.__class__.__name__ if cls_name not in ("Params4bit", "Int8Params"): return weight if cls_name == "Params4bit": output_tensor = bnb.functional.dequantize_4bit(weight.data, weight.quant_state) msg = f"The model is going to be dequantized in {output_tensor.dtype} - if you want to upcast it to another dtype, make sure to pass the desired dtype when quantizing the model through `bnb_4bit_quant_type` argument of `BitsAndBytesConfig`" if dtype: msg = f"The model is going to be first dequantized in {output_tensor.dtype} and type-casted to {dtype}" output_tensor = output_tensor.to(dtype) logger.warning_once(msg) return output_tensor if state.SCB is None: state.SCB = weight.SCB if hasattr(bnb.functional, "int8_vectorwise_dequant"): # Use bitsandbytes API if available (requires v0.45.0+) dequantized = bnb.functional.int8_vectorwise_dequant(weight.data, state.SCB) else: # Multiply by (scale/127) to dequantize. dequantized = weight.data * state.SCB.view(-1, 1) * 7.874015718698502e-3 if dtype: dequantized = dequantized.to(dtype) return dequantized def _create_accelerate_new_hook(old_hook): r""" Creates a new hook based on the old hook. Use it only if you know what you are doing ! This method is a copy of: https://github.com/huggingface/peft/blob/748f7968f3a31ec06a1c2b0328993319ad9a150a/src/peft/utils/other.py#L245 with some changes """ old_hook_cls = getattr(accelerate.hooks, old_hook.__class__.__name__) old_hook_attr = old_hook.__dict__ filtered_old_hook_attr = {} old_hook_init_signature = inspect.signature(old_hook_cls.__init__) for k in old_hook_attr.keys(): if k in old_hook_init_signature.parameters: filtered_old_hook_attr[k] = old_hook_attr[k] new_hook = old_hook_cls(**filtered_old_hook_attr) return new_hook def _dequantize_and_replace( model, dtype, modules_to_not_convert=None, current_key_name=None, quantization_config=None, has_been_replaced=False, ): """ Converts a quantized model into its dequantized original version. The newly converted model will have some performance drop compared to the original model before quantization - use it only for specific usecases such as QLoRA adapters merging. Returns the converted model and a boolean that indicates if the conversion has been successful or not. """ quant_method = quantization_config.quantization_method() target_cls = bnb.nn.Linear8bitLt if quant_method == "llm_int8" else bnb.nn.Linear4bit for name, module in model.named_children(): if current_key_name is None: current_key_name = [] current_key_name.append(name) if isinstance(module, target_cls) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` current_key_name_str = ".".join(current_key_name) if not any( (key + "." 
in current_key_name_str) or (key == current_key_name_str) for key in modules_to_not_convert ): bias = getattr(module, "bias", None) device = module.weight.device with init_empty_weights(): new_module = torch.nn.Linear(module.in_features, module.out_features, bias=bias is not None) if quant_method == "llm_int8": state = module.state else: state = None new_module.weight = torch.nn.Parameter(dequantize_bnb_weight(module.weight, state, dtype)) if bias is not None: new_module.bias = bias # Create a new hook and attach it in case we use accelerate if hasattr(module, "_hf_hook"): old_hook = module._hf_hook new_hook = _create_accelerate_new_hook(old_hook) remove_hook_from_module(module) add_hook_to_module(new_module, new_hook) new_module.to(device) model._modules[name] = new_module has_been_replaced = True if len(list(module.children())) > 0: _, has_been_replaced = _dequantize_and_replace( module, dtype=dtype, modules_to_not_convert=modules_to_not_convert, current_key_name=current_key_name, quantization_config=quantization_config, has_been_replaced=has_been_replaced, ) # Remove the last key for recursion current_key_name.pop(-1) return model, has_been_replaced def dequantize_and_replace( model, modules_to_not_convert=None, quantization_config=None, ): model, _ = _dequantize_and_replace( model, dtype=model.dtype, modules_to_not_convert=modules_to_not_convert, quantization_config=quantization_config, ) has_been_replaced = any( isinstance(replaced_module, torch.nn.Linear) for _, replaced_module in model.named_modules() ) if not has_been_replaced: logger.warning( "Some linear modules were not dequantized. This could lead to unexpected behaviour. Please check your model." ) return model def _check_bnb_status(module) -> Union[bool, bool]: is_loaded_in_4bit_bnb = ( hasattr(module, "is_loaded_in_4bit") and module.is_loaded_in_4bit and getattr(module, "quantization_method", None) == QuantizationMethod.BITS_AND_BYTES ) is_loaded_in_8bit_bnb = ( hasattr(module, "is_loaded_in_8bit") and module.is_loaded_in_8bit and getattr(module, "quantization_method", None) == QuantizationMethod.BITS_AND_BYTES ) return is_loaded_in_4bit_bnb or is_loaded_in_8bit_bnb, is_loaded_in_4bit_bnb, is_loaded_in_8bit_bnb
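The fallback branch of dequantize_bnb_weight reconstructs an LLM.int8() weight as w_int8 * SCB / 127, where SCB holds the per-row absmax scales. A short pure-torch sketch of the row-wise quantize/dequantize round trip that this formula inverts (no bitsandbytes required; tensor values are illustrative):

import torch

weight_fp = torch.randn(4, 8)

# Row-wise absmax scales, playing the role of state.SCB (illustrative, not produced by bitsandbytes).
SCB = weight_fp.abs().amax(dim=1)
weight_int8 = torch.round(weight_fp / SCB.view(-1, 1) * 127).to(torch.int8)

# Dequantize with the same arithmetic as the fallback above: w_int8 * SCB * (1 / 127).
dequantized = weight_int8.float() * SCB.view(-1, 1) * (1.0 / 127.0)

print((weight_fp - dequantized).abs().max())  # small round-off error from the int8 grid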
diffusers/src/diffusers/quantizers/bitsandbytes/utils.py/0
{ "file_path": "diffusers/src/diffusers/quantizers/bitsandbytes/utils.py", "repo_id": "diffusers", "token_count": 6116 }
181
import math from dataclasses import dataclass from typing import List, Optional, Tuple, Union import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .scheduling_utils import SchedulerMixin def gumbel_noise(t, generator=None): device = generator.device if generator is not None else t.device noise = torch.zeros_like(t, device=device).uniform_(0, 1, generator=generator).to(t.device) return -torch.log((-torch.log(noise.clamp(1e-20))).clamp(1e-20)) def mask_by_random_topk(mask_len, probs, temperature=1.0, generator=None): confidence = torch.log(probs.clamp(1e-20)) + temperature * gumbel_noise(probs, generator=generator) sorted_confidence = torch.sort(confidence, dim=-1).values cut_off = torch.gather(sorted_confidence, 1, mask_len.long()) masking = confidence < cut_off return masking @dataclass class AmusedSchedulerOutput(BaseOutput): """ Output class for the scheduler's `step` function output. Args: prev_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images): Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the denoising loop. pred_original_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images): The predicted denoised sample `(x_{0})` based on the model output from the current timestep. `pred_original_sample` can be used to preview progress or for guidance. """ prev_sample: torch.Tensor pred_original_sample: torch.Tensor = None class AmusedScheduler(SchedulerMixin, ConfigMixin): order = 1 temperatures: torch.Tensor @register_to_config def __init__( self, mask_token_id: int, masking_schedule: str = "cosine", ): self.temperatures = None self.timesteps = None def set_timesteps( self, num_inference_steps: int, temperature: Union[int, Tuple[int, int], List[int]] = (2, 0), device: Union[str, torch.device] = None, ): self.timesteps = torch.arange(num_inference_steps, device=device).flip(0) if isinstance(temperature, (tuple, list)): self.temperatures = torch.linspace(temperature[0], temperature[1], num_inference_steps, device=device) else: self.temperatures = torch.linspace(temperature, 0.01, num_inference_steps, device=device) def step( self, model_output: torch.Tensor, timestep: torch.long, sample: torch.LongTensor, starting_mask_ratio: int = 1, generator: Optional[torch.Generator] = None, return_dict: bool = True, ) -> Union[AmusedSchedulerOutput, Tuple]: two_dim_input = sample.ndim == 3 and model_output.ndim == 4 if two_dim_input: batch_size, codebook_size, height, width = model_output.shape sample = sample.reshape(batch_size, height * width) model_output = model_output.reshape(batch_size, codebook_size, height * width).permute(0, 2, 1) unknown_map = sample == self.config.mask_token_id probs = model_output.softmax(dim=-1) device = probs.device probs_ = probs.to(generator.device) if generator is not None else probs # handles when generator is on CPU if probs_.device.type == "cpu" and probs_.dtype != torch.float32: probs_ = probs_.float() # multinomial is not implemented for cpu half precision probs_ = probs_.reshape(-1, probs.size(-1)) pred_original_sample = torch.multinomial(probs_, 1, generator=generator).to(device=device) pred_original_sample = pred_original_sample[:, 0].view(*probs.shape[:-1]) pred_original_sample = torch.where(unknown_map, pred_original_sample, sample) if timestep == 0: prev_sample = pred_original_sample else: seq_len = sample.shape[1] step_idx = (self.timesteps == timestep).nonzero() ratio = (step_idx 
+ 1) / len(self.timesteps) if self.config.masking_schedule == "cosine": mask_ratio = torch.cos(ratio * math.pi / 2) elif self.config.masking_schedule == "linear": mask_ratio = 1 - ratio else: raise ValueError(f"unknown masking schedule {self.config.masking_schedule}") mask_ratio = starting_mask_ratio * mask_ratio mask_len = (seq_len * mask_ratio).floor() # do not mask more than amount previously masked mask_len = torch.min(unknown_map.sum(dim=-1, keepdim=True) - 1, mask_len) # mask at least one mask_len = torch.max(torch.tensor([1], device=model_output.device), mask_len) selected_probs = torch.gather(probs, -1, pred_original_sample[:, :, None])[:, :, 0] # Ignores the tokens given in the input by overwriting their confidence. selected_probs = torch.where(unknown_map, selected_probs, torch.finfo(selected_probs.dtype).max) masking = mask_by_random_topk(mask_len, selected_probs, self.temperatures[step_idx], generator) # Masks tokens with lower confidence. prev_sample = torch.where(masking, self.config.mask_token_id, pred_original_sample) if two_dim_input: prev_sample = prev_sample.reshape(batch_size, height, width) pred_original_sample = pred_original_sample.reshape(batch_size, height, width) if not return_dict: return (prev_sample, pred_original_sample) return AmusedSchedulerOutput(prev_sample, pred_original_sample) def add_noise(self, sample, timesteps, generator=None): step_idx = (self.timesteps == timesteps).nonzero() ratio = (step_idx + 1) / len(self.timesteps) if self.config.masking_schedule == "cosine": mask_ratio = torch.cos(ratio * math.pi / 2) elif self.config.masking_schedule == "linear": mask_ratio = 1 - ratio else: raise ValueError(f"unknown masking schedule {self.config.masking_schedule}") mask_indices = ( torch.rand( sample.shape, device=generator.device if generator is not None else sample.device, generator=generator ).to(sample.device) < mask_ratio ) masked_sample = sample.clone() masked_sample[mask_indices] = self.config.mask_token_id return masked_sample
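AmusedScheduler keeps re-masking the lowest-confidence tokens according to the cosine (or linear) schedule used in both step and add_noise; the final timestep is handled separately in step. A standalone sketch of how many tokens remain masked per step under the cosine schedule (sequence length and step count are arbitrary illustrative values):

import math

num_inference_steps = 12
seq_len = 256  # number of latent tokens (illustrative)

for step_idx in range(num_inference_steps):
    ratio = (step_idx + 1) / num_inference_steps
    mask_ratio = math.cos(ratio * math.pi / 2)    # cosine masking schedule
    mask_len = max(1, int(seq_len * mask_ratio))  # keep at least one token masked, as the clamp in `step` does
    print(step_idx, mask_len)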
diffusers/src/diffusers/schedulers/scheduling_amused.py/0
{ "file_path": "diffusers/src/diffusers/schedulers/scheduling_amused.py", "repo_id": "diffusers", "token_count": 2775 }
182
# Copyright 2025 TSAIL Team and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # DISCLAIMER: This file is strongly influenced by https://github.com/LuChengTHU/dpm-solver from dataclasses import dataclass from typing import List, Optional, Tuple, Union import flax import jax import jax.numpy as jnp from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils_flax import ( CommonSchedulerState, FlaxKarrasDiffusionSchedulers, FlaxSchedulerMixin, FlaxSchedulerOutput, add_noise_common, ) @flax.struct.dataclass class DPMSolverMultistepSchedulerState: common: CommonSchedulerState alpha_t: jnp.ndarray sigma_t: jnp.ndarray lambda_t: jnp.ndarray # setable values init_noise_sigma: jnp.ndarray timesteps: jnp.ndarray num_inference_steps: Optional[int] = None # running values model_outputs: Optional[jnp.ndarray] = None lower_order_nums: Optional[jnp.int32] = None prev_timestep: Optional[jnp.int32] = None cur_sample: Optional[jnp.ndarray] = None @classmethod def create( cls, common: CommonSchedulerState, alpha_t: jnp.ndarray, sigma_t: jnp.ndarray, lambda_t: jnp.ndarray, init_noise_sigma: jnp.ndarray, timesteps: jnp.ndarray, ): return cls( common=common, alpha_t=alpha_t, sigma_t=sigma_t, lambda_t=lambda_t, init_noise_sigma=init_noise_sigma, timesteps=timesteps, ) @dataclass class FlaxDPMSolverMultistepSchedulerOutput(FlaxSchedulerOutput): state: DPMSolverMultistepSchedulerState class FlaxDPMSolverMultistepScheduler(FlaxSchedulerMixin, ConfigMixin): """ DPM-Solver (and the improved version DPM-Solver++) is a fast dedicated high-order solver for diffusion ODEs with the convergence order guarantee. Empirically, sampling by DPM-Solver with only 20 steps can generate high-quality samples, and it can generate quite good samples even in only 10 steps. For more details, see the original paper: https://huggingface.co/papers/2206.00927 and https://huggingface.co/papers/2211.01095 Currently, we support the multistep DPM-Solver for both noise prediction models and data prediction models. We recommend to use `solver_order=2` for guided sampling, and `solver_order=3` for unconditional sampling. We also support the "dynamic thresholding" method in Imagen (https://huggingface.co/papers/2205.11487). For pixel-space diffusion models, you can set both `algorithm_type="dpmsolver++"` and `thresholding=True` to use the dynamic thresholding. Note that the thresholding method is unsuitable for latent-space diffusion models (such as stable-diffusion). [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__` function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`. [`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and [`~SchedulerMixin.from_pretrained`] functions. 
For more details, see the original paper: https://huggingface.co/papers/2206.00927 and https://huggingface.co/papers/2211.01095 Args: num_train_timesteps (`int`): number of diffusion steps used to train the model. beta_start (`float`): the starting `beta` value of inference. beta_end (`float`): the final `beta` value. beta_schedule (`str`): the beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from `linear`, `scaled_linear`, or `squaredcos_cap_v2`. trained_betas (`np.ndarray`, optional): option to pass an array of betas directly to the constructor to bypass `beta_start`, `beta_end` etc. solver_order (`int`, default `2`): the order of DPM-Solver; can be `1` or `2` or `3`. We recommend to use `solver_order=2` for guided sampling, and `solver_order=3` for unconditional sampling. prediction_type (`str`, default `epsilon`): indicates whether the model predicts the noise (epsilon), or the data / `x0`. One of `epsilon`, `sample`, or `v-prediction`. thresholding (`bool`, default `False`): whether to use the "dynamic thresholding" method (introduced by Imagen, https://huggingface.co/papers/2205.11487). For pixel-space diffusion models, you can set both `algorithm_type=dpmsolver++` and `thresholding=True` to use the dynamic thresholding. Note that the thresholding method is unsuitable for latent-space diffusion models (such as stable-diffusion). dynamic_thresholding_ratio (`float`, default `0.995`): the ratio for the dynamic thresholding method. Default is `0.995`, the same as Imagen (https://huggingface.co/papers/2205.11487). sample_max_value (`float`, default `1.0`): the threshold value for dynamic thresholding. Valid only when `thresholding=True` and `algorithm_type="dpmsolver++`. algorithm_type (`str`, default `dpmsolver++`): the algorithm type for the solver. Either `dpmsolver` or `dpmsolver++`. The `dpmsolver` type implements the algorithms in https://huggingface.co/papers/2206.00927, and the `dpmsolver++` type implements the algorithms in https://huggingface.co/papers/2211.01095. We recommend to use `dpmsolver++` with `solver_order=2` for guided sampling (e.g. stable-diffusion). solver_type (`str`, default `midpoint`): the solver type for the second-order solver. Either `midpoint` or `heun`. The solver type slightly affects the sample quality, especially for small number of steps. We empirically find that `midpoint` solvers are slightly better, so we recommend to use the `midpoint` type. lower_order_final (`bool`, default `True`): whether to use lower-order solvers in the final steps. Only valid for < 15 inference steps. We empirically find this trick can stabilize the sampling of DPM-Solver for steps < 15, especially for steps <= 10. timestep_spacing (`str`, defaults to `"linspace"`): The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information. dtype (`jnp.dtype`, *optional*, defaults to `jnp.float32`): the `dtype` used for params and computation. 
""" _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers] dtype: jnp.dtype @property def has_state(self): return True @register_to_config def __init__( self, num_train_timesteps: int = 1000, beta_start: float = 0.0001, beta_end: float = 0.02, beta_schedule: str = "linear", trained_betas: Optional[jnp.ndarray] = None, solver_order: int = 2, prediction_type: str = "epsilon", thresholding: bool = False, dynamic_thresholding_ratio: float = 0.995, sample_max_value: float = 1.0, algorithm_type: str = "dpmsolver++", solver_type: str = "midpoint", lower_order_final: bool = True, timestep_spacing: str = "linspace", dtype: jnp.dtype = jnp.float32, ): self.dtype = dtype def create_state(self, common: Optional[CommonSchedulerState] = None) -> DPMSolverMultistepSchedulerState: if common is None: common = CommonSchedulerState.create(self) # Currently we only support VP-type noise schedule alpha_t = jnp.sqrt(common.alphas_cumprod) sigma_t = jnp.sqrt(1 - common.alphas_cumprod) lambda_t = jnp.log(alpha_t) - jnp.log(sigma_t) # settings for DPM-Solver if self.config.algorithm_type not in ["dpmsolver", "dpmsolver++"]: raise NotImplementedError(f"{self.config.algorithm_type} is not implemented for {self.__class__}") if self.config.solver_type not in ["midpoint", "heun"]: raise NotImplementedError(f"{self.config.solver_type} is not implemented for {self.__class__}") # standard deviation of the initial noise distribution init_noise_sigma = jnp.array(1.0, dtype=self.dtype) timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1] return DPMSolverMultistepSchedulerState.create( common=common, alpha_t=alpha_t, sigma_t=sigma_t, lambda_t=lambda_t, init_noise_sigma=init_noise_sigma, timesteps=timesteps, ) def set_timesteps( self, state: DPMSolverMultistepSchedulerState, num_inference_steps: int, shape: Tuple ) -> DPMSolverMultistepSchedulerState: """ Sets the discrete timesteps used for the diffusion chain. Supporting function to be run before inference. Args: state (`DPMSolverMultistepSchedulerState`): the `FlaxDPMSolverMultistepScheduler` state data class instance. num_inference_steps (`int`): the number of diffusion steps used when generating samples with a pre-trained model. shape (`Tuple`): the shape of the samples to be generated. """ last_timestep = self.config.num_train_timesteps if self.config.timestep_spacing == "linspace": timesteps = ( jnp.linspace(0, last_timestep - 1, num_inference_steps + 1).round()[::-1][:-1].astype(jnp.int32) ) elif self.config.timestep_spacing == "leading": step_ratio = last_timestep // (num_inference_steps + 1) # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 timesteps = ( (jnp.arange(0, num_inference_steps + 1) * step_ratio).round()[::-1][:-1].copy().astype(jnp.int32) ) timesteps += self.config.steps_offset elif self.config.timestep_spacing == "trailing": step_ratio = self.config.num_train_timesteps / num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 timesteps = jnp.arange(last_timestep, 0, -step_ratio).round().copy().astype(jnp.int32) timesteps -= 1 else: raise ValueError( f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'." 
) # initial running values model_outputs = jnp.zeros((self.config.solver_order,) + shape, dtype=self.dtype) lower_order_nums = jnp.int32(0) prev_timestep = jnp.int32(-1) cur_sample = jnp.zeros(shape, dtype=self.dtype) return state.replace( num_inference_steps=num_inference_steps, timesteps=timesteps, model_outputs=model_outputs, lower_order_nums=lower_order_nums, prev_timestep=prev_timestep, cur_sample=cur_sample, ) def convert_model_output( self, state: DPMSolverMultistepSchedulerState, model_output: jnp.ndarray, timestep: int, sample: jnp.ndarray, ) -> jnp.ndarray: """ Convert the model output to the corresponding type that the algorithm (DPM-Solver / DPM-Solver++) needs. DPM-Solver is designed to discretize an integral of the noise prediction model, and DPM-Solver++ is designed to discretize an integral of the data prediction model. So we need to first convert the model output to the corresponding type to match the algorithm. Note that the algorithm type and the model type is decoupled. That is to say, we can use either DPM-Solver or DPM-Solver++ for both noise prediction model and data prediction model. Args: model_output (`jnp.ndarray`): direct output from learned diffusion model. timestep (`int`): current discrete timestep in the diffusion chain. sample (`jnp.ndarray`): current instance of sample being created by diffusion process. Returns: `jnp.ndarray`: the converted model output. """ # DPM-Solver++ needs to solve an integral of the data prediction model. if self.config.algorithm_type == "dpmsolver++": if self.config.prediction_type == "epsilon": alpha_t, sigma_t = state.alpha_t[timestep], state.sigma_t[timestep] x0_pred = (sample - sigma_t * model_output) / alpha_t elif self.config.prediction_type == "sample": x0_pred = model_output elif self.config.prediction_type == "v_prediction": alpha_t, sigma_t = state.alpha_t[timestep], state.sigma_t[timestep] x0_pred = alpha_t * sample - sigma_t * model_output else: raise ValueError( f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, " " or `v_prediction` for the FlaxDPMSolverMultistepScheduler." ) if self.config.thresholding: # Dynamic thresholding in https://huggingface.co/papers/2205.11487 dynamic_max_val = jnp.percentile( jnp.abs(x0_pred), self.config.dynamic_thresholding_ratio, axis=tuple(range(1, x0_pred.ndim)) ) dynamic_max_val = jnp.maximum( dynamic_max_val, self.config.sample_max_value * jnp.ones_like(dynamic_max_val) ) x0_pred = jnp.clip(x0_pred, -dynamic_max_val, dynamic_max_val) / dynamic_max_val return x0_pred # DPM-Solver needs to solve an integral of the noise prediction model. elif self.config.algorithm_type == "dpmsolver": if self.config.prediction_type == "epsilon": return model_output elif self.config.prediction_type == "sample": alpha_t, sigma_t = state.alpha_t[timestep], state.sigma_t[timestep] epsilon = (sample - alpha_t * model_output) / sigma_t return epsilon elif self.config.prediction_type == "v_prediction": alpha_t, sigma_t = state.alpha_t[timestep], state.sigma_t[timestep] epsilon = alpha_t * model_output + sigma_t * sample return epsilon else: raise ValueError( f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, " " or `v_prediction` for the FlaxDPMSolverMultistepScheduler." ) def dpm_solver_first_order_update( self, state: DPMSolverMultistepSchedulerState, model_output: jnp.ndarray, timestep: int, prev_timestep: int, sample: jnp.ndarray, ) -> jnp.ndarray: """ One step for the first-order DPM-Solver (equivalent to DDIM). 
See https://huggingface.co/papers/2206.00927 for the detailed derivation. Args: model_output (`jnp.ndarray`): direct output from learned diffusion model. timestep (`int`): current discrete timestep in the diffusion chain. prev_timestep (`int`): previous discrete timestep in the diffusion chain. sample (`jnp.ndarray`): current instance of sample being created by diffusion process. Returns: `jnp.ndarray`: the sample tensor at the previous timestep. """ t, s0 = prev_timestep, timestep m0 = model_output lambda_t, lambda_s = state.lambda_t[t], state.lambda_t[s0] alpha_t, alpha_s = state.alpha_t[t], state.alpha_t[s0] sigma_t, sigma_s = state.sigma_t[t], state.sigma_t[s0] h = lambda_t - lambda_s if self.config.algorithm_type == "dpmsolver++": x_t = (sigma_t / sigma_s) * sample - (alpha_t * (jnp.exp(-h) - 1.0)) * m0 elif self.config.algorithm_type == "dpmsolver": x_t = (alpha_t / alpha_s) * sample - (sigma_t * (jnp.exp(h) - 1.0)) * m0 return x_t def multistep_dpm_solver_second_order_update( self, state: DPMSolverMultistepSchedulerState, model_output_list: jnp.ndarray, timestep_list: List[int], prev_timestep: int, sample: jnp.ndarray, ) -> jnp.ndarray: """ One step for the second-order multistep DPM-Solver. Args: model_output_list (`List[jnp.ndarray]`): direct outputs from learned diffusion model at current and latter timesteps. timestep (`int`): current and latter discrete timestep in the diffusion chain. prev_timestep (`int`): previous discrete timestep in the diffusion chain. sample (`jnp.ndarray`): current instance of sample being created by diffusion process. Returns: `jnp.ndarray`: the sample tensor at the previous timestep. """ t, s0, s1 = prev_timestep, timestep_list[-1], timestep_list[-2] m0, m1 = model_output_list[-1], model_output_list[-2] lambda_t, lambda_s0, lambda_s1 = state.lambda_t[t], state.lambda_t[s0], state.lambda_t[s1] alpha_t, alpha_s0 = state.alpha_t[t], state.alpha_t[s0] sigma_t, sigma_s0 = state.sigma_t[t], state.sigma_t[s0] h, h_0 = lambda_t - lambda_s0, lambda_s0 - lambda_s1 r0 = h_0 / h D0, D1 = m0, (1.0 / r0) * (m0 - m1) if self.config.algorithm_type == "dpmsolver++": # See https://huggingface.co/papers/2211.01095 for detailed derivations if self.config.solver_type == "midpoint": x_t = ( (sigma_t / sigma_s0) * sample - (alpha_t * (jnp.exp(-h) - 1.0)) * D0 - 0.5 * (alpha_t * (jnp.exp(-h) - 1.0)) * D1 ) elif self.config.solver_type == "heun": x_t = ( (sigma_t / sigma_s0) * sample - (alpha_t * (jnp.exp(-h) - 1.0)) * D0 + (alpha_t * ((jnp.exp(-h) - 1.0) / h + 1.0)) * D1 ) elif self.config.algorithm_type == "dpmsolver": # See https://huggingface.co/papers/2206.00927 for detailed derivations if self.config.solver_type == "midpoint": x_t = ( (alpha_t / alpha_s0) * sample - (sigma_t * (jnp.exp(h) - 1.0)) * D0 - 0.5 * (sigma_t * (jnp.exp(h) - 1.0)) * D1 ) elif self.config.solver_type == "heun": x_t = ( (alpha_t / alpha_s0) * sample - (sigma_t * (jnp.exp(h) - 1.0)) * D0 - (sigma_t * ((jnp.exp(h) - 1.0) / h - 1.0)) * D1 ) return x_t def multistep_dpm_solver_third_order_update( self, state: DPMSolverMultistepSchedulerState, model_output_list: jnp.ndarray, timestep_list: List[int], prev_timestep: int, sample: jnp.ndarray, ) -> jnp.ndarray: """ One step for the third-order multistep DPM-Solver. Args: model_output_list (`List[jnp.ndarray]`): direct outputs from learned diffusion model at current and latter timesteps. timestep (`int`): current and latter discrete timestep in the diffusion chain. prev_timestep (`int`): previous discrete timestep in the diffusion chain. 
sample (`jnp.ndarray`): current instance of sample being created by diffusion process. Returns: `jnp.ndarray`: the sample tensor at the previous timestep. """ t, s0, s1, s2 = prev_timestep, timestep_list[-1], timestep_list[-2], timestep_list[-3] m0, m1, m2 = model_output_list[-1], model_output_list[-2], model_output_list[-3] lambda_t, lambda_s0, lambda_s1, lambda_s2 = ( state.lambda_t[t], state.lambda_t[s0], state.lambda_t[s1], state.lambda_t[s2], ) alpha_t, alpha_s0 = state.alpha_t[t], state.alpha_t[s0] sigma_t, sigma_s0 = state.sigma_t[t], state.sigma_t[s0] h, h_0, h_1 = lambda_t - lambda_s0, lambda_s0 - lambda_s1, lambda_s1 - lambda_s2 r0, r1 = h_0 / h, h_1 / h D0 = m0 D1_0, D1_1 = (1.0 / r0) * (m0 - m1), (1.0 / r1) * (m1 - m2) D1 = D1_0 + (r0 / (r0 + r1)) * (D1_0 - D1_1) D2 = (1.0 / (r0 + r1)) * (D1_0 - D1_1) if self.config.algorithm_type == "dpmsolver++": # See https://huggingface.co/papers/2206.00927 for detailed derivations x_t = ( (sigma_t / sigma_s0) * sample - (alpha_t * (jnp.exp(-h) - 1.0)) * D0 + (alpha_t * ((jnp.exp(-h) - 1.0) / h + 1.0)) * D1 - (alpha_t * ((jnp.exp(-h) - 1.0 + h) / h**2 - 0.5)) * D2 ) elif self.config.algorithm_type == "dpmsolver": # See https://huggingface.co/papers/2206.00927 for detailed derivations x_t = ( (alpha_t / alpha_s0) * sample - (sigma_t * (jnp.exp(h) - 1.0)) * D0 - (sigma_t * ((jnp.exp(h) - 1.0) / h - 1.0)) * D1 - (sigma_t * ((jnp.exp(h) - 1.0 - h) / h**2 - 0.5)) * D2 ) return x_t def step( self, state: DPMSolverMultistepSchedulerState, model_output: jnp.ndarray, timestep: int, sample: jnp.ndarray, return_dict: bool = True, ) -> Union[FlaxDPMSolverMultistepSchedulerOutput, Tuple]: """ Predict the sample at the previous timestep by DPM-Solver. Core function to propagate the diffusion process from the learned model outputs (most often the predicted noise). Args: state (`DPMSolverMultistepSchedulerState`): the `FlaxDPMSolverMultistepScheduler` state data class instance. model_output (`jnp.ndarray`): direct output from learned diffusion model. timestep (`int`): current discrete timestep in the diffusion chain. sample (`jnp.ndarray`): current instance of sample being created by diffusion process. return_dict (`bool`): option for returning tuple rather than FlaxDPMSolverMultistepSchedulerOutput class Returns: [`FlaxDPMSolverMultistepSchedulerOutput`] or `tuple`: [`FlaxDPMSolverMultistepSchedulerOutput`] if `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is the sample tensor. 
""" if state.num_inference_steps is None: raise ValueError( "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" ) (step_index,) = jnp.where(state.timesteps == timestep, size=1) step_index = step_index[0] prev_timestep = jax.lax.select(step_index == len(state.timesteps) - 1, 0, state.timesteps[step_index + 1]) model_output = self.convert_model_output(state, model_output, timestep, sample) model_outputs_new = jnp.roll(state.model_outputs, -1, axis=0) model_outputs_new = model_outputs_new.at[-1].set(model_output) state = state.replace( model_outputs=model_outputs_new, prev_timestep=prev_timestep, cur_sample=sample, ) def step_1(state: DPMSolverMultistepSchedulerState) -> jnp.ndarray: return self.dpm_solver_first_order_update( state, state.model_outputs[-1], state.timesteps[step_index], state.prev_timestep, state.cur_sample, ) def step_23(state: DPMSolverMultistepSchedulerState) -> jnp.ndarray: def step_2(state: DPMSolverMultistepSchedulerState) -> jnp.ndarray: timestep_list = jnp.array([state.timesteps[step_index - 1], state.timesteps[step_index]]) return self.multistep_dpm_solver_second_order_update( state, state.model_outputs, timestep_list, state.prev_timestep, state.cur_sample, ) def step_3(state: DPMSolverMultistepSchedulerState) -> jnp.ndarray: timestep_list = jnp.array( [ state.timesteps[step_index - 2], state.timesteps[step_index - 1], state.timesteps[step_index], ] ) return self.multistep_dpm_solver_third_order_update( state, state.model_outputs, timestep_list, state.prev_timestep, state.cur_sample, ) step_2_output = step_2(state) step_3_output = step_3(state) if self.config.solver_order == 2: return step_2_output elif self.config.lower_order_final and len(state.timesteps) < 15: return jax.lax.select( state.lower_order_nums < 2, step_2_output, jax.lax.select( step_index == len(state.timesteps) - 2, step_2_output, step_3_output, ), ) else: return jax.lax.select( state.lower_order_nums < 2, step_2_output, step_3_output, ) step_1_output = step_1(state) step_23_output = step_23(state) if self.config.solver_order == 1: prev_sample = step_1_output elif self.config.lower_order_final and len(state.timesteps) < 15: prev_sample = jax.lax.select( state.lower_order_nums < 1, step_1_output, jax.lax.select( step_index == len(state.timesteps) - 1, step_1_output, step_23_output, ), ) else: prev_sample = jax.lax.select( state.lower_order_nums < 1, step_1_output, step_23_output, ) state = state.replace( lower_order_nums=jnp.minimum(state.lower_order_nums + 1, self.config.solver_order), ) if not return_dict: return (prev_sample, state) return FlaxDPMSolverMultistepSchedulerOutput(prev_sample=prev_sample, state=state) def scale_model_input( self, state: DPMSolverMultistepSchedulerState, sample: jnp.ndarray, timestep: Optional[int] = None ) -> jnp.ndarray: """ Ensures interchangeability with schedulers that need to scale the denoising model input depending on the current timestep. Args: state (`DPMSolverMultistepSchedulerState`): the `FlaxDPMSolverMultistepScheduler` state data class instance. sample (`jnp.ndarray`): input sample timestep (`int`, optional): current timestep Returns: `jnp.ndarray`: scaled input sample """ return sample def add_noise( self, state: DPMSolverMultistepSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray, ) -> jnp.ndarray: return add_noise_common(state.common, original_samples, noise, timesteps) def __len__(self): return self.config.num_train_timesteps
diffusers/src/diffusers/schedulers/scheduling_dpmsolver_multistep_flax.py/0
{ "file_path": "diffusers/src/diffusers/schedulers/scheduling_dpmsolver_multistep_flax.py", "repo_id": "diffusers", "token_count": 13636 }
183
# Copyright 2025 NVIDIA and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass from typing import Optional, Tuple, Union import flax import jax import jax.numpy as jnp from jax import random from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .scheduling_utils_flax import FlaxSchedulerMixin @flax.struct.dataclass class KarrasVeSchedulerState: # setable values num_inference_steps: Optional[int] = None timesteps: Optional[jnp.ndarray] = None schedule: Optional[jnp.ndarray] = None # sigma(t_i) @classmethod def create(cls): return cls() @dataclass class FlaxKarrasVeOutput(BaseOutput): """ Output class for the scheduler's step function output. Args: prev_sample (`jnp.ndarray` of shape `(batch_size, num_channels, height, width)` for images): Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the denoising loop. derivative (`jnp.ndarray` of shape `(batch_size, num_channels, height, width)` for images): Derivative of predicted original image sample (x_0). state (`KarrasVeSchedulerState`): the `FlaxKarrasVeScheduler` state data class. """ prev_sample: jnp.ndarray derivative: jnp.ndarray state: KarrasVeSchedulerState class FlaxKarrasVeScheduler(FlaxSchedulerMixin, ConfigMixin): """ Stochastic sampling from Karras et al. [1] tailored to the Variance-Expanding (VE) models [2]. Use Algorithm 2 and the VE column of Table 1 from [1] for reference. [1] Karras, Tero, et al. "Elucidating the Design Space of Diffusion-Based Generative Models." https://huggingface.co/papers/2206.00364 [2] Song, Yang, et al. "Score-based generative modeling through stochastic differential equations." https://huggingface.co/papers/2011.13456 [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__` function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`. [`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and [`~SchedulerMixin.from_pretrained`] functions. For more details on the parameters, see the original paper's Appendix E.: "Elucidating the Design Space of Diffusion-Based Generative Models." https://huggingface.co/papers/2206.00364. The grid search values used to find the optimal {s_noise, s_churn, s_min, s_max} for a specific model are described in Table 5 of the paper. Args: sigma_min (`float`): minimum noise magnitude sigma_max (`float`): maximum noise magnitude s_noise (`float`): the amount of additional noise to counteract loss of detail during sampling. A reasonable range is [1.000, 1.011]. s_churn (`float`): the parameter controlling the overall amount of stochasticity. A reasonable range is [0, 100]. s_min (`float`): the start value of the sigma range where we add noise (enable stochasticity). A reasonable range is [0, 10]. s_max (`float`): the end value of the sigma range where we add noise. 
A reasonable range is [0.2, 80]. """ @property def has_state(self): return True @register_to_config def __init__( self, sigma_min: float = 0.02, sigma_max: float = 100, s_noise: float = 1.007, s_churn: float = 80, s_min: float = 0.05, s_max: float = 50, ): pass def create_state(self): return KarrasVeSchedulerState.create() def set_timesteps( self, state: KarrasVeSchedulerState, num_inference_steps: int, shape: Tuple = () ) -> KarrasVeSchedulerState: """ Sets the continuous timesteps used for the diffusion chain. Supporting function to be run before inference. Args: state (`KarrasVeSchedulerState`): the `FlaxKarrasVeScheduler` state data class. num_inference_steps (`int`): the number of diffusion steps used when generating samples with a pre-trained model. """ timesteps = jnp.arange(0, num_inference_steps)[::-1].copy() schedule = [ ( self.config.sigma_max**2 * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1)) ) for i in timesteps ] return state.replace( num_inference_steps=num_inference_steps, schedule=jnp.array(schedule, dtype=jnp.float32), timesteps=timesteps, ) def add_noise_to_input( self, state: KarrasVeSchedulerState, sample: jnp.ndarray, sigma: float, key: jax.Array, ) -> Tuple[jnp.ndarray, float]: """ Explicit Langevin-like "churn" step of adding noise to the sample according to a factor gamma_i ≥ 0 to reach a higher noise level sigma_hat = sigma_i + gamma_i*sigma_i. TODO Args: """ if self.config.s_min <= sigma <= self.config.s_max: gamma = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1) else: gamma = 0 # sample eps ~ N(0, S_noise^2 * I) key = random.split(key, num=1) eps = self.config.s_noise * random.normal(key=key, shape=sample.shape) sigma_hat = sigma + gamma * sigma sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps) return sample_hat, sigma_hat def step( self, state: KarrasVeSchedulerState, model_output: jnp.ndarray, sigma_hat: float, sigma_prev: float, sample_hat: jnp.ndarray, return_dict: bool = True, ) -> Union[FlaxKarrasVeOutput, Tuple]: """ Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion process from the learned model outputs (most often the predicted noise). Args: state (`KarrasVeSchedulerState`): the `FlaxKarrasVeScheduler` state data class. model_output (`torch.Tensor` or `np.ndarray`): direct output from learned diffusion model. sigma_hat (`float`): TODO sigma_prev (`float`): TODO sample_hat (`torch.Tensor` or `np.ndarray`): TODO return_dict (`bool`): option for returning tuple rather than FlaxKarrasVeOutput class Returns: [`~schedulers.scheduling_karras_ve_flax.FlaxKarrasVeOutput`] or `tuple`: Updated sample in the diffusion chain and derivative. [`~schedulers.scheduling_karras_ve_flax.FlaxKarrasVeOutput`] if `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is the sample tensor. 
""" pred_original_sample = sample_hat + sigma_hat * model_output derivative = (sample_hat - pred_original_sample) / sigma_hat sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative if not return_dict: return (sample_prev, derivative, state) return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state) def step_correct( self, state: KarrasVeSchedulerState, model_output: jnp.ndarray, sigma_hat: float, sigma_prev: float, sample_hat: jnp.ndarray, sample_prev: jnp.ndarray, derivative: jnp.ndarray, return_dict: bool = True, ) -> Union[FlaxKarrasVeOutput, Tuple]: """ Correct the predicted sample based on the output model_output of the network. TODO complete description Args: state (`KarrasVeSchedulerState`): the `FlaxKarrasVeScheduler` state data class. model_output (`torch.Tensor` or `np.ndarray`): direct output from learned diffusion model. sigma_hat (`float`): TODO sigma_prev (`float`): TODO sample_hat (`torch.Tensor` or `np.ndarray`): TODO sample_prev (`torch.Tensor` or `np.ndarray`): TODO derivative (`torch.Tensor` or `np.ndarray`): TODO return_dict (`bool`): option for returning tuple rather than FlaxKarrasVeOutput class Returns: prev_sample (TODO): updated sample in the diffusion chain. derivative (TODO): TODO """ pred_original_sample = sample_prev + sigma_prev * model_output derivative_corr = (sample_prev - pred_original_sample) / sigma_prev sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr) if not return_dict: return (sample_prev, derivative, state) return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state) def add_noise(self, state: KarrasVeSchedulerState, original_samples, noise, timesteps): raise NotImplementedError()
diffusers/src/diffusers/schedulers/scheduling_karras_ve_flax.py/0
{ "file_path": "diffusers/src/diffusers/schedulers/scheduling_karras_ve_flax.py", "repo_id": "diffusers", "token_count": 3951 }
184
# Copyright 2025 Microsoft and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch import torch.nn.functional as F from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .scheduling_utils import SchedulerMixin @dataclass class VQDiffusionSchedulerOutput(BaseOutput): """ Output class for the scheduler's step function output. Args: prev_sample (`torch.LongTensor` of shape `(batch size, num latent pixels)`): Computed sample x_{t-1} of previous timestep. `prev_sample` should be used as next model input in the denoising loop. """ prev_sample: torch.LongTensor def index_to_log_onehot(x: torch.LongTensor, num_classes: int) -> torch.Tensor: """ Convert batch of vector of class indices into batch of log onehot vectors Args: x (`torch.LongTensor` of shape `(batch size, vector length)`): Batch of class indices num_classes (`int`): number of classes to be used for the onehot vectors Returns: `torch.Tensor` of shape `(batch size, num classes, vector length)`: Log onehot vectors """ x_onehot = F.one_hot(x, num_classes) x_onehot = x_onehot.permute(0, 2, 1) log_x = torch.log(x_onehot.float().clamp(min=1e-30)) return log_x def gumbel_noised(logits: torch.Tensor, generator: Optional[torch.Generator]) -> torch.Tensor: """ Apply gumbel noise to `logits` """ uniform = torch.rand(logits.shape, device=logits.device, generator=generator) gumbel_noise = -torch.log(-torch.log(uniform + 1e-30) + 1e-30) noised = gumbel_noise + logits return noised def alpha_schedules(num_diffusion_timesteps: int, alpha_cum_start=0.99999, alpha_cum_end=0.000009): """ Cumulative and non-cumulative alpha schedules. See section 4.1. """ att = ( np.arange(0, num_diffusion_timesteps) / (num_diffusion_timesteps - 1) * (alpha_cum_end - alpha_cum_start) + alpha_cum_start ) att = np.concatenate(([1], att)) at = att[1:] / att[:-1] att = np.concatenate((att[1:], [1])) return at, att def gamma_schedules(num_diffusion_timesteps: int, gamma_cum_start=0.000009, gamma_cum_end=0.99999): """ Cumulative and non-cumulative gamma schedules. See section 4.1. """ ctt = ( np.arange(0, num_diffusion_timesteps) / (num_diffusion_timesteps - 1) * (gamma_cum_end - gamma_cum_start) + gamma_cum_start ) ctt = np.concatenate(([0], ctt)) one_minus_ctt = 1 - ctt one_minus_ct = one_minus_ctt[1:] / one_minus_ctt[:-1] ct = 1 - one_minus_ct ctt = np.concatenate((ctt[1:], [0])) return ct, ctt class VQDiffusionScheduler(SchedulerMixin, ConfigMixin): """ A scheduler for vector quantized diffusion. This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic methods the library implements for all schedulers such as loading and saving. Args: num_vec_classes (`int`): The number of classes of the vector embeddings of the latent pixels. Includes the class for the masked latent pixel. 
num_train_timesteps (`int`, defaults to 100): The number of diffusion steps to train the model. alpha_cum_start (`float`, defaults to 0.99999): The starting cumulative alpha value. alpha_cum_end (`float`, defaults to 0.00009): The ending cumulative alpha value. gamma_cum_start (`float`, defaults to 0.00009): The starting cumulative gamma value. gamma_cum_end (`float`, defaults to 0.99999): The ending cumulative gamma value. """ order = 1 @register_to_config def __init__( self, num_vec_classes: int, num_train_timesteps: int = 100, alpha_cum_start: float = 0.99999, alpha_cum_end: float = 0.000009, gamma_cum_start: float = 0.000009, gamma_cum_end: float = 0.99999, ): self.num_embed = num_vec_classes # By convention, the index for the mask class is the last class index self.mask_class = self.num_embed - 1 at, att = alpha_schedules(num_train_timesteps, alpha_cum_start=alpha_cum_start, alpha_cum_end=alpha_cum_end) ct, ctt = gamma_schedules(num_train_timesteps, gamma_cum_start=gamma_cum_start, gamma_cum_end=gamma_cum_end) num_non_mask_classes = self.num_embed - 1 bt = (1 - at - ct) / num_non_mask_classes btt = (1 - att - ctt) / num_non_mask_classes at = torch.tensor(at.astype("float64")) bt = torch.tensor(bt.astype("float64")) ct = torch.tensor(ct.astype("float64")) log_at = torch.log(at) log_bt = torch.log(bt) log_ct = torch.log(ct) att = torch.tensor(att.astype("float64")) btt = torch.tensor(btt.astype("float64")) ctt = torch.tensor(ctt.astype("float64")) log_cumprod_at = torch.log(att) log_cumprod_bt = torch.log(btt) log_cumprod_ct = torch.log(ctt) self.log_at = log_at.float() self.log_bt = log_bt.float() self.log_ct = log_ct.float() self.log_cumprod_at = log_cumprod_at.float() self.log_cumprod_bt = log_cumprod_bt.float() self.log_cumprod_ct = log_cumprod_ct.float() # setable values self.num_inference_steps = None self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy()) def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None): """ Sets the discrete timesteps used for the diffusion chain (to be run before inference). Args: num_inference_steps (`int`): The number of diffusion steps used when generating samples with a pre-trained model. device (`str` or `torch.device`, *optional*): The device to which the timesteps and diffusion process parameters (alpha, beta, gamma) should be moved to. """ self.num_inference_steps = num_inference_steps timesteps = np.arange(0, self.num_inference_steps)[::-1].copy() self.timesteps = torch.from_numpy(timesteps).to(device) self.log_at = self.log_at.to(device) self.log_bt = self.log_bt.to(device) self.log_ct = self.log_ct.to(device) self.log_cumprod_at = self.log_cumprod_at.to(device) self.log_cumprod_bt = self.log_cumprod_bt.to(device) self.log_cumprod_ct = self.log_cumprod_ct.to(device) def step( self, model_output: torch.Tensor, timestep: torch.long, sample: torch.LongTensor, generator: Optional[torch.Generator] = None, return_dict: bool = True, ) -> Union[VQDiffusionSchedulerOutput, Tuple]: """ Predict the sample from the previous timestep by the reverse transition distribution. See [`~VQDiffusionScheduler.q_posterior`] for more details about how the distribution is computer. Args: log_p_x_0: (`torch.Tensor` of shape `(batch size, num classes - 1, num latent pixels)`): The log probabilities for the predicted classes of the initial latent pixels. Does not include a prediction for the masked class as the initial unnoised image cannot be masked. 
t (`torch.long`): The timestep that determines which transition matrices are used. x_t (`torch.LongTensor` of shape `(batch size, num latent pixels)`): The classes of each latent pixel at time `t`. generator (`torch.Generator`, or `None`): A random number generator for the noise applied to `p(x_{t-1} | x_t)` before it is sampled from. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~schedulers.scheduling_vq_diffusion.VQDiffusionSchedulerOutput`] or `tuple`. Returns: [`~schedulers.scheduling_vq_diffusion.VQDiffusionSchedulerOutput`] or `tuple`: If return_dict is `True`, [`~schedulers.scheduling_vq_diffusion.VQDiffusionSchedulerOutput`] is returned, otherwise a tuple is returned where the first element is the sample tensor. """ if timestep == 0: log_p_x_t_min_1 = model_output else: log_p_x_t_min_1 = self.q_posterior(model_output, sample, timestep) log_p_x_t_min_1 = gumbel_noised(log_p_x_t_min_1, generator) x_t_min_1 = log_p_x_t_min_1.argmax(dim=1) if not return_dict: return (x_t_min_1,) return VQDiffusionSchedulerOutput(prev_sample=x_t_min_1) def q_posterior(self, log_p_x_0, x_t, t): """ Calculates the log probabilities for the predicted classes of the image at timestep `t-1`: ``` p(x_{t-1} | x_t) = sum( q(x_t | x_{t-1}) * q(x_{t-1} | x_0) * p(x_0) / q(x_t | x_0) ) ``` Args: log_p_x_0 (`torch.Tensor` of shape `(batch size, num classes - 1, num latent pixels)`): The log probabilities for the predicted classes of the initial latent pixels. Does not include a prediction for the masked class as the initial unnoised image cannot be masked. x_t (`torch.LongTensor` of shape `(batch size, num latent pixels)`): The classes of each latent pixel at time `t`. t (`torch.Long`): The timestep that determines which transition matrix is used. Returns: `torch.Tensor` of shape `(batch size, num classes, num latent pixels)`: The log probabilities for the predicted classes of the image at timestep `t-1`. """ log_onehot_x_t = index_to_log_onehot(x_t, self.num_embed) log_q_x_t_given_x_0 = self.log_Q_t_transitioning_to_known_class( t=t, x_t=x_t, log_onehot_x_t=log_onehot_x_t, cumulative=True ) log_q_t_given_x_t_min_1 = self.log_Q_t_transitioning_to_known_class( t=t, x_t=x_t, log_onehot_x_t=log_onehot_x_t, cumulative=False ) # p_0(x_0=C_0 | x_t) / q(x_t | x_0=C_0) ... p_n(x_0=C_0 | x_t) / q(x_t | x_0=C_0) # . . . # . . . # . . . # p_0(x_0=C_{k-1} | x_t) / q(x_t | x_0=C_{k-1}) ... p_n(x_0=C_{k-1} | x_t) / q(x_t | x_0=C_{k-1}) q = log_p_x_0 - log_q_x_t_given_x_0 # sum_0 = p_0(x_0=C_0 | x_t) / q(x_t | x_0=C_0) + ... + p_0(x_0=C_{k-1} | x_t) / q(x_t | x_0=C_{k-1}), ... , # sum_n = p_n(x_0=C_0 | x_t) / q(x_t | x_0=C_0) + ... + p_n(x_0=C_{k-1} | x_t) / q(x_t | x_0=C_{k-1}) q_log_sum_exp = torch.logsumexp(q, dim=1, keepdim=True) # p_0(x_0=C_0 | x_t) / q(x_t | x_0=C_0) / sum_0 ... p_n(x_0=C_0 | x_t) / q(x_t | x_0=C_0) / sum_n # . . . # . . . # . . . # p_0(x_0=C_{k-1} | x_t) / q(x_t | x_0=C_{k-1}) / sum_0 ... p_n(x_0=C_{k-1} | x_t) / q(x_t | x_0=C_{k-1}) / sum_n q = q - q_log_sum_exp # (p_0(x_0=C_0 | x_t) / q(x_t | x_0=C_0) / sum_0) * a_cumulative_{t-1} + b_cumulative_{t-1} ... (p_n(x_0=C_0 | x_t) / q(x_t | x_0=C_0) / sum_n) * a_cumulative_{t-1} + b_cumulative_{t-1} # . . . # . . . # . . . # (p_0(x_0=C_{k-1} | x_t) / q(x_t | x_0=C_{k-1}) / sum_0) * a_cumulative_{t-1} + b_cumulative_{t-1} ... (p_n(x_0=C_{k-1} | x_t) / q(x_t | x_0=C_{k-1}) / sum_n) * a_cumulative_{t-1} + b_cumulative_{t-1} # c_cumulative_{t-1} ... 
c_cumulative_{t-1} q = self.apply_cumulative_transitions(q, t - 1) # ((p_0(x_0=C_0 | x_t) / q(x_t | x_0=C_0) / sum_0) * a_cumulative_{t-1} + b_cumulative_{t-1}) * q(x_t | x_{t-1}=C_0) * sum_0 ... ((p_n(x_0=C_0 | x_t) / q(x_t | x_0=C_0) / sum_n) * a_cumulative_{t-1} + b_cumulative_{t-1}) * q(x_t | x_{t-1}=C_0) * sum_n # . . . # . . . # . . . # ((p_0(x_0=C_{k-1} | x_t) / q(x_t | x_0=C_{k-1}) / sum_0) * a_cumulative_{t-1} + b_cumulative_{t-1}) * q(x_t | x_{t-1}=C_{k-1}) * sum_0 ... ((p_n(x_0=C_{k-1} | x_t) / q(x_t | x_0=C_{k-1}) / sum_n) * a_cumulative_{t-1} + b_cumulative_{t-1}) * q(x_t | x_{t-1}=C_{k-1}) * sum_n # c_cumulative_{t-1} * q(x_t | x_{t-1}=C_k) * sum_0 ... c_cumulative_{t-1} * q(x_t | x_{t-1}=C_k) * sum_0 log_p_x_t_min_1 = q + log_q_t_given_x_t_min_1 + q_log_sum_exp # For each column, there are two possible cases. # # Where: # - sum(p_n(x_0))) is summing over all classes for x_0 # - C_i is the class transitioning from (not to be confused with c_t and c_cumulative_t being used for gamma's) # - C_j is the class transitioning to # # 1. x_t is masked i.e. x_t = c_k # # Simplifying the expression, the column vector is: # . # . # . # (c_t / c_cumulative_t) * (a_cumulative_{t-1} * p_n(x_0 = C_i | x_t) + b_cumulative_{t-1} * sum(p_n(x_0))) # . # . # . # (c_cumulative_{t-1} / c_cumulative_t) * sum(p_n(x_0)) # # From equation (11) stated in terms of forward probabilities, the last row is trivially verified. # # For the other rows, we can state the equation as ... # # (c_t / c_cumulative_t) * [b_cumulative_{t-1} * p(x_0=c_0) + ... + (a_cumulative_{t-1} + b_cumulative_{t-1}) * p(x_0=C_i) + ... + b_cumulative_{k-1} * p(x_0=c_{k-1})] # # This verifies the other rows. # # 2. x_t is not masked # # Simplifying the expression, there are two cases for the rows of the column vector, where C_j = C_i and where C_j != C_i: # . # . # . # C_j != C_i: b_t * ((b_cumulative_{t-1} / b_cumulative_t) * p_n(x_0 = c_0) + ... + ((a_cumulative_{t-1} + b_cumulative_{t-1}) / b_cumulative_t) * p_n(x_0 = C_i) + ... + (b_cumulative_{t-1} / (a_cumulative_t + b_cumulative_t)) * p_n(c_0=C_j) + ... + (b_cumulative_{t-1} / b_cumulative_t) * p_n(x_0 = c_{k-1})) # . # . # . # C_j = C_i: (a_t + b_t) * ((b_cumulative_{t-1} / b_cumulative_t) * p_n(x_0 = c_0) + ... + ((a_cumulative_{t-1} + b_cumulative_{t-1}) / (a_cumulative_t + b_cumulative_t)) * p_n(x_0 = C_i = C_j) + ... + (b_cumulative_{t-1} / b_cumulative_t) * p_n(x_0 = c_{k-1})) # . # . # . # 0 # # The last row is trivially verified. The other rows can be verified by directly expanding equation (11) stated in terms of forward probabilities. return log_p_x_t_min_1 def log_Q_t_transitioning_to_known_class( self, *, t: torch.int, x_t: torch.LongTensor, log_onehot_x_t: torch.Tensor, cumulative: bool ): """ Calculates the log probabilities of the rows from the (cumulative or non-cumulative) transition matrix for each latent pixel in `x_t`. Args: t (`torch.Long`): The timestep that determines which transition matrix is used. x_t (`torch.LongTensor` of shape `(batch size, num latent pixels)`): The classes of each latent pixel at time `t`. log_onehot_x_t (`torch.Tensor` of shape `(batch size, num classes, num latent pixels)`): The log one-hot vectors of `x_t`. cumulative (`bool`): If cumulative is `False`, the single step transition matrix `t-1`->`t` is used. If cumulative is `True`, the cumulative transition matrix `0`->`t` is used. 
Returns: `torch.Tensor` of shape `(batch size, num classes - 1, num latent pixels)`: Each _column_ of the returned matrix is a _row_ of log probabilities of the complete probability transition matrix. When non cumulative, returns `self.num_classes - 1` rows because the initial latent pixel cannot be masked. Where: - `q_n` is the probability distribution for the forward process of the `n`th latent pixel. - C_0 is a class of a latent pixel embedding - C_k is the class of the masked latent pixel non-cumulative result (omitting logarithms): ``` q_0(x_t | x_{t-1} = C_0) ... q_n(x_t | x_{t-1} = C_0) . . . . . . . . . q_0(x_t | x_{t-1} = C_k) ... q_n(x_t | x_{t-1} = C_k) ``` cumulative result (omitting logarithms): ``` q_0_cumulative(x_t | x_0 = C_0) ... q_n_cumulative(x_t | x_0 = C_0) . . . . . . . . . q_0_cumulative(x_t | x_0 = C_{k-1}) ... q_n_cumulative(x_t | x_0 = C_{k-1}) ``` """ if cumulative: a = self.log_cumprod_at[t] b = self.log_cumprod_bt[t] c = self.log_cumprod_ct[t] else: a = self.log_at[t] b = self.log_bt[t] c = self.log_ct[t] if not cumulative: # The values in the onehot vector can also be used as the logprobs for transitioning # from masked latent pixels. If we are not calculating the cumulative transitions, # we need to save these vectors to be re-appended to the final matrix so the values # aren't overwritten. # # `P(x_t!=mask|x_{t-1=mask}) = 0` and 0 will be the value of the last row of the onehot vector # if x_t is not masked # # `P(x_t=mask|x_{t-1=mask}) = 1` and 1 will be the value of the last row of the onehot vector # if x_t is masked log_onehot_x_t_transitioning_from_masked = log_onehot_x_t[:, -1, :].unsqueeze(1) # `index_to_log_onehot` will add onehot vectors for masked pixels, # so the default one hot matrix has one too many rows. See the doc string # for an explanation of the dimensionality of the returned matrix. log_onehot_x_t = log_onehot_x_t[:, :-1, :] # this is a cheeky trick to produce the transition probabilities using log one-hot vectors. # # Don't worry about what values this sets in the columns that mark transitions # to masked latent pixels. They are overwrote later with the `mask_class_mask`. # # Looking at the below logspace formula in non-logspace, each value will evaluate to either # `1 * a + b = a + b` where `log_Q_t` has the one hot value in the column # or # `0 * a + b = b` where `log_Q_t` has the 0 values in the column. # # See equation 7 for more details. log_Q_t = (log_onehot_x_t + a).logaddexp(b) # The whole column of each masked pixel is `c` mask_class_mask = x_t == self.mask_class mask_class_mask = mask_class_mask.unsqueeze(1).expand(-1, self.num_embed - 1, -1) log_Q_t[mask_class_mask] = c if not cumulative: log_Q_t = torch.cat((log_Q_t, log_onehot_x_t_transitioning_from_masked), dim=1) return log_Q_t def apply_cumulative_transitions(self, q, t): bsz = q.shape[0] a = self.log_cumprod_at[t] b = self.log_cumprod_bt[t] c = self.log_cumprod_ct[t] num_latent_pixels = q.shape[2] c = c.expand(bsz, 1, num_latent_pixels) q = (q + a).logaddexp(b) q = torch.cat((q, c), dim=1) return q
diffusers/src/diffusers/schedulers/scheduling_vq_diffusion.py/0
{ "file_path": "diffusers/src/diffusers/schedulers/scheduling_vq_diffusion.py", "repo_id": "diffusers", "token_count": 12476 }
185
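# Illustrative usage sketch for the VQDiffusionScheduler shown above (not part of the
# original file). It assumes a tiny vocabulary of 10 classes and the default schedules;
# a real pipeline would replace the random log-probabilities below with the transformer's
# predicted log p(x_0) over the non-masked classes.
import torch

from diffusers import VQDiffusionScheduler

scheduler = VQDiffusionScheduler(num_vec_classes=10, num_train_timesteps=100)
scheduler.set_timesteps(100, device="cpu")

batch_size, num_latent_pixels = 1, 16
# start from a fully masked sample; by convention the mask class is the last index
sample = torch.full((batch_size, num_latent_pixels), scheduler.mask_class, dtype=torch.long)

generator = torch.manual_seed(0)
for t in scheduler.timesteps:
    # stand-in for the model output: log-probabilities of shape (batch, num classes - 1, num pixels)
    log_p_x_0 = torch.randn(batch_size, scheduler.num_embed - 1, num_latent_pixels).log_softmax(dim=1)
    sample = scheduler.step(log_p_x_0, t, sample, generator=generator).prev_sample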
# This file is autogenerated by the command `make fix-copies`, do not edit. from ..utils import DummyObject, requires_backends class LMSDiscreteScheduler(metaclass=DummyObject): _backends = ["torch", "scipy"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "scipy"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "scipy"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "scipy"])
diffusers/src/diffusers/utils/dummy_torch_and_scipy_objects.py/0
{ "file_path": "diffusers/src/diffusers/utils/dummy_torch_and_scipy_objects.py", "repo_id": "diffusers", "token_count": 220 }
186
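# A simplified, self-contained sketch of the dummy-object pattern used in the file above
# (the real `DummyObject`/`requires_backends` helpers live in `diffusers.utils` and also
# handle the case where the backends actually are installed): placeholder classes import
# cleanly but raise an informative error the moment they are used.
def requires_backends(obj, backends):
    # in this sketch we pretend none of the backends are installed
    name = obj.__name__ if isinstance(obj, type) else type(obj).__name__
    raise ImportError(f"{name} requires the following backends to be installed: {', '.join(backends)}")


class DummyObject(type):
    """Metaclass that routes any class-level attribute access to the backend check."""

    def __getattr__(cls, key):
        requires_backends(cls, cls._backends)


class LMSDiscreteScheduler(metaclass=DummyObject):
    _backends = ["torch", "scipy"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "scipy"])


# LMSDiscreteScheduler()  # would raise an ImportError listing torch and scipy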
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Generic utilities """ from collections import OrderedDict from dataclasses import fields, is_dataclass from typing import Any, Tuple import numpy as np from .import_utils import is_torch_available, is_torch_version def is_tensor(x) -> bool: """ Tests if `x` is a `torch.Tensor` or `np.ndarray`. """ if is_torch_available(): import torch if isinstance(x, torch.Tensor): return True return isinstance(x, np.ndarray) class BaseOutput(OrderedDict): """ Base class for all model outputs as dataclass. Has a `__getitem__` that allows indexing by integer or slice (like a tuple) or strings (like a dictionary) that will ignore the `None` attributes. Otherwise behaves like a regular Python dictionary. <Tip warning={true}> You can't unpack a [`BaseOutput`] directly. Use the [`~utils.BaseOutput.to_tuple`] method to convert it to a tuple first. </Tip> """ def __init_subclass__(cls) -> None: """Register subclasses as pytree nodes. This is necessary to synchronize gradients when using `torch.nn.parallel.DistributedDataParallel` with `static_graph=True` with modules that output `ModelOutput` subclasses. """ if is_torch_available(): import torch.utils._pytree if is_torch_version("<", "2.2"): torch.utils._pytree._register_pytree_node( cls, torch.utils._pytree._dict_flatten, lambda values, context: cls(**torch.utils._pytree._dict_unflatten(values, context)), ) else: torch.utils._pytree.register_pytree_node( cls, torch.utils._pytree._dict_flatten, lambda values, context: cls(**torch.utils._pytree._dict_unflatten(values, context)), serialized_type_name=f"{cls.__module__}.{cls.__name__}", ) def __post_init__(self) -> None: class_fields = fields(self) # Safety and consistency checks if not len(class_fields): raise ValueError(f"{self.__class__.__name__} has no fields.") first_field = getattr(self, class_fields[0].name) other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:]) if other_fields_are_none and isinstance(first_field, dict): for key, value in first_field.items(): self[key] = value else: for field in class_fields: v = getattr(self, field.name) if v is not None: self[field.name] = v def __delitem__(self, *args, **kwargs): raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.") def setdefault(self, *args, **kwargs): raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.") def pop(self, *args, **kwargs): raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.") def update(self, *args, **kwargs): raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.") def __getitem__(self, k: Any) -> Any: if isinstance(k, str): inner_dict = dict(self.items()) return inner_dict[k] else: return self.to_tuple()[k] def __setattr__(self, name: Any, value: Any) -> None: if name in self.keys() and value is not None: # Don't call self.__setitem__ to avoid recursion errors 
super().__setitem__(name, value) super().__setattr__(name, value) def __setitem__(self, key, value): # Will raise a KeyException if needed super().__setitem__(key, value) # Don't call self.__setattr__ to avoid recursion errors super().__setattr__(key, value) def __reduce__(self): if not is_dataclass(self): return super().__reduce__() callable, _args, *remaining = super().__reduce__() args = tuple(getattr(self, field.name) for field in fields(self)) return callable, args, *remaining def to_tuple(self) -> Tuple[Any, ...]: """ Convert self to a tuple containing all the attributes/keys that are not `None`. """ return tuple(self[k] for k in self.keys())
diffusers/src/diffusers/utils/outputs.py/0
{ "file_path": "diffusers/src/diffusers/utils/outputs.py", "repo_id": "diffusers", "token_count": 2081 }
187
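# Small usage sketch for the BaseOutput behaviour documented above (the `ExampleOutput`
# subclass is hypothetical, for demonstration only): fields left as None are skipped, and
# an instance can be indexed like a dict (by key) or like a tuple (by position).
from dataclasses import dataclass
from typing import Optional

import torch

from diffusers.utils import BaseOutput


@dataclass
class ExampleOutput(BaseOutput):
    sample: torch.Tensor = None
    extra: Optional[torch.Tensor] = None


out = ExampleOutput(sample=torch.zeros(2, 3))
assert torch.equal(out["sample"], out.sample)  # dict-style access
assert torch.equal(out[0], out.sample)         # tuple-style access
assert len(out.to_tuple()) == 1                # `extra` is None, so it is dropped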
# Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import contextlib import gc import unittest import torch from parameterized import parameterized from diffusers.hooks import HookRegistry, ModelHook from diffusers.models import ModelMixin from diffusers.pipelines.pipeline_utils import DiffusionPipeline from diffusers.utils import get_logger from diffusers.utils.import_utils import compare_versions from diffusers.utils.testing_utils import ( backend_empty_cache, backend_max_memory_allocated, backend_reset_peak_memory_stats, require_torch_accelerator, torch_device, ) class DummyBlock(torch.nn.Module): def __init__(self, in_features: int, hidden_features: int, out_features: int) -> None: super().__init__() self.proj_in = torch.nn.Linear(in_features, hidden_features) self.activation = torch.nn.ReLU() self.proj_out = torch.nn.Linear(hidden_features, out_features) def forward(self, x: torch.Tensor) -> torch.Tensor: x = self.proj_in(x) x = self.activation(x) x = self.proj_out(x) return x class DummyModel(ModelMixin): def __init__(self, in_features: int, hidden_features: int, out_features: int, num_layers: int) -> None: super().__init__() self.linear_1 = torch.nn.Linear(in_features, hidden_features) self.activation = torch.nn.ReLU() self.blocks = torch.nn.ModuleList( [DummyBlock(hidden_features, hidden_features, hidden_features) for _ in range(num_layers)] ) self.linear_2 = torch.nn.Linear(hidden_features, out_features) def forward(self, x: torch.Tensor) -> torch.Tensor: x = self.linear_1(x) x = self.activation(x) for block in self.blocks: x = block(x) x = self.linear_2(x) return x # This model implementation contains one type of block (single_blocks) instantiated before another type of block (double_blocks). # The invocation order of these blocks, however, is first the double_blocks and then the single_blocks. # With group offloading implementation before https://github.com/huggingface/diffusers/pull/11375, such a modeling implementation # would result in a device mismatch error because of the assumptions made by the code. The failure case occurs when using: # offload_type="block_level", num_blocks_per_group=2, use_stream=True # Post the linked PR, the implementation will work as expected. 
class DummyModelWithMultipleBlocks(ModelMixin): def __init__( self, in_features: int, hidden_features: int, out_features: int, num_layers: int, num_single_layers: int ) -> None: super().__init__() self.linear_1 = torch.nn.Linear(in_features, hidden_features) self.activation = torch.nn.ReLU() self.single_blocks = torch.nn.ModuleList( [DummyBlock(hidden_features, hidden_features, hidden_features) for _ in range(num_single_layers)] ) self.double_blocks = torch.nn.ModuleList( [DummyBlock(hidden_features, hidden_features, hidden_features) for _ in range(num_layers)] ) self.linear_2 = torch.nn.Linear(hidden_features, out_features) def forward(self, x: torch.Tensor) -> torch.Tensor: x = self.linear_1(x) x = self.activation(x) for block in self.double_blocks: x = block(x) for block in self.single_blocks: x = block(x) x = self.linear_2(x) return x # Test for https://github.com/huggingface/diffusers/pull/12077 class DummyModelWithLayerNorm(ModelMixin): def __init__(self, in_features: int, hidden_features: int, out_features: int, num_layers: int) -> None: super().__init__() self.linear_1 = torch.nn.Linear(in_features, hidden_features) self.activation = torch.nn.ReLU() self.blocks = torch.nn.ModuleList( [DummyBlock(hidden_features, hidden_features, hidden_features) for _ in range(num_layers)] ) self.layer_norm = torch.nn.LayerNorm(hidden_features, elementwise_affine=True) self.linear_2 = torch.nn.Linear(hidden_features, out_features) def forward(self, x: torch.Tensor) -> torch.Tensor: x = self.linear_1(x) x = self.activation(x) for block in self.blocks: x = block(x) x = self.layer_norm(x) x = self.linear_2(x) return x class DummyPipeline(DiffusionPipeline): model_cpu_offload_seq = "model" def __init__(self, model: torch.nn.Module) -> None: super().__init__() self.register_modules(model=model) def __call__(self, x: torch.Tensor) -> torch.Tensor: for _ in range(2): x = x + 0.1 * self.model(x) return x class LayerOutputTrackerHook(ModelHook): def __init__(self): super().__init__() self.outputs = [] def post_forward(self, module, output): self.outputs.append(output) return output @require_torch_accelerator class GroupOffloadTests(unittest.TestCase): in_features = 64 hidden_features = 256 out_features = 64 num_layers = 4 def setUp(self): with torch.no_grad(): self.model = self.get_model() self.input = torch.randn((4, self.in_features)).to(torch_device) def tearDown(self): super().tearDown() del self.model del self.input gc.collect() backend_empty_cache(torch_device) backend_reset_peak_memory_stats(torch_device) def get_model(self): torch.manual_seed(0) return DummyModel( in_features=self.in_features, hidden_features=self.hidden_features, out_features=self.out_features, num_layers=self.num_layers, ) def test_offloading_forward_pass(self): @torch.no_grad() def run_forward(model): gc.collect() backend_empty_cache(torch_device) backend_reset_peak_memory_stats(torch_device) self.assertTrue( all( module._diffusers_hook.get_hook("group_offloading") is not None for module in model.modules() if hasattr(module, "_diffusers_hook") ) ) model.eval() output = model(self.input)[0].cpu() max_memory_allocated = backend_max_memory_allocated(torch_device) return output, max_memory_allocated self.model.to(torch_device) output_without_group_offloading, mem_baseline = run_forward(self.model) self.model.to("cpu") model = self.get_model() model.enable_group_offload(torch_device, offload_type="block_level", num_blocks_per_group=3) output_with_group_offloading1, mem1 = run_forward(model) model = self.get_model() 
model.enable_group_offload(torch_device, offload_type="block_level", num_blocks_per_group=1) output_with_group_offloading2, mem2 = run_forward(model) model = self.get_model() model.enable_group_offload(torch_device, offload_type="block_level", num_blocks_per_group=1, use_stream=True) output_with_group_offloading3, mem3 = run_forward(model) model = self.get_model() model.enable_group_offload(torch_device, offload_type="leaf_level") output_with_group_offloading4, mem4 = run_forward(model) model = self.get_model() model.enable_group_offload(torch_device, offload_type="leaf_level", use_stream=True) output_with_group_offloading5, mem5 = run_forward(model) # Precision assertions - offloading should not impact the output self.assertTrue(torch.allclose(output_without_group_offloading, output_with_group_offloading1, atol=1e-5)) self.assertTrue(torch.allclose(output_without_group_offloading, output_with_group_offloading2, atol=1e-5)) self.assertTrue(torch.allclose(output_without_group_offloading, output_with_group_offloading3, atol=1e-5)) self.assertTrue(torch.allclose(output_without_group_offloading, output_with_group_offloading4, atol=1e-5)) self.assertTrue(torch.allclose(output_without_group_offloading, output_with_group_offloading5, atol=1e-5)) # Memory assertions - offloading should reduce memory usage self.assertTrue(mem4 <= mem5 < mem2 <= mem3 < mem1 < mem_baseline) def test_warning_logged_if_group_offloaded_module_moved_to_accelerator(self): if torch.device(torch_device).type not in ["cuda", "xpu"]: return self.model.enable_group_offload(torch_device, offload_type="block_level", num_blocks_per_group=3) logger = get_logger("diffusers.models.modeling_utils") logger.setLevel("INFO") with self.assertLogs(logger, level="WARNING") as cm: self.model.to(torch_device) self.assertIn(f"The module '{self.model.__class__.__name__}' is group offloaded", cm.output[0]) def test_warning_logged_if_group_offloaded_pipe_moved_to_accelerator(self): if torch.device(torch_device).type not in ["cuda", "xpu"]: return pipe = DummyPipeline(self.model) self.model.enable_group_offload(torch_device, offload_type="block_level", num_blocks_per_group=3) logger = get_logger("diffusers.pipelines.pipeline_utils") logger.setLevel("INFO") with self.assertLogs(logger, level="WARNING") as cm: pipe.to(torch_device) self.assertIn(f"The module '{self.model.__class__.__name__}' is group offloaded", cm.output[0]) def test_error_raised_if_streams_used_and_no_accelerator_device(self): torch_accelerator_module = getattr(torch, torch_device, torch.cuda) original_is_available = torch_accelerator_module.is_available torch_accelerator_module.is_available = lambda: False with self.assertRaises(ValueError): self.model.enable_group_offload( onload_device=torch.device(torch_device), offload_type="leaf_level", use_stream=True ) torch_accelerator_module.is_available = original_is_available def test_error_raised_if_supports_group_offloading_false(self): self.model._supports_group_offloading = False with self.assertRaisesRegex(ValueError, "does not support group offloading"): self.model.enable_group_offload(onload_device=torch.device(torch_device)) def test_error_raised_if_model_offloading_applied_on_group_offloaded_module(self): pipe = DummyPipeline(self.model) pipe.model.enable_group_offload(torch_device, offload_type="block_level", num_blocks_per_group=3) with self.assertRaisesRegex(ValueError, "You are trying to apply model/sequential CPU offloading"): pipe.enable_model_cpu_offload() def 
test_error_raised_if_sequential_offloading_applied_on_group_offloaded_module(self): pipe = DummyPipeline(self.model) pipe.model.enable_group_offload(torch_device, offload_type="block_level", num_blocks_per_group=3) with self.assertRaisesRegex(ValueError, "You are trying to apply model/sequential CPU offloading"): pipe.enable_sequential_cpu_offload() def test_error_raised_if_group_offloading_applied_on_model_offloaded_module(self): pipe = DummyPipeline(self.model) pipe.enable_model_cpu_offload() with self.assertRaisesRegex(ValueError, "Cannot apply group offloading"): pipe.model.enable_group_offload(torch_device, offload_type="block_level", num_blocks_per_group=3) def test_error_raised_if_group_offloading_applied_on_sequential_offloaded_module(self): pipe = DummyPipeline(self.model) pipe.enable_sequential_cpu_offload() with self.assertRaisesRegex(ValueError, "Cannot apply group offloading"): pipe.model.enable_group_offload(torch_device, offload_type="block_level", num_blocks_per_group=3) def test_block_level_stream_with_invocation_order_different_from_initialization_order(self): if torch.device(torch_device).type not in ["cuda", "xpu"]: return model = DummyModelWithMultipleBlocks( in_features=self.in_features, hidden_features=self.hidden_features, out_features=self.out_features, num_layers=self.num_layers, num_single_layers=self.num_layers + 1, ) model.enable_group_offload(torch_device, offload_type="block_level", num_blocks_per_group=1, use_stream=True) context = contextlib.nullcontext() if compare_versions("diffusers", "<=", "0.33.0"): # Will raise a device mismatch RuntimeError mentioning weights are on CPU but input is on device context = self.assertRaisesRegex(RuntimeError, "Expected all tensors to be on the same device") with context: model(self.input) @parameterized.expand([("block_level",), ("leaf_level",)]) def test_block_level_offloading_with_parameter_only_module_group(self, offload_type: str): if torch.device(torch_device).type not in ["cuda", "xpu"]: return def apply_layer_output_tracker_hook(model: DummyModelWithLayerNorm): for name, module in model.named_modules(): registry = HookRegistry.check_if_exists_or_initialize(module) hook = LayerOutputTrackerHook() registry.register_hook(hook, "layer_output_tracker") model_ref = DummyModelWithLayerNorm(128, 256, 128, 2) model = DummyModelWithLayerNorm(128, 256, 128, 2) model.load_state_dict(model_ref.state_dict(), strict=True) model_ref.to(torch_device) model.enable_group_offload(torch_device, offload_type=offload_type, num_blocks_per_group=1, use_stream=True) apply_layer_output_tracker_hook(model_ref) apply_layer_output_tracker_hook(model) x = torch.randn(2, 128).to(torch_device) out_ref = model_ref(x) out = model(x) self.assertTrue(torch.allclose(out_ref, out, atol=1e-5), "Outputs do not match.") num_repeats = 4 for i in range(num_repeats): out_ref = model_ref(x) out = model(x) self.assertTrue(torch.allclose(out_ref, out, atol=1e-5), "Outputs do not match after multiple invocations.") for (ref_name, ref_module), (name, module) in zip(model_ref.named_modules(), model.named_modules()): assert ref_name == name ref_outputs = ( HookRegistry.check_if_exists_or_initialize(ref_module).get_hook("layer_output_tracker").outputs ) outputs = HookRegistry.check_if_exists_or_initialize(module).get_hook("layer_output_tracker").outputs cumulated_absmax = 0.0 for i in range(len(outputs)): diff = ref_outputs[0] - outputs[i] absdiff = diff.abs() absmax = absdiff.max().item() cumulated_absmax += absmax self.assertLess( cumulated_absmax, 1e-5, f"Output 
differences for {name} exceeded threshold: {cumulated_absmax:.5f}" )
diffusers/tests/hooks/test_group_offloading.py/0
{ "file_path": "diffusers/tests/hooks/test_group_offloading.py", "repo_id": "diffusers", "token_count": 6609 }
188
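# Minimal sketch of what the tests above exercise: enable group offloading on the
# DummyModel defined earlier in the file and run a forward pass. This assumes a CUDA
# (or other supported) accelerator is available; parameters live on CPU and are
# onloaded group by group during the forward pass.
import torch

model = DummyModel(in_features=64, hidden_features=256, out_features=64, num_layers=4)
model.enable_group_offload(torch.device("cuda"), offload_type="block_level", num_blocks_per_group=2)

with torch.no_grad():
    out = model(torch.randn(4, 64, device="cuda"))
print(out.shape)  # torch.Size([4, 64])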
# Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import sys import tempfile import unittest import numpy as np import safetensors.torch import torch from PIL import Image from transformers import AutoTokenizer, T5EncoderModel from diffusers import AutoencoderKLWan, FlowMatchEulerDiscreteScheduler, WanVACEPipeline, WanVACETransformer3DModel from diffusers.utils.import_utils import is_peft_available from diffusers.utils.testing_utils import ( floats_tensor, is_flaky, require_peft_backend, require_peft_version_greater, skip_mps, torch_device, ) if is_peft_available(): from peft.utils import get_peft_model_state_dict sys.path.append(".") from utils import PeftLoraLoaderMixinTests # noqa: E402 @require_peft_backend @skip_mps @is_flaky(max_attempts=10, description="very flaky class") class WanVACELoRATests(unittest.TestCase, PeftLoraLoaderMixinTests): pipeline_class = WanVACEPipeline scheduler_cls = FlowMatchEulerDiscreteScheduler scheduler_classes = [FlowMatchEulerDiscreteScheduler] scheduler_kwargs = {} transformer_kwargs = { "patch_size": (1, 2, 2), "num_attention_heads": 2, "attention_head_dim": 8, "in_channels": 4, "out_channels": 4, "text_dim": 32, "freq_dim": 16, "ffn_dim": 16, "num_layers": 2, "cross_attn_norm": True, "qk_norm": "rms_norm_across_heads", "rope_max_seq_len": 16, "vace_layers": [0], "vace_in_channels": 72, } transformer_cls = WanVACETransformer3DModel vae_kwargs = { "base_dim": 3, "z_dim": 4, "dim_mult": [1, 1, 1, 1], "latents_mean": torch.randn(4).numpy().tolist(), "latents_std": torch.randn(4).numpy().tolist(), "num_res_blocks": 1, "temperal_downsample": [False, True, True], } vae_cls = AutoencoderKLWan has_two_text_encoders = True tokenizer_cls, tokenizer_id = AutoTokenizer, "hf-internal-testing/tiny-random-t5" text_encoder_cls, text_encoder_id = T5EncoderModel, "hf-internal-testing/tiny-random-t5" text_encoder_target_modules = ["q", "k", "v", "o"] @property def output_shape(self): return (1, 9, 16, 16, 3) def get_dummy_inputs(self, with_generator=True): batch_size = 1 sequence_length = 16 num_channels = 4 num_frames = 9 num_latent_frames = 3 # (num_frames - 1) // temporal_compression_ratio + 1 sizes = (4, 4) height, width = 16, 16 generator = torch.manual_seed(0) noise = floats_tensor((batch_size, num_latent_frames, num_channels) + sizes) input_ids = torch.randint(1, sequence_length, size=(batch_size, sequence_length), generator=generator) video = [Image.new("RGB", (height, width))] * num_frames mask = [Image.new("L", (height, width), 0)] * num_frames pipeline_inputs = { "video": video, "mask": mask, "prompt": "", "num_frames": num_frames, "num_inference_steps": 1, "guidance_scale": 6.0, "height": height, "width": height, "max_sequence_length": sequence_length, "output_type": "np", } if with_generator: pipeline_inputs.update({"generator": generator}) return noise, input_ids, pipeline_inputs def test_simple_inference_with_text_lora_denoiser_fused_multi(self): super().test_simple_inference_with_text_lora_denoiser_fused_multi(expected_atol=9e-3) 
def test_simple_inference_with_text_denoiser_lora_unfused(self): super().test_simple_inference_with_text_denoiser_lora_unfused(expected_atol=9e-3) @unittest.skip("Not supported in Wan VACE.") def test_simple_inference_with_text_denoiser_block_scale(self): pass @unittest.skip("Not supported in Wan VACE.") def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self): pass @unittest.skip("Not supported in Wan VACE.") def test_modify_padding_mode(self): pass @unittest.skip("Text encoder LoRA is not supported in Wan VACE.") def test_simple_inference_with_partial_text_lora(self): pass @unittest.skip("Text encoder LoRA is not supported in Wan VACE.") def test_simple_inference_with_text_lora(self): pass @unittest.skip("Text encoder LoRA is not supported in Wan VACE.") def test_simple_inference_with_text_lora_and_scale(self): pass @unittest.skip("Text encoder LoRA is not supported in Wan VACE.") def test_simple_inference_with_text_lora_fused(self): pass @unittest.skip("Text encoder LoRA is not supported in Wan VACE.") def test_simple_inference_with_text_lora_save_load(self): pass def test_layerwise_casting_inference_denoiser(self): super().test_layerwise_casting_inference_denoiser() @require_peft_version_greater("0.13.2") def test_lora_exclude_modules_wanvace(self): scheduler_cls = self.scheduler_classes[0] exclude_module_name = "vace_blocks.0.proj_out" components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls) pipe = self.pipeline_class(**components).to(torch_device) _, _, inputs = self.get_dummy_inputs(with_generator=False) output_no_lora = pipe(**inputs, generator=torch.manual_seed(0))[0] self.assertTrue(output_no_lora.shape == self.output_shape) # only supported for `denoiser` now denoiser_lora_config.target_modules = ["proj_out"] denoiser_lora_config.exclude_modules = [exclude_module_name] pipe, _ = self.add_adapters_to_pipeline( pipe, text_lora_config=text_lora_config, denoiser_lora_config=denoiser_lora_config ) # The state dict shouldn't contain the modules to be excluded from LoRA. state_dict_from_model = get_peft_model_state_dict(pipe.transformer, adapter_name="default") self.assertTrue(not any(exclude_module_name in k for k in state_dict_from_model)) self.assertTrue(any("proj_out" in k for k in state_dict_from_model)) output_lora_exclude_modules = pipe(**inputs, generator=torch.manual_seed(0))[0] with tempfile.TemporaryDirectory() as tmpdir: modules_to_save = self._get_modules_to_save(pipe, has_denoiser=True) lora_state_dicts = self._get_lora_state_dicts(modules_to_save) self.pipeline_class.save_lora_weights(save_directory=tmpdir, **lora_state_dicts) pipe.unload_lora_weights() # Check in the loaded state dict. loaded_state_dict = safetensors.torch.load_file(os.path.join(tmpdir, "pytorch_lora_weights.safetensors")) self.assertTrue(not any(exclude_module_name in k for k in loaded_state_dict)) self.assertTrue(any("proj_out" in k for k in loaded_state_dict)) # Check in the state dict obtained after loading LoRA. 
pipe.load_lora_weights(tmpdir) state_dict_from_model = get_peft_model_state_dict(pipe.transformer, adapter_name="default_0") self.assertTrue(not any(exclude_module_name in k for k in state_dict_from_model)) self.assertTrue(any("proj_out" in k for k in state_dict_from_model)) output_lora_pretrained = pipe(**inputs, generator=torch.manual_seed(0))[0] self.assertTrue( not np.allclose(output_no_lora, output_lora_exclude_modules, atol=1e-3, rtol=1e-3), "LoRA should change outputs.", ) self.assertTrue( np.allclose(output_lora_exclude_modules, output_lora_pretrained, atol=1e-3, rtol=1e-3), "Lora outputs should match.", ) def test_simple_inference_with_text_denoiser_lora_and_scale(self): super().test_simple_inference_with_text_denoiser_lora_and_scale()
diffusers/tests/lora/test_lora_layers_wanvace.py/0
{ "file_path": "diffusers/tests/lora/test_lora_layers_wanvace.py", "repo_id": "diffusers", "token_count": 3714 }
189
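# Hedged sketch of the PEFT config exercised by `test_lora_exclude_modules_wanvace` above:
# target every `proj_out` layer of the WanVACE transformer while excluding the first VACE
# block's projection. Rank/alpha values are arbitrary; `exclude_modules` requires a peft
# release newer than 0.13.2, matching the test's version guard.
from peft import LoraConfig

denoiser_lora_config = LoraConfig(
    r=4,
    lora_alpha=4,
    target_modules=["proj_out"],
    exclude_modules=["vace_blocks.0.proj_out"],
    init_lora_weights=False,
)
# pipe.transformer.add_adapter(denoiser_lora_config)  # attach to the denoiser, as in the test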
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import torch from diffusers import AutoencoderKLWan from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, torch_device from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class AutoencoderKLWanTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase): model_class = AutoencoderKLWan main_input_name = "sample" base_precision = 1e-2 def get_autoencoder_kl_wan_config(self): return { "base_dim": 3, "z_dim": 16, "dim_mult": [1, 1, 1, 1], "num_res_blocks": 1, "temperal_downsample": [False, True, True], } @property def dummy_input(self): batch_size = 2 num_frames = 9 num_channels = 3 sizes = (16, 16) image = floats_tensor((batch_size, num_channels, num_frames) + sizes).to(torch_device) return {"sample": image} @property def dummy_input_tiling(self): batch_size = 2 num_frames = 9 num_channels = 3 sizes = (128, 128) image = floats_tensor((batch_size, num_channels, num_frames) + sizes).to(torch_device) return {"sample": image} @property def input_shape(self): return (3, 9, 16, 16) @property def output_shape(self): return (3, 9, 16, 16) def prepare_init_args_and_inputs_for_common(self): init_dict = self.get_autoencoder_kl_wan_config() inputs_dict = self.dummy_input return init_dict, inputs_dict def prepare_init_args_and_inputs_for_tiling(self): init_dict = self.get_autoencoder_kl_wan_config() inputs_dict = self.dummy_input_tiling return init_dict, inputs_dict def test_enable_disable_tiling(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_tiling() torch.manual_seed(0) model = self.model_class(**init_dict).to(torch_device) inputs_dict.update({"return_dict": False}) torch.manual_seed(0) output_without_tiling = model(**inputs_dict, generator=torch.manual_seed(0))[0] torch.manual_seed(0) model.enable_tiling(96, 96, 64, 64) output_with_tiling = model(**inputs_dict, generator=torch.manual_seed(0))[0] self.assertLess( (output_without_tiling.detach().cpu().numpy() - output_with_tiling.detach().cpu().numpy()).max(), 0.5, "VAE tiling should not affect the inference results", ) torch.manual_seed(0) model.disable_tiling() output_without_tiling_2 = model(**inputs_dict, generator=torch.manual_seed(0))[0] self.assertEqual( output_without_tiling.detach().cpu().numpy().all(), output_without_tiling_2.detach().cpu().numpy().all(), "Without tiling outputs should match with the outputs when tiling is manually disabled.", ) def test_enable_disable_slicing(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() torch.manual_seed(0) model = self.model_class(**init_dict).to(torch_device) inputs_dict.update({"return_dict": False}) torch.manual_seed(0) output_without_slicing = model(**inputs_dict, generator=torch.manual_seed(0))[0] torch.manual_seed(0) model.enable_slicing() output_with_slicing = model(**inputs_dict, generator=torch.manual_seed(0))[0] self.assertLess( (output_without_slicing.detach().cpu().numpy() - 
output_with_slicing.detach().cpu().numpy()).max(), 0.05, "VAE slicing should not affect the inference results", ) torch.manual_seed(0) model.disable_slicing() output_without_slicing_2 = model(**inputs_dict, generator=torch.manual_seed(0))[0] self.assertEqual( output_without_slicing.detach().cpu().numpy().all(), output_without_slicing_2.detach().cpu().numpy().all(), "Without slicing outputs should match with the outputs when slicing is manually disabled.", ) @unittest.skip("Gradient checkpointing has not been implemented yet") def test_gradient_checkpointing_is_applied(self): pass @unittest.skip("Test not supported") def test_forward_with_norm_groups(self): pass @unittest.skip("RuntimeError: fill_out not implemented for 'Float8_e4m3fn'") def test_layerwise_casting_inference(self): pass @unittest.skip("RuntimeError: fill_out not implemented for 'Float8_e4m3fn'") def test_layerwise_casting_training(self): pass
diffusers/tests/models/autoencoders/test_models_autoencoder_wan.py/0
{ "file_path": "diffusers/tests/models/autoencoders/test_models_autoencoder_wan.py", "repo_id": "diffusers", "token_count": 2245 }
190
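# Sketch (config values copied from the test above) of the tiling and slicing toggles that
# `test_enable_disable_tiling` and `test_enable_disable_slicing` exercise on AutoencoderKLWan.
import torch

from diffusers import AutoencoderKLWan

vae = AutoencoderKLWan(
    base_dim=3,
    z_dim=16,
    dim_mult=[1, 1, 1, 1],
    num_res_blocks=1,
    temperal_downsample=[False, True, True],
)
vae.enable_tiling(96, 96, 64, 64)  # tile sizes / strides, as in the test
vae.enable_slicing()

sample = torch.rand(2, 3, 9, 128, 128)  # (batch, channels, frames, height, width)
with torch.no_grad():
    reconstruction = vae(sample, return_dict=False)[0]

vae.disable_tiling()
vae.disable_slicing()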
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import torch from diffusers import AuraFlowTransformer2DModel from diffusers.utils.testing_utils import enable_full_determinism, torch_device from ..test_modeling_common import ModelTesterMixin enable_full_determinism() class AuraFlowTransformerTests(ModelTesterMixin, unittest.TestCase): model_class = AuraFlowTransformer2DModel main_input_name = "hidden_states" # We override the items here because the transformer under consideration is small. model_split_percents = [0.7, 0.6, 0.6] @property def dummy_input(self): batch_size = 2 num_channels = 4 height = width = embedding_dim = 32 sequence_length = 256 hidden_states = torch.randn((batch_size, num_channels, height, width)).to(torch_device) encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device) timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device) return { "hidden_states": hidden_states, "encoder_hidden_states": encoder_hidden_states, "timestep": timestep, } @property def input_shape(self): return (4, 32, 32) @property def output_shape(self): return (4, 32, 32) def prepare_init_args_and_inputs_for_common(self): init_dict = { "sample_size": 32, "patch_size": 2, "in_channels": 4, "num_mmdit_layers": 1, "num_single_dit_layers": 1, "attention_head_dim": 8, "num_attention_heads": 4, "caption_projection_dim": 32, "joint_attention_dim": 32, "out_channels": 4, "pos_embed_max_size": 256, } inputs_dict = self.dummy_input return init_dict, inputs_dict def test_gradient_checkpointing_is_applied(self): expected_set = {"AuraFlowTransformer2DModel"} super().test_gradient_checkpointing_is_applied(expected_set=expected_set) @unittest.skip("AuraFlowTransformer2DModel uses its own dedicated attention processor. This test does not apply") def test_set_attn_processor_for_determinism(self): pass
diffusers/tests/models/transformers/test_models_transformer_aura_flow.py/0
{ "file_path": "diffusers/tests/models/transformers/test_models_transformer_aura_flow.py", "repo_id": "diffusers", "token_count": 1118 }
191
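# Sketch of building the tiny AuraFlowTransformer2DModel configuration used in the test
# above and running one forward pass with tensors shaped like `dummy_input`.
import torch

from diffusers import AuraFlowTransformer2DModel

model = AuraFlowTransformer2DModel(
    sample_size=32,
    patch_size=2,
    in_channels=4,
    num_mmdit_layers=1,
    num_single_dit_layers=1,
    attention_head_dim=8,
    num_attention_heads=4,
    caption_projection_dim=32,
    joint_attention_dim=32,
    out_channels=4,
    pos_embed_max_size=256,
)

hidden_states = torch.randn(2, 4, 32, 32)
encoder_hidden_states = torch.randn(2, 256, 32)
timestep = torch.randint(0, 1000, (2,))

with torch.no_grad():
    out = model(
        hidden_states=hidden_states,
        encoder_hidden_states=encoder_hidden_states,
        timestep=timestep,
    ).sample
print(out.shape)  # (2, 4, 32, 32)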
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import torch from diffusers import LuminaNextDiT2DModel from diffusers.utils.testing_utils import ( enable_full_determinism, torch_device, ) from ..test_modeling_common import ModelTesterMixin enable_full_determinism() class LuminaNextDiT2DModelTransformerTests(ModelTesterMixin, unittest.TestCase): model_class = LuminaNextDiT2DModel main_input_name = "hidden_states" uses_custom_attn_processor = True @property def dummy_input(self): """ Args: None Returns: Dict: Dictionary of dummy input tensors """ batch_size = 2 # N num_channels = 4 # C height = width = 16 # H, W embedding_dim = 32 # D sequence_length = 16 # L hidden_states = torch.randn((batch_size, num_channels, height, width)).to(torch_device) encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device) timestep = torch.rand(size=(batch_size,)).to(torch_device) encoder_mask = torch.randn(size=(batch_size, sequence_length)).to(torch_device) image_rotary_emb = torch.randn((384, 384, 4)).to(torch_device) return { "hidden_states": hidden_states, "encoder_hidden_states": encoder_hidden_states, "timestep": timestep, "encoder_mask": encoder_mask, "image_rotary_emb": image_rotary_emb, "cross_attention_kwargs": {}, } @property def input_shape(self): """ Args: None Returns: Tuple: (int, int, int) """ return (4, 16, 16) @property def output_shape(self): """ Args: None Returns: Tuple: (int, int, int) """ return (4, 16, 16) def prepare_init_args_and_inputs_for_common(self): """ Args: None Returns: Tuple: (Dict, Dict) """ init_dict = { "sample_size": 16, "patch_size": 2, "in_channels": 4, "hidden_size": 24, "num_layers": 2, "num_attention_heads": 3, "num_kv_heads": 1, "multiple_of": 16, "ffn_dim_multiplier": None, "norm_eps": 1e-5, "learn_sigma": False, "qk_norm": True, "cross_attention_dim": 32, "scaling_factor": 1.0, } inputs_dict = self.dummy_input return init_dict, inputs_dict
diffusers/tests/models/transformers/test_models_transformer_lumina.py/0
{ "file_path": "diffusers/tests/models/transformers/test_models_transformer_lumina.py", "repo_id": "diffusers", "token_count": 1481 }
192
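# Sketch: instantiate the tiny LuminaNextDiT2DModel from the init_dict above and call it
# with the same keyword arguments the test's `dummy_input` provides (shapes copied from
# the test; all values are random).
import torch

from diffusers import LuminaNextDiT2DModel

model = LuminaNextDiT2DModel(
    sample_size=16,
    patch_size=2,
    in_channels=4,
    hidden_size=24,
    num_layers=2,
    num_attention_heads=3,
    num_kv_heads=1,
    multiple_of=16,
    ffn_dim_multiplier=None,
    norm_eps=1e-5,
    learn_sigma=False,
    qk_norm=True,
    cross_attention_dim=32,
    scaling_factor=1.0,
)

inputs = {
    "hidden_states": torch.randn(2, 4, 16, 16),
    "encoder_hidden_states": torch.randn(2, 16, 32),
    "timestep": torch.rand(2),
    "encoder_mask": torch.randn(2, 16),
    "image_rotary_emb": torch.randn(384, 384, 4),
    "cross_attention_kwargs": {},
}
with torch.no_grad():
    out = model(**inputs)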
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np import torch from torch import nn from diffusers import ControlNetXSAdapter, UNet2DConditionModel, UNetControlNetXSModel from diffusers.utils import logging from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, is_flaky, torch_device from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin logger = logging.get_logger(__name__) enable_full_determinism() class UNetControlNetXSModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase): model_class = UNetControlNetXSModel main_input_name = "sample" @property def dummy_input(self): batch_size = 4 num_channels = 4 sizes = (16, 16) conditioning_image_size = (3, 32, 32) # size of additional, unprocessed image for control-conditioning noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device) time_step = torch.tensor([10]).to(torch_device) encoder_hidden_states = floats_tensor((batch_size, 4, 8)).to(torch_device) controlnet_cond = floats_tensor((batch_size, *conditioning_image_size)).to(torch_device) conditioning_scale = 1 return { "sample": noise, "timestep": time_step, "encoder_hidden_states": encoder_hidden_states, "controlnet_cond": controlnet_cond, "conditioning_scale": conditioning_scale, } @property def input_shape(self): return (4, 16, 16) @property def output_shape(self): return (4, 16, 16) def prepare_init_args_and_inputs_for_common(self): init_dict = { "sample_size": 16, "down_block_types": ("DownBlock2D", "CrossAttnDownBlock2D"), "up_block_types": ("CrossAttnUpBlock2D", "UpBlock2D"), "block_out_channels": (4, 8), "cross_attention_dim": 8, "transformer_layers_per_block": 1, "num_attention_heads": 2, "norm_num_groups": 4, "upcast_attention": False, "ctrl_block_out_channels": [2, 4], "ctrl_num_attention_heads": 4, "ctrl_max_norm_num_groups": 2, "ctrl_conditioning_embedding_out_channels": (2, 2), } inputs_dict = self.dummy_input return init_dict, inputs_dict def get_dummy_unet(self): """For some tests we also need the underlying UNet. For these, we'll build the UNetControlNetXSModel from the UNet and ControlNetXS-Adapter""" return UNet2DConditionModel( block_out_channels=(4, 8), layers_per_block=2, sample_size=16, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=8, norm_num_groups=4, use_linear_projection=True, ) def get_dummy_controlnet_from_unet(self, unet, **kwargs): """For some tests we also need the underlying ControlNetXS-Adapter. 
For these, we'll build the UNetControlNetXSModel from the UNet and ControlNetXS-Adapter""" # size_ratio and conditioning_embedding_out_channels chosen to keep model small return ControlNetXSAdapter.from_unet(unet, size_ratio=1, conditioning_embedding_out_channels=(2, 2), **kwargs) def test_from_unet(self): unet = self.get_dummy_unet() controlnet = self.get_dummy_controlnet_from_unet(unet) model = UNetControlNetXSModel.from_unet(unet, controlnet) model_state_dict = model.state_dict() def assert_equal_weights(module, weight_dict_prefix): for param_name, param_value in module.named_parameters(): assert torch.equal(model_state_dict[weight_dict_prefix + "." + param_name], param_value) # # check unet # everything expect down,mid,up blocks modules_from_unet = [ "time_embedding", "conv_in", "conv_norm_out", "conv_out", ] for p in modules_from_unet: assert_equal_weights(getattr(unet, p), "base_" + p) optional_modules_from_unet = [ "class_embedding", "add_time_proj", "add_embedding", ] for p in optional_modules_from_unet: if hasattr(unet, p) and getattr(unet, p) is not None: assert_equal_weights(getattr(unet, p), "base_" + p) # down blocks assert len(unet.down_blocks) == len(model.down_blocks) for i, d in enumerate(unet.down_blocks): assert_equal_weights(d.resnets, f"down_blocks.{i}.base_resnets") if hasattr(d, "attentions"): assert_equal_weights(d.attentions, f"down_blocks.{i}.base_attentions") if hasattr(d, "downsamplers") and getattr(d, "downsamplers") is not None: assert_equal_weights(d.downsamplers[0], f"down_blocks.{i}.base_downsamplers") # mid block assert_equal_weights(unet.mid_block, "mid_block.base_midblock") # up blocks assert len(unet.up_blocks) == len(model.up_blocks) for i, u in enumerate(unet.up_blocks): assert_equal_weights(u.resnets, f"up_blocks.{i}.resnets") if hasattr(u, "attentions"): assert_equal_weights(u.attentions, f"up_blocks.{i}.attentions") if hasattr(u, "upsamplers") and getattr(u, "upsamplers") is not None: assert_equal_weights(u.upsamplers[0], f"up_blocks.{i}.upsamplers") # # check controlnet # everything expect down,mid,up blocks modules_from_controlnet = { "controlnet_cond_embedding": "controlnet_cond_embedding", "conv_in": "ctrl_conv_in", "control_to_base_for_conv_in": "control_to_base_for_conv_in", } optional_modules_from_controlnet = {"time_embedding": "ctrl_time_embedding"} for name_in_controlnet, name_in_unetcnxs in modules_from_controlnet.items(): assert_equal_weights(getattr(controlnet, name_in_controlnet), name_in_unetcnxs) for name_in_controlnet, name_in_unetcnxs in optional_modules_from_controlnet.items(): if hasattr(controlnet, name_in_controlnet) and getattr(controlnet, name_in_controlnet) is not None: assert_equal_weights(getattr(controlnet, name_in_controlnet), name_in_unetcnxs) # down blocks assert len(controlnet.down_blocks) == len(model.down_blocks) for i, d in enumerate(controlnet.down_blocks): assert_equal_weights(d.resnets, f"down_blocks.{i}.ctrl_resnets") assert_equal_weights(d.base_to_ctrl, f"down_blocks.{i}.base_to_ctrl") assert_equal_weights(d.ctrl_to_base, f"down_blocks.{i}.ctrl_to_base") if d.attentions is not None: assert_equal_weights(d.attentions, f"down_blocks.{i}.ctrl_attentions") if d.downsamplers is not None: assert_equal_weights(d.downsamplers, f"down_blocks.{i}.ctrl_downsamplers") # mid block assert_equal_weights(controlnet.mid_block.base_to_ctrl, "mid_block.base_to_ctrl") assert_equal_weights(controlnet.mid_block.midblock, "mid_block.ctrl_midblock") assert_equal_weights(controlnet.mid_block.ctrl_to_base, "mid_block.ctrl_to_base") # 
up blocks assert len(controlnet.up_connections) == len(model.up_blocks) for i, u in enumerate(controlnet.up_connections): assert_equal_weights(u.ctrl_to_base, f"up_blocks.{i}.ctrl_to_base") def test_freeze_unet(self): def assert_frozen(module): for p in module.parameters(): assert not p.requires_grad def assert_unfrozen(module): for p in module.parameters(): assert p.requires_grad init_dict, _ = self.prepare_init_args_and_inputs_for_common() model = UNetControlNetXSModel(**init_dict) model.freeze_unet_params() # # check unet # everything expect down,mid,up blocks modules_from_unet = [ model.base_time_embedding, model.base_conv_in, model.base_conv_norm_out, model.base_conv_out, ] for m in modules_from_unet: assert_frozen(m) optional_modules_from_unet = [ model.base_add_time_proj, model.base_add_embedding, ] for m in optional_modules_from_unet: if m is not None: assert_frozen(m) # down blocks for i, d in enumerate(model.down_blocks): assert_frozen(d.base_resnets) if isinstance(d.base_attentions, nn.ModuleList): # attentions can be list of Nones assert_frozen(d.base_attentions) if d.base_downsamplers is not None: assert_frozen(d.base_downsamplers) # mid block assert_frozen(model.mid_block.base_midblock) # up blocks for i, u in enumerate(model.up_blocks): assert_frozen(u.resnets) if isinstance(u.attentions, nn.ModuleList): # attentions can be list of Nones assert_frozen(u.attentions) if u.upsamplers is not None: assert_frozen(u.upsamplers) # # check controlnet # everything expect down,mid,up blocks modules_from_controlnet = [ model.controlnet_cond_embedding, model.ctrl_conv_in, model.control_to_base_for_conv_in, ] optional_modules_from_controlnet = [model.ctrl_time_embedding] for m in modules_from_controlnet: assert_unfrozen(m) for m in optional_modules_from_controlnet: if m is not None: assert_unfrozen(m) # down blocks for d in model.down_blocks: assert_unfrozen(d.ctrl_resnets) assert_unfrozen(d.base_to_ctrl) assert_unfrozen(d.ctrl_to_base) if isinstance(d.ctrl_attentions, nn.ModuleList): # attentions can be list of Nones assert_unfrozen(d.ctrl_attentions) if d.ctrl_downsamplers is not None: assert_unfrozen(d.ctrl_downsamplers) # mid block assert_unfrozen(model.mid_block.base_to_ctrl) assert_unfrozen(model.mid_block.ctrl_midblock) assert_unfrozen(model.mid_block.ctrl_to_base) # up blocks for u in model.up_blocks: assert_unfrozen(u.ctrl_to_base) def test_gradient_checkpointing_is_applied(self): expected_set = { "Transformer2DModel", "UNetMidBlock2DCrossAttn", "ControlNetXSCrossAttnDownBlock2D", "ControlNetXSCrossAttnMidBlock2D", "ControlNetXSCrossAttnUpBlock2D", } super().test_gradient_checkpointing_is_applied(expected_set=expected_set) @is_flaky def test_forward_no_control(self): unet = self.get_dummy_unet() controlnet = self.get_dummy_controlnet_from_unet(unet) model = UNetControlNetXSModel.from_unet(unet, controlnet) unet = unet.to(torch_device) model = model.to(torch_device) input_ = self.dummy_input control_specific_input = ["controlnet_cond", "conditioning_scale"] input_for_unet = {k: v for k, v in input_.items() if k not in control_specific_input} with torch.no_grad(): unet_output = unet(**input_for_unet).sample.cpu() unet_controlnet_output = model(**input_, apply_control=False).sample.cpu() assert np.abs(unet_output.flatten() - unet_controlnet_output.flatten()).max() < 3e-4 def test_time_embedding_mixing(self): unet = self.get_dummy_unet() controlnet = self.get_dummy_controlnet_from_unet(unet) controlnet_mix_time = self.get_dummy_controlnet_from_unet( unet, time_embedding_mix=0.5, 
learn_time_embedding=True ) model = UNetControlNetXSModel.from_unet(unet, controlnet) model_mix_time = UNetControlNetXSModel.from_unet(unet, controlnet_mix_time) unet = unet.to(torch_device) model = model.to(torch_device) model_mix_time = model_mix_time.to(torch_device) input_ = self.dummy_input with torch.no_grad(): output = model(**input_).sample output_mix_time = model_mix_time(**input_).sample assert output.shape == output_mix_time.shape @unittest.skip("Test not supported.") def test_forward_with_norm_groups(self): # UNetControlNetXSModel currently only supports StableDiffusion and StableDiffusion-XL, both of which have norm_num_groups fixed at 32. So we don't need to test different values for norm_num_groups. pass
diffusers/tests/models/unets/test_models_unet_controlnetxs.py/0
{ "file_path": "diffusers/tests/models/unets/test_models_unet_controlnetxs.py", "repo_id": "diffusers", "token_count": 6217 }
193
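# Sketch mirroring `test_forward_no_control` above: build the small UNet and a ControlNetXS
# adapter, fuse them into a UNetControlNetXSModel, and run a forward pass with control
# disabled (all shapes follow the test's `dummy_input`).
import torch

from diffusers import ControlNetXSAdapter, UNet2DConditionModel, UNetControlNetXSModel

unet = UNet2DConditionModel(
    block_out_channels=(4, 8),
    layers_per_block=2,
    sample_size=16,
    in_channels=4,
    out_channels=4,
    down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
    up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
    cross_attention_dim=8,
    norm_num_groups=4,
    use_linear_projection=True,
)
controlnet = ControlNetXSAdapter.from_unet(unet, size_ratio=1, conditioning_embedding_out_channels=(2, 2))
model = UNetControlNetXSModel.from_unet(unet, controlnet)

sample = torch.randn(4, 4, 16, 16)
timestep = torch.tensor([10])
encoder_hidden_states = torch.randn(4, 4, 8)
controlnet_cond = torch.randn(4, 3, 32, 32)

with torch.no_grad():
    out = model(
        sample,
        timestep,
        encoder_hidden_states,
        controlnet_cond=controlnet_cond,
        conditioning_scale=1.0,
        apply_control=False,
    ).sample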
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np import PIL.Image import torch from diffusers.image_processor import VaeImageProcessor class ImageProcessorTest(unittest.TestCase): @property def dummy_sample(self): batch_size = 1 num_channels = 3 height = 8 width = 8 sample = torch.rand((batch_size, num_channels, height, width)) return sample @property def dummy_mask(self): batch_size = 1 num_channels = 1 height = 8 width = 8 sample = torch.rand((batch_size, num_channels, height, width)) return sample def to_np(self, image): if isinstance(image[0], PIL.Image.Image): return np.stack([np.array(i) for i in image], axis=0) elif isinstance(image, torch.Tensor): return image.cpu().numpy().transpose(0, 2, 3, 1) return image def test_vae_image_processor_pt(self): image_processor = VaeImageProcessor(do_resize=False, do_normalize=True) input_pt = self.dummy_sample input_np = self.to_np(input_pt) for output_type in ["pt", "np", "pil"]: out = image_processor.postprocess( image_processor.preprocess(input_pt), output_type=output_type, ) out_np = self.to_np(out) in_np = (input_np * 255).round() if output_type == "pil" else input_np assert np.abs(in_np - out_np).max() < 1e-6, ( f"decoded output does not match input for output_type {output_type}" ) def test_vae_image_processor_np(self): image_processor = VaeImageProcessor(do_resize=False, do_normalize=True) input_np = self.dummy_sample.cpu().numpy().transpose(0, 2, 3, 1) for output_type in ["pt", "np", "pil"]: out = image_processor.postprocess(image_processor.preprocess(input_np), output_type=output_type) out_np = self.to_np(out) in_np = (input_np * 255).round() if output_type == "pil" else input_np assert np.abs(in_np - out_np).max() < 1e-6, ( f"decoded output does not match input for output_type {output_type}" ) def test_vae_image_processor_pil(self): image_processor = VaeImageProcessor(do_resize=False, do_normalize=True) input_np = self.dummy_sample.cpu().numpy().transpose(0, 2, 3, 1) input_pil = image_processor.numpy_to_pil(input_np) for output_type in ["pt", "np", "pil"]: out = image_processor.postprocess(image_processor.preprocess(input_pil), output_type=output_type) for i, o in zip(input_pil, out): in_np = np.array(i) out_np = self.to_np(out) if output_type == "pil" else (self.to_np(out) * 255).round() assert np.abs(in_np - out_np).max() < 1e-6, ( f"decoded output does not match input for output_type {output_type}" ) def test_preprocess_input_3d(self): image_processor = VaeImageProcessor(do_resize=False, do_normalize=False) input_pt_4d = self.dummy_sample input_pt_3d = input_pt_4d.squeeze(0) out_pt_4d = image_processor.postprocess( image_processor.preprocess(input_pt_4d), output_type="np", ) out_pt_3d = image_processor.postprocess( image_processor.preprocess(input_pt_3d), output_type="np", ) input_np_4d = self.to_np(self.dummy_sample) input_np_3d = input_np_4d.squeeze(0) out_np_4d = image_processor.postprocess( image_processor.preprocess(input_np_4d), output_type="np", ) out_np_3d = 
image_processor.postprocess( image_processor.preprocess(input_np_3d), output_type="np", ) assert np.abs(out_pt_4d - out_pt_3d).max() < 1e-6 assert np.abs(out_np_4d - out_np_3d).max() < 1e-6 def test_preprocess_input_list(self): image_processor = VaeImageProcessor(do_resize=False, do_normalize=False) input_pt_4d = self.dummy_sample input_pt_list = list(input_pt_4d) out_pt_4d = image_processor.postprocess( image_processor.preprocess(input_pt_4d), output_type="np", ) out_pt_list = image_processor.postprocess( image_processor.preprocess(input_pt_list), output_type="np", ) input_np_4d = self.to_np(self.dummy_sample) input_np_list = list(input_np_4d) out_np_4d = image_processor.postprocess( image_processor.preprocess(input_np_4d), output_type="np", ) out_np_list = image_processor.postprocess( image_processor.preprocess(input_np_list), output_type="np", ) assert np.abs(out_pt_4d - out_pt_list).max() < 1e-6 assert np.abs(out_np_4d - out_np_list).max() < 1e-6 def test_preprocess_input_mask_3d(self): image_processor = VaeImageProcessor( do_resize=False, do_normalize=False, do_binarize=True, do_convert_grayscale=True ) input_pt_4d = self.dummy_mask input_pt_3d = input_pt_4d.squeeze(0) input_pt_2d = input_pt_3d.squeeze(0) out_pt_4d = image_processor.postprocess( image_processor.preprocess(input_pt_4d), output_type="np", ) out_pt_3d = image_processor.postprocess( image_processor.preprocess(input_pt_3d), output_type="np", ) out_pt_2d = image_processor.postprocess( image_processor.preprocess(input_pt_2d), output_type="np", ) input_np_4d = self.to_np(self.dummy_mask) input_np_3d = input_np_4d.squeeze(0) input_np_3d_1 = input_np_4d.squeeze(-1) input_np_2d = input_np_3d.squeeze(-1) out_np_4d = image_processor.postprocess( image_processor.preprocess(input_np_4d), output_type="np", ) out_np_3d = image_processor.postprocess( image_processor.preprocess(input_np_3d), output_type="np", ) out_np_3d_1 = image_processor.postprocess( image_processor.preprocess(input_np_3d_1), output_type="np", ) out_np_2d = image_processor.postprocess( image_processor.preprocess(input_np_2d), output_type="np", ) assert np.abs(out_pt_4d - out_pt_3d).max() == 0 assert np.abs(out_pt_4d - out_pt_2d).max() == 0 assert np.abs(out_np_4d - out_np_3d).max() == 0 assert np.abs(out_np_4d - out_np_3d_1).max() == 0 assert np.abs(out_np_4d - out_np_2d).max() == 0 def test_preprocess_input_mask_list(self): image_processor = VaeImageProcessor(do_resize=False, do_normalize=False, do_convert_grayscale=True) input_pt_4d = self.dummy_mask input_pt_3d = input_pt_4d.squeeze(0) input_pt_2d = input_pt_3d.squeeze(0) inputs_pt = [input_pt_4d, input_pt_3d, input_pt_2d] inputs_pt_list = [[input_pt] for input_pt in inputs_pt] for input_pt, input_pt_list in zip(inputs_pt, inputs_pt_list): out_pt = image_processor.postprocess( image_processor.preprocess(input_pt), output_type="np", ) out_pt_list = image_processor.postprocess( image_processor.preprocess(input_pt_list), output_type="np", ) assert np.abs(out_pt - out_pt_list).max() < 1e-6 input_np_4d = self.to_np(self.dummy_mask) input_np_3d = input_np_4d.squeeze(0) input_np_2d = input_np_3d.squeeze(-1) inputs_np = [input_np_4d, input_np_3d, input_np_2d] inputs_np_list = [[input_np] for input_np in inputs_np] for input_np, input_np_list in zip(inputs_np, inputs_np_list): out_np = image_processor.postprocess( image_processor.preprocess(input_np), output_type="np", ) out_np_list = image_processor.postprocess( image_processor.preprocess(input_np_list), output_type="np", ) assert np.abs(out_np - out_np_list).max() < 1e-6 
def test_preprocess_input_mask_3d_batch(self): image_processor = VaeImageProcessor(do_resize=False, do_normalize=False, do_convert_grayscale=True) # create a dummy mask input with batch_size 2 dummy_mask_batch = torch.cat([self.dummy_mask] * 2, axis=0) # squeeze out the channel dimension input_pt_3d = dummy_mask_batch.squeeze(1) input_np_3d = self.to_np(dummy_mask_batch).squeeze(-1) input_pt_3d_list = list(input_pt_3d) input_np_3d_list = list(input_np_3d) out_pt_3d = image_processor.postprocess( image_processor.preprocess(input_pt_3d), output_type="np", ) out_pt_3d_list = image_processor.postprocess( image_processor.preprocess(input_pt_3d_list), output_type="np", ) assert np.abs(out_pt_3d - out_pt_3d_list).max() < 1e-6 out_np_3d = image_processor.postprocess( image_processor.preprocess(input_np_3d), output_type="np", ) out_np_3d_list = image_processor.postprocess( image_processor.preprocess(input_np_3d_list), output_type="np", ) assert np.abs(out_np_3d - out_np_3d_list).max() < 1e-6 def test_vae_image_processor_resize_pt(self): image_processor = VaeImageProcessor(do_resize=True, vae_scale_factor=1) input_pt = self.dummy_sample b, c, h, w = input_pt.shape scale = 2 out_pt = image_processor.resize(image=input_pt, height=h // scale, width=w // scale) exp_pt_shape = (b, c, h // scale, w // scale) assert out_pt.shape == exp_pt_shape, ( f"resized image output shape '{out_pt.shape}' didn't match expected shape '{exp_pt_shape}'." ) def test_vae_image_processor_resize_np(self): image_processor = VaeImageProcessor(do_resize=True, vae_scale_factor=1) input_pt = self.dummy_sample b, c, h, w = input_pt.shape scale = 2 input_np = self.to_np(input_pt) out_np = image_processor.resize(image=input_np, height=h // scale, width=w // scale) exp_np_shape = (b, h // scale, w // scale, c) assert out_np.shape == exp_np_shape, ( f"resized image output shape '{out_np.shape}' didn't match expected shape '{exp_np_shape}'." )
diffusers/tests/others/test_image_processor.py/0
{ "file_path": "diffusers/tests/others/test_image_processor.py", "repo_id": "diffusers", "token_count": 5499 }
194
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest import numpy as np import pytest import torch from transformers import ( ClapAudioConfig, ClapConfig, ClapFeatureExtractor, ClapModel, ClapTextConfig, GPT2Config, GPT2LMHeadModel, RobertaTokenizer, SpeechT5HifiGan, SpeechT5HifiGanConfig, T5Config, T5EncoderModel, T5Tokenizer, ) from diffusers import ( AudioLDM2Pipeline, AudioLDM2ProjectionModel, AudioLDM2UNet2DConditionModel, AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from diffusers.utils import is_transformers_version from diffusers.utils.testing_utils import ( backend_empty_cache, enable_full_determinism, is_torch_version, nightly, torch_device, ) from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class AudioLDM2PipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = AudioLDM2Pipeline params = TEXT_TO_AUDIO_PARAMS batch_params = TEXT_TO_AUDIO_BATCH_PARAMS required_optional_params = frozenset( [ "num_inference_steps", "num_waveforms_per_prompt", "generator", "latents", "output_type", "return_dict", "callback", "callback_steps", ] ) supports_dduf = False def get_dummy_components(self): torch.manual_seed(0) unet = AudioLDM2UNet2DConditionModel( block_out_channels=(8, 16), layers_per_block=1, norm_num_groups=8, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=(8, 16), ) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[8, 16], in_channels=1, out_channels=1, norm_num_groups=8, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) text_branch_config = ClapTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=8, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=1, num_hidden_layers=1, pad_token_id=1, vocab_size=1000, projection_dim=8, ) audio_branch_config = ClapAudioConfig( spec_size=8, window_size=4, num_mel_bins=8, intermediate_size=37, layer_norm_eps=1e-05, depths=[1, 1], num_attention_heads=[1, 1], num_hidden_layers=1, hidden_size=192, projection_dim=8, patch_size=2, patch_stride=2, patch_embed_input_channels=4, ) text_encoder_config = ClapConfig.from_text_audio_configs( text_config=text_branch_config, audio_config=audio_branch_config, projection_dim=16, ) text_encoder = ClapModel(text_encoder_config) tokenizer = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta", model_max_length=77) feature_extractor = ClapFeatureExtractor.from_pretrained( "hf-internal-testing/tiny-random-ClapModel", hop_length=7900 ) torch.manual_seed(0) text_encoder_2_config = T5Config( 
vocab_size=32100, d_model=32, d_ff=37, d_kv=8, num_heads=1, num_layers=1, ) text_encoder_2 = T5EncoderModel(text_encoder_2_config) tokenizer_2 = T5Tokenizer.from_pretrained("hf-internal-testing/tiny-random-T5Model", model_max_length=77) torch.manual_seed(0) language_model_config = GPT2Config( n_embd=16, n_head=1, n_layer=1, vocab_size=1000, n_ctx=99, n_positions=99, ) language_model = GPT2LMHeadModel(language_model_config) language_model.config.max_new_tokens = 8 torch.manual_seed(0) projection_model = AudioLDM2ProjectionModel( text_encoder_dim=16, text_encoder_1_dim=32, langauge_model_dim=16, ) vocoder_config = SpeechT5HifiGanConfig( model_in_dim=8, sampling_rate=16000, upsample_initial_channel=16, upsample_rates=[2, 2], upsample_kernel_sizes=[4, 4], resblock_kernel_sizes=[3, 7], resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]], normalize_before=False, ) vocoder = SpeechT5HifiGan(vocoder_config) components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "text_encoder_2": text_encoder_2, "tokenizer": tokenizer, "tokenizer_2": tokenizer_2, "feature_extractor": feature_extractor, "language_model": language_model, "projection_model": projection_model, "vocoder": vocoder, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A hammer hitting a wooden surface", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, } return inputs @pytest.mark.xfail( condition=is_transformers_version(">=", "4.54.1"), reason="Test currently fails on Transformers version 4.54.1.", strict=False, ) def test_audioldm2_ddim(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() audioldm_pipe = AudioLDM2Pipeline(**components) audioldm_pipe = audioldm_pipe.to(torch_device) audioldm_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) output = audioldm_pipe(**inputs) audio = output.audios[0] assert audio.ndim == 1 assert len(audio) == 256 audio_slice = audio[:10] expected_slice = np.array( [ 2.602e-03, 1.729e-03, 1.863e-03, -2.219e-03, -2.656e-03, -2.017e-03, -2.648e-03, -2.115e-03, -2.502e-03, -2.081e-03, ] ) assert np.abs(audio_slice - expected_slice).max() < 1e-4 def test_audioldm2_prompt_embeds(self): components = self.get_dummy_components() audioldm_pipe = AudioLDM2Pipeline(**components) audioldm_pipe = audioldm_pipe.to(torch_device) audioldm_pipe = audioldm_pipe.to(torch_device) audioldm_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) inputs["prompt"] = 3 * [inputs["prompt"]] # forward output = audioldm_pipe(**inputs) audio_1 = output.audios[0] inputs = self.get_dummy_inputs(torch_device) prompt = 3 * [inputs.pop("prompt")] text_inputs = audioldm_pipe.tokenizer( prompt, padding="max_length", max_length=audioldm_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_inputs = text_inputs["input_ids"].to(torch_device) clap_prompt_embeds = audioldm_pipe.text_encoder.get_text_features(text_inputs) clap_prompt_embeds = clap_prompt_embeds[:, None, :] text_inputs = audioldm_pipe.tokenizer_2( prompt, padding="max_length", max_length=True, truncation=True, return_tensors="pt", ) text_inputs = text_inputs["input_ids"].to(torch_device) t5_prompt_embeds = audioldm_pipe.text_encoder_2( text_inputs, ) t5_prompt_embeds = t5_prompt_embeds[0] 
projection_embeds = audioldm_pipe.projection_model(clap_prompt_embeds, t5_prompt_embeds)[0] generated_prompt_embeds = audioldm_pipe.generate_language_model(projection_embeds, max_new_tokens=8) inputs["prompt_embeds"] = t5_prompt_embeds inputs["generated_prompt_embeds"] = generated_prompt_embeds # forward output = audioldm_pipe(**inputs) audio_2 = output.audios[0] assert np.abs(audio_1 - audio_2).max() < 1e-2 def test_audioldm2_negative_prompt_embeds(self): components = self.get_dummy_components() audioldm_pipe = AudioLDM2Pipeline(**components) audioldm_pipe = audioldm_pipe.to(torch_device) audioldm_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) negative_prompt = 3 * ["this is a negative prompt"] inputs["negative_prompt"] = negative_prompt inputs["prompt"] = 3 * [inputs["prompt"]] # forward output = audioldm_pipe(**inputs) audio_1 = output.audios[0] inputs = self.get_dummy_inputs(torch_device) prompt = 3 * [inputs.pop("prompt")] embeds = [] generated_embeds = [] for p in [prompt, negative_prompt]: text_inputs = audioldm_pipe.tokenizer( p, padding="max_length", max_length=audioldm_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_inputs = text_inputs["input_ids"].to(torch_device) clap_prompt_embeds = audioldm_pipe.text_encoder.get_text_features(text_inputs) clap_prompt_embeds = clap_prompt_embeds[:, None, :] text_inputs = audioldm_pipe.tokenizer_2( prompt, padding="max_length", max_length=True if len(embeds) == 0 else embeds[0].shape[1], truncation=True, return_tensors="pt", ) text_inputs = text_inputs["input_ids"].to(torch_device) t5_prompt_embeds = audioldm_pipe.text_encoder_2( text_inputs, ) t5_prompt_embeds = t5_prompt_embeds[0] projection_embeds = audioldm_pipe.projection_model(clap_prompt_embeds, t5_prompt_embeds)[0] generated_prompt_embeds = audioldm_pipe.generate_language_model(projection_embeds, max_new_tokens=8) embeds.append(t5_prompt_embeds) generated_embeds.append(generated_prompt_embeds) inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds inputs["generated_prompt_embeds"], inputs["negative_generated_prompt_embeds"] = generated_embeds # forward output = audioldm_pipe(**inputs) audio_2 = output.audios[0] assert np.abs(audio_1 - audio_2).max() < 1e-2 @pytest.mark.xfail( condition=is_transformers_version(">=", "4.54.1"), reason="Test currently fails on Transformers version 4.54.1.", strict=False, ) def test_audioldm2_negative_prompt(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() components["scheduler"] = PNDMScheduler(skip_prk_steps=True) audioldm_pipe = AudioLDM2Pipeline(**components) audioldm_pipe = audioldm_pipe.to(device) audioldm_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) negative_prompt = "egg cracking" output = audioldm_pipe(**inputs, negative_prompt=negative_prompt) audio = output.audios[0] assert audio.ndim == 1 assert len(audio) == 256 audio_slice = audio[:10] expected_slice = np.array( [0.0026, 0.0017, 0.0018, -0.0022, -0.0026, -0.002, -0.0026, -0.0021, -0.0025, -0.0021] ) assert np.abs(audio_slice - expected_slice).max() < 1e-4 def test_audioldm2_num_waveforms_per_prompt(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() components["scheduler"] = PNDMScheduler(skip_prk_steps=True) audioldm_pipe = AudioLDM2Pipeline(**components) audioldm_pipe = audioldm_pipe.to(device) 
audioldm_pipe.set_progress_bar_config(disable=None) prompt = "A hammer hitting a wooden surface" # test num_waveforms_per_prompt=1 (default) audios = audioldm_pipe(prompt, num_inference_steps=2).audios assert audios.shape == (1, 256) # test num_waveforms_per_prompt=1 (default) for batch of prompts batch_size = 2 audios = audioldm_pipe([prompt] * batch_size, num_inference_steps=2).audios assert audios.shape == (batch_size, 256) # test num_waveforms_per_prompt for single prompt num_waveforms_per_prompt = 1 audios = audioldm_pipe(prompt, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt).audios assert audios.shape == (num_waveforms_per_prompt, 256) # test num_waveforms_per_prompt for batch of prompts batch_size = 2 audios = audioldm_pipe( [prompt] * batch_size, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt ).audios assert audios.shape == (batch_size * num_waveforms_per_prompt, 256) def test_audioldm2_audio_length_in_s(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() audioldm_pipe = AudioLDM2Pipeline(**components) audioldm_pipe = audioldm_pipe.to(torch_device) audioldm_pipe.set_progress_bar_config(disable=None) vocoder_sampling_rate = audioldm_pipe.vocoder.config.sampling_rate inputs = self.get_dummy_inputs(device) output = audioldm_pipe(audio_length_in_s=0.016, **inputs) audio = output.audios[0] assert audio.ndim == 1 assert len(audio) / vocoder_sampling_rate == 0.016 output = audioldm_pipe(audio_length_in_s=0.032, **inputs) audio = output.audios[0] assert audio.ndim == 1 assert len(audio) / vocoder_sampling_rate == 0.032 def test_audioldm2_vocoder_model_in_dim(self): components = self.get_dummy_components() audioldm_pipe = AudioLDM2Pipeline(**components) audioldm_pipe = audioldm_pipe.to(torch_device) audioldm_pipe.set_progress_bar_config(disable=None) prompt = ["hey"] output = audioldm_pipe(prompt, num_inference_steps=1) audio_shape = output.audios.shape assert audio_shape == (1, 256) config = audioldm_pipe.vocoder.config config.model_in_dim *= 2 audioldm_pipe.vocoder = SpeechT5HifiGan(config).to(torch_device) output = audioldm_pipe(prompt, num_inference_steps=1) audio_shape = output.audios.shape # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram assert audio_shape == (1, 256) def test_attention_slicing_forward_pass(self): self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False) @unittest.skip("Raises a not implemented error in AudioLDM2") def test_xformers_attention_forwardGenerator_pass(self): pass def test_dict_tuple_outputs_equivalent(self): # increase tolerance from 1e-4 -> 3e-4 to account for large composite model super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-4) @pytest.mark.xfail( condition=is_torch_version(">=", "2.7"), reason="Test currently fails on PyTorch 2.7.", strict=False, ) def test_inference_batch_single_identical(self): # increase tolerance from 1e-4 -> 2e-4 to account for large composite model self._test_inference_batch_single_identical(expected_max_diff=2e-4) def test_save_load_local(self): # increase tolerance from 1e-4 -> 2e-4 to account for large composite model super().test_save_load_local(expected_max_difference=2e-4) def test_save_load_optional_components(self): # increase tolerance from 1e-4 -> 2e-4 to account for large composite model super().test_save_load_optional_components(expected_max_difference=2e-4) def test_to_dtype(self): components = 
self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.set_progress_bar_config(disable=None) # The method component.dtype returns the dtype of the first parameter registered in the model, not the # dtype of the entire model. In the case of CLAP, the first parameter is a float64 constant (logit scale) model_dtypes = {key: component.dtype for key, component in components.items() if hasattr(component, "dtype")} # Without the logit scale parameters, everything is float32 model_dtypes.pop("text_encoder") self.assertTrue(all(dtype == torch.float32 for dtype in model_dtypes.values())) # the CLAP sub-models are float32 model_dtypes["clap_text_branch"] = components["text_encoder"].text_model.dtype self.assertTrue(all(dtype == torch.float32 for dtype in model_dtypes.values())) # Once we send to fp16, all params are in half-precision, including the logit scale pipe.to(dtype=torch.float16) model_dtypes = {key: component.dtype for key, component in components.items() if hasattr(component, "dtype")} self.assertTrue(all(dtype == torch.float16 for dtype in model_dtypes.values())) @unittest.skip("Test not supported.") def test_sequential_cpu_offload_forward_pass(self): pass @unittest.skip("Test not supported for now because of the use of `projection_model` in `encode_prompt()`.") def test_encode_prompt_works_in_isolation(self): pass @unittest.skip("Not supported yet due to CLAPModel.") def test_sequential_offload_forward_pass_twice(self): pass @unittest.skip("Not supported yet, the second forward has mixed devices and `vocoder` is not offloaded.") def test_cpu_offload_forward_pass_twice(self): pass @unittest.skip("Not supported yet. `vocoder` is not offloaded.") def test_model_cpu_offload_forward_pass(self): pass @nightly class AudioLDM2PipelineSlowTests(unittest.TestCase): def setUp(self): super().setUp() gc.collect() backend_empty_cache(torch_device) def tearDown(self): super().tearDown() gc.collect() backend_empty_cache(torch_device) def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): generator = torch.Generator(device=generator_device).manual_seed(seed) latents = np.random.RandomState(seed).standard_normal((1, 8, 128, 16)) latents = torch.from_numpy(latents).to(device=device, dtype=dtype) inputs = { "prompt": "A hammer hitting a wooden surface", "latents": latents, "generator": generator, "num_inference_steps": 3, "guidance_scale": 2.5, } return inputs def get_inputs_tts(self, device, generator_device="cpu", dtype=torch.float32, seed=0): generator = torch.Generator(device=generator_device).manual_seed(seed) latents = np.random.RandomState(seed).standard_normal((1, 8, 128, 16)) latents = torch.from_numpy(latents).to(device=device, dtype=dtype) inputs = { "prompt": "A men saying", "transcription": "hello my name is John", "latents": latents, "generator": generator, "num_inference_steps": 3, "guidance_scale": 2.5, } return inputs def test_audioldm2(self): audioldm_pipe = AudioLDM2Pipeline.from_pretrained("cvssp/audioldm2") audioldm_pipe = audioldm_pipe.to(torch_device) audioldm_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) inputs["num_inference_steps"] = 25 audio = audioldm_pipe(**inputs).audios[0] assert audio.ndim == 1 assert len(audio) == 81952 # check the portion of the generated audio with the largest dynamic range (reduces flakiness) audio_slice = audio[17275:17285] expected_slice = np.array([0.0791, 0.0666, 0.1158, 0.1227, 0.1171, -0.2880, -0.1940, -0.0283, -0.0126, 0.1127]) max_diff = np.abs(expected_slice - 
audio_slice).max() assert max_diff < 1e-3 def test_audioldm2_lms(self): audioldm_pipe = AudioLDM2Pipeline.from_pretrained("cvssp/audioldm2") audioldm_pipe.scheduler = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config) audioldm_pipe = audioldm_pipe.to(torch_device) audioldm_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) audio = audioldm_pipe(**inputs).audios[0] assert audio.ndim == 1 assert len(audio) == 81952 # check the portion of the generated audio with the largest dynamic range (reduces flakiness) audio_slice = audio[31390:31400] expected_slice = np.array( [-0.1318, -0.0577, 0.0446, -0.0573, 0.0659, 0.1074, -0.2600, 0.0080, -0.2190, -0.4301] ) max_diff = np.abs(expected_slice - audio_slice).max() assert max_diff < 1e-3 def test_audioldm2_large(self): audioldm_pipe = AudioLDM2Pipeline.from_pretrained("cvssp/audioldm2-large") audioldm_pipe = audioldm_pipe.to(torch_device) audioldm_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) audio = audioldm_pipe(**inputs).audios[0] assert audio.ndim == 1 assert len(audio) == 81952 # check the portion of the generated audio with the largest dynamic range (reduces flakiness) audio_slice = audio[8825:8835] expected_slice = np.array( [-0.1829, -0.1461, 0.0759, -0.1493, -0.1396, 0.5783, 0.3001, -0.3038, -0.0639, -0.2244] ) max_diff = np.abs(expected_slice - audio_slice).max() assert max_diff < 1e-3 def test_audioldm2_tts(self): audioldm_tts_pipe = AudioLDM2Pipeline.from_pretrained("anhnct/audioldm2_gigaspeech") audioldm_tts_pipe = audioldm_tts_pipe.to(torch_device) audioldm_tts_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs_tts(torch_device) audio = audioldm_tts_pipe(**inputs).audios[0] assert audio.ndim == 1 assert len(audio) == 81952 # check the portion of the generated audio with the largest dynamic range (reduces flakiness) audio_slice = audio[8825:8835] expected_slice = np.array( [-0.1829, -0.1461, 0.0759, -0.1493, -0.1396, 0.5783, 0.3001, -0.3038, -0.0639, -0.2244] ) max_diff = np.abs(expected_slice - audio_slice).max() assert max_diff < 1e-3
diffusers/tests/pipelines/audioldm2/test_audioldm2.py/0
{ "file_path": "diffusers/tests/pipelines/audioldm2/test_audioldm2.py", "repo_id": "diffusers", "token_count": 11643 }
195
import random import unittest import numpy as np import torch # torch_device, # {{ edit_1 }} Removed unused import from transformers import ( AutoTokenizer, CLIPTextConfig, CLIPTextModel, CLIPTokenizer, T5EncoderModel, ) from diffusers import ( AutoencoderKL, FlowMatchEulerDiscreteScheduler, FluxControlNetInpaintPipeline, FluxControlNetModel, FluxTransformer2DModel, ) from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, torch_device, ) from diffusers.utils.torch_utils import randn_tensor from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class FluxControlNetInpaintPipelineTests(unittest.TestCase, PipelineTesterMixin): pipeline_class = FluxControlNetInpaintPipeline params = frozenset( [ "prompt", "height", "width", "guidance_scale", "prompt_embeds", "pooled_prompt_embeds", "image", "mask_image", "control_image", "strength", "num_inference_steps", "controlnet_conditioning_scale", ] ) batch_params = frozenset(["prompt", "image", "mask_image", "control_image"]) test_xformers_attention = False def get_dummy_components(self): torch.manual_seed(0) transformer = FluxTransformer2DModel( patch_size=1, in_channels=8, num_layers=1, num_single_layers=1, attention_head_dim=16, num_attention_heads=2, joint_attention_dim=32, pooled_projection_dim=32, axes_dims_rope=[4, 4, 8], ) clip_text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=32, ) torch.manual_seed(0) text_encoder = CLIPTextModel(clip_text_encoder_config) torch.manual_seed(0) text_encoder_2 = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") tokenizer_2 = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") torch.manual_seed(0) vae = AutoencoderKL( sample_size=32, in_channels=3, out_channels=3, block_out_channels=(4,), layers_per_block=1, latent_channels=2, norm_num_groups=1, use_quant_conv=False, use_post_quant_conv=False, shift_factor=0.0609, scaling_factor=1.5035, ) torch.manual_seed(0) controlnet = FluxControlNetModel( patch_size=1, in_channels=8, num_layers=1, num_single_layers=1, attention_head_dim=16, num_attention_heads=2, joint_attention_dim=32, pooled_projection_dim=32, axes_dims_rope=[4, 4, 8], ) scheduler = FlowMatchEulerDiscreteScheduler() return { "scheduler": scheduler, "text_encoder": text_encoder, "text_encoder_2": text_encoder_2, "tokenizer": tokenizer, "tokenizer_2": tokenizer_2, "transformer": transformer, "vae": vae, "controlnet": controlnet, } def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) mask_image = torch.ones((1, 1, 32, 32)).to(device) control_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) inputs = { "prompt": "A painting of a squirrel eating a burger", "image": image, "mask_image": mask_image, "control_image": control_image, "generator": generator, "num_inference_steps": 2, "guidance_scale": 5.0, "height": 32, "width": 32, "max_sequence_length": 48, "strength": 0.8, "output_type": "np", } return inputs def test_flux_controlnet_inpaint_with_num_images_per_prompt(self): device = "cpu" # ensure determinism for the device-dependent 
torch.Generator components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) inputs["num_images_per_prompt"] = 2 output = pipe(**inputs) images = output.images assert images.shape == (2, 32, 32, 3) def test_flux_controlnet_inpaint_with_controlnet_conditioning_scale(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) output_default = pipe(**inputs) image_default = output_default.images inputs["controlnet_conditioning_scale"] = 0.5 output_scaled = pipe(**inputs) image_scaled = output_scaled.images # Ensure that changing the controlnet_conditioning_scale produces a different output assert not np.allclose(image_default, image_scaled, atol=0.01) def test_attention_slicing_forward_pass(self): super().test_attention_slicing_forward_pass(expected_max_diff=3e-3) def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=3e-3) def test_flux_image_output_shape(self): pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device) inputs = self.get_dummy_inputs(torch_device) height_width_pairs = [(32, 32), (72, 56)] for height, width in height_width_pairs: expected_height = height - height % (pipe.vae_scale_factor * 2) expected_width = width - width % (pipe.vae_scale_factor * 2) inputs.update( { "control_image": randn_tensor( (1, 3, height, width), device=torch_device, dtype=torch.float16, ), "image": randn_tensor( (1, 3, height, width), device=torch_device, dtype=torch.float16, ), "mask_image": torch.ones((1, 1, height, width)).to(torch_device), "height": height, "width": width, } ) image = pipe(**inputs).images[0] output_height, output_width, _ = image.shape assert (output_height, output_width) == (expected_height, expected_width)
diffusers/tests/pipelines/controlnet_flux/test_controlnet_flux_inpaint.py/0
{ "file_path": "diffusers/tests/pipelines/controlnet_flux/test_controlnet_flux_inpaint.py", "repo_id": "diffusers", "token_count": 3792 }
196
import tempfile import numpy as np import torch from transformers import AutoTokenizer, T5EncoderModel from diffusers import DDPMScheduler, UNet2DConditionModel from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.pipelines.deepfloyd_if import IFWatermarker from diffusers.utils.testing_utils import torch_device from ..test_pipelines_common import to_np # WARN: the hf-internal-testing/tiny-random-t5 text encoder has some non-determinism in the `save_load` tests. class IFPipelineTesterMixin: def _get_dummy_components(self): torch.manual_seed(0) text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") torch.manual_seed(0) tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") torch.manual_seed(0) unet = UNet2DConditionModel( sample_size=32, layers_per_block=1, block_out_channels=[32, 64], down_block_types=[ "ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D", ], mid_block_type="UNetMidBlock2DSimpleCrossAttn", up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"], in_channels=3, out_channels=6, cross_attention_dim=32, encoder_hid_dim=32, attention_head_dim=8, addition_embed_type="text", addition_embed_type_num_heads=2, cross_attention_norm="group_norm", resnet_time_scale_shift="scale_shift", act_fn="gelu", ) unet.set_attn_processor(AttnAddedKVProcessor()) # For reproducibility tests torch.manual_seed(0) scheduler = DDPMScheduler( num_train_timesteps=1000, beta_schedule="squaredcos_cap_v2", beta_start=0.0001, beta_end=0.02, thresholding=True, dynamic_thresholding_ratio=0.95, sample_max_value=1.0, prediction_type="epsilon", variance_type="learned_range", ) torch.manual_seed(0) watermarker = IFWatermarker() return { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "watermarker": watermarker, "safety_checker": None, "feature_extractor": None, } def _get_superresolution_dummy_components(self): torch.manual_seed(0) text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") torch.manual_seed(0) tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") torch.manual_seed(0) unet = UNet2DConditionModel( sample_size=32, layers_per_block=[1, 2], block_out_channels=[32, 64], down_block_types=[ "ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D", ], mid_block_type="UNetMidBlock2DSimpleCrossAttn", up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"], in_channels=6, out_channels=6, cross_attention_dim=32, encoder_hid_dim=32, attention_head_dim=8, addition_embed_type="text", addition_embed_type_num_heads=2, cross_attention_norm="group_norm", resnet_time_scale_shift="scale_shift", act_fn="gelu", class_embed_type="timestep", mid_block_scale_factor=1.414, time_embedding_act_fn="gelu", time_embedding_dim=32, ) unet.set_attn_processor(AttnAddedKVProcessor()) # For reproducibility tests torch.manual_seed(0) scheduler = DDPMScheduler( num_train_timesteps=1000, beta_schedule="squaredcos_cap_v2", beta_start=0.0001, beta_end=0.02, thresholding=True, dynamic_thresholding_ratio=0.95, sample_max_value=1.0, prediction_type="epsilon", variance_type="learned_range", ) torch.manual_seed(0) image_noising_scheduler = DDPMScheduler( num_train_timesteps=1000, beta_schedule="squaredcos_cap_v2", beta_start=0.0001, beta_end=0.02, ) torch.manual_seed(0) watermarker = IFWatermarker() return { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "image_noising_scheduler": 
image_noising_scheduler, "watermarker": watermarker, "safety_checker": None, "feature_extractor": None, } # this test is modified from the base class because if pipelines set the text encoder # as optional with the intention that the user is allowed to encode the prompt once # and then pass the embeddings directly to the pipeline. The base class test uses # the unmodified arguments from `self.get_dummy_inputs` which will pass the unencoded # prompt to the pipeline when the text encoder is set to None, throwing an error. # So we make the test reflect the intended usage of setting the text encoder to None. def _test_save_load_optional_components(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) prompt = inputs["prompt"] generator = inputs["generator"] num_inference_steps = inputs["num_inference_steps"] output_type = inputs["output_type"] if "image" in inputs: image = inputs["image"] else: image = None if "mask_image" in inputs: mask_image = inputs["mask_image"] else: mask_image = None if "original_image" in inputs: original_image = inputs["original_image"] else: original_image = None prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(prompt) # inputs with prompt converted to embeddings inputs = { "prompt_embeds": prompt_embeds, "negative_prompt_embeds": negative_prompt_embeds, "generator": generator, "num_inference_steps": num_inference_steps, "output_type": output_type, } if image is not None: inputs["image"] = image if mask_image is not None: inputs["mask_image"] = mask_image if original_image is not None: inputs["original_image"] = original_image # set all optional components to None for optional_component in pipe._optional_components: setattr(pipe, optional_component, None) output = pipe(**inputs)[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(tmpdir) pipe_loaded = self.pipeline_class.from_pretrained(tmpdir) pipe_loaded.to(torch_device) pipe_loaded.set_progress_bar_config(disable=None) pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor()) # For reproducibility tests for optional_component in pipe._optional_components: self.assertTrue( getattr(pipe_loaded, optional_component) is None, f"`{optional_component}` did not stay set to None after loading.", ) inputs = self.get_dummy_inputs(torch_device) generator = inputs["generator"] num_inference_steps = inputs["num_inference_steps"] output_type = inputs["output_type"] # inputs with prompt converted to embeddings inputs = { "prompt_embeds": prompt_embeds, "negative_prompt_embeds": negative_prompt_embeds, "generator": generator, "num_inference_steps": num_inference_steps, "output_type": output_type, } if image is not None: inputs["image"] = image if mask_image is not None: inputs["mask_image"] = mask_image if original_image is not None: inputs["original_image"] = original_image output_loaded = pipe_loaded(**inputs)[0] max_diff = np.abs(to_np(output) - to_np(output_loaded)).max() self.assertLess(max_diff, 1e-4) # Modified from `PipelineTesterMixin` to set the attn processor as it's not serialized. # This should be handled in the base test and then this method can be removed. 
def _test_save_load_local(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) output = pipe(**inputs)[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(tmpdir) pipe_loaded = self.pipeline_class.from_pretrained(tmpdir) pipe_loaded.to(torch_device) pipe_loaded.set_progress_bar_config(disable=None) pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor()) # For reproducibility tests inputs = self.get_dummy_inputs(torch_device) output_loaded = pipe_loaded(**inputs)[0] max_diff = np.abs(to_np(output) - to_np(output_loaded)).max() self.assertLess(max_diff, 1e-4)
diffusers/tests/pipelines/deepfloyd_if/__init__.py/0
{ "file_path": "diffusers/tests/pipelines/deepfloyd_if/__init__.py", "repo_id": "diffusers", "token_count": 4583 }
197
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest import numpy as np import torch from PIL import Image from transformers import AutoTokenizer, T5EncoderModel from diffusers import ( AutoPipelineForImage2Image, AutoPipelineForText2Image, Kandinsky3Pipeline, Kandinsky3UNet, VQModel, ) from diffusers.image_processor import VaeImageProcessor from diffusers.schedulers.scheduling_ddpm import DDPMScheduler from diffusers.utils.testing_utils import ( backend_empty_cache, enable_full_determinism, load_image, require_torch_accelerator, slow, torch_device, ) from ..pipeline_params import ( TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class Kandinsky3PipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = Kandinsky3Pipeline params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs"} batch_params = TEXT_TO_IMAGE_BATCH_PARAMS image_params = TEXT_TO_IMAGE_IMAGE_PARAMS image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS callback_cfg_params = TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS test_xformers_attention = False @property def dummy_movq_kwargs(self): return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def dummy_movq(self): torch.manual_seed(0) model = VQModel(**self.dummy_movq_kwargs) return model def get_dummy_components(self, time_cond_proj_dim=None): torch.manual_seed(0) unet = Kandinsky3UNet( in_channels=4, time_embedding_dim=4, groups=2, attention_head_dim=4, layers_per_block=3, block_out_channels=(32, 64), cross_attention_dim=4, encoder_hid_dim=32, ) scheduler = DDPMScheduler( beta_start=0.00085, beta_end=0.012, steps_offset=1, beta_schedule="squaredcos_cap_v2", clip_sample=True, thresholding=False, ) torch.manual_seed(0) movq = self.dummy_movq torch.manual_seed(0) text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") torch.manual_seed(0) tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") components = { "unet": unet, "scheduler": scheduler, "movq": movq, "text_encoder": text_encoder, "tokenizer": tokenizer, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "np", "width": 16, "height": 16, } return inputs def test_kandinsky3(self): device = "cpu" components = self.get_dummy_components() pipe = 
self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) output = pipe(**self.get_dummy_inputs(device)) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 16, 16, 3) expected_slice = np.array([0.3768, 0.4373, 0.4865, 0.4890, 0.4299, 0.5122, 0.4921, 0.4924, 0.5599]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2, ( f" expected_slice {expected_slice}, but got {image_slice.flatten()}" ) def test_float16_inference(self): super().test_float16_inference(expected_max_diff=1e-1) def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=1e-2) @slow @require_torch_accelerator class Kandinsky3PipelineIntegrationTests(unittest.TestCase): def setUp(self): # clean up the VRAM before each test super().setUp() gc.collect() backend_empty_cache(torch_device) def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() backend_empty_cache(torch_device) def test_kandinskyV3(self): pipe = AutoPipelineForText2Image.from_pretrained( "kandinsky-community/kandinsky-3", variant="fp16", torch_dtype=torch.float16 ) pipe.enable_model_cpu_offload(device=torch_device) pipe.set_progress_bar_config(disable=None) prompt = "A photograph of the inside of a subway train. There are raccoons sitting on the seats. One of them is reading a newspaper. The window shows the city in the background." generator = torch.Generator(device="cpu").manual_seed(0) image = pipe(prompt, num_inference_steps=5, generator=generator).images[0] assert image.size == (1024, 1024) expected_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinsky3/t2i.png" ) image_processor = VaeImageProcessor() image_np = image_processor.pil_to_numpy(image) expected_image_np = image_processor.pil_to_numpy(expected_image) self.assertTrue(np.allclose(image_np, expected_image_np, atol=5e-2)) def test_kandinskyV3_img2img(self): pipe = AutoPipelineForImage2Image.from_pretrained( "kandinsky-community/kandinsky-3", variant="fp16", torch_dtype=torch.float16 ) pipe.enable_model_cpu_offload(device=torch_device) pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinsky3/t2i.png" ) w, h = 512, 512 image = image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1) prompt = "A painting of the inside of a subway train with tiny raccoons." image = pipe(prompt, image=image, strength=0.75, num_inference_steps=5, generator=generator).images[0] assert image.size == (512, 512) expected_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinsky3/i2i.png" ) image_processor = VaeImageProcessor() image_np = image_processor.pil_to_numpy(image) expected_image_np = image_processor.pil_to_numpy(expected_image) self.assertTrue(np.allclose(image_np, expected_image_np, atol=5e-2))
diffusers/tests/pipelines/kandinsky3/test_kandinsky3.py/0
{ "file_path": "diffusers/tests/pipelines/kandinsky3/test_kandinsky3.py", "repo_id": "diffusers", "token_count": 3627 }
198
import gc import unittest import numpy as np import torch from transformers import AutoTokenizer from diffusers import AutoencoderKL, FlowMatchEulerDiscreteScheduler, OmniGenPipeline, OmniGenTransformer2DModel from diffusers.utils.testing_utils import ( Expectations, backend_empty_cache, numpy_cosine_similarity_distance, require_torch_accelerator, slow, torch_device, ) from ..test_pipelines_common import PipelineTesterMixin class OmniGenPipelineFastTests(unittest.TestCase, PipelineTesterMixin): pipeline_class = OmniGenPipeline params = frozenset(["prompt", "guidance_scale"]) batch_params = frozenset(["prompt"]) test_layerwise_casting = True def get_dummy_components(self): torch.manual_seed(0) transformer = OmniGenTransformer2DModel( hidden_size=16, num_attention_heads=4, num_key_value_heads=4, intermediate_size=32, num_layers=1, in_channels=4, time_step_dim=4, rope_scaling={"long_factor": list(range(1, 3)), "short_factor": list(range(1, 3))}, ) torch.manual_seed(0) vae = AutoencoderKL( sample_size=32, in_channels=3, out_channels=3, block_out_channels=(4, 4, 4, 4), layers_per_block=1, latent_channels=4, norm_num_groups=1, up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], ) scheduler = FlowMatchEulerDiscreteScheduler(invert_sigmas=True, num_train_timesteps=1) tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/llama-tokenizer") components = { "transformer": transformer, "vae": vae, "scheduler": scheduler, "tokenizer": tokenizer, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device="cpu").manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 1, "guidance_scale": 3.0, "output_type": "np", "height": 16, "width": 16, } return inputs def test_inference(self): pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device) inputs = self.get_dummy_inputs(torch_device) generated_image = pipe(**inputs).images[0] self.assertEqual(generated_image.shape, (16, 16, 3)) @slow @require_torch_accelerator class OmniGenPipelineSlowTests(unittest.TestCase): pipeline_class = OmniGenPipeline repo_id = "shitao/OmniGen-v1-diffusers" def setUp(self): super().setUp() gc.collect() backend_empty_cache(torch_device) def tearDown(self): super().tearDown() gc.collect() backend_empty_cache(torch_device) def get_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device="cpu").manual_seed(seed) return { "prompt": "A photo of a cat", "num_inference_steps": 2, "guidance_scale": 2.5, "output_type": "np", "generator": generator, } def test_omnigen_inference(self): pipe = self.pipeline_class.from_pretrained(self.repo_id, torch_dtype=torch.bfloat16) pipe.enable_model_cpu_offload() inputs = self.get_inputs(torch_device) image = pipe(**inputs).images[0] image_slice = image[0, :10, :10] expected_slices = Expectations( { ("xpu", 3): np.array( [ [0.05859375, 0.05859375, 0.04492188], [0.04882812, 0.04101562, 0.03320312], [0.04882812, 0.04296875, 0.03125], [0.04296875, 0.0390625, 0.03320312], [0.04296875, 0.03710938, 0.03125], [0.04492188, 0.0390625, 0.03320312], [0.04296875, 0.03710938, 0.03125], [0.04101562, 0.03710938, 0.02734375], [0.04101562, 0.03515625, 0.02734375], [0.04101562, 0.03515625, 0.02929688], ], dtype=np.float32, ), ("cuda", 7): np.array( [ [0.1783447, 0.16772744, 0.14339337], 
[0.17066911, 0.15521264, 0.13757327], [0.17072496, 0.15531206, 0.13524258], [0.16746324, 0.1564025, 0.13794944], [0.16490817, 0.15258026, 0.13697758], [0.16971767, 0.15826806, 0.13928896], [0.16782972, 0.15547255, 0.13783783], [0.16464645, 0.15281534, 0.13522372], [0.16535294, 0.15301755, 0.13526791], [0.16365296, 0.15092957, 0.13443318], ], dtype=np.float32, ), ("cuda", 8): np.array( [ [0.0546875, 0.05664062, 0.04296875], [0.046875, 0.04101562, 0.03320312], [0.05078125, 0.04296875, 0.03125], [0.04296875, 0.04101562, 0.03320312], [0.0390625, 0.03710938, 0.02929688], [0.04296875, 0.03710938, 0.03125], [0.0390625, 0.03710938, 0.02929688], [0.0390625, 0.03710938, 0.02734375], [0.0390625, 0.03320312, 0.02734375], [0.0390625, 0.03320312, 0.02734375], ], dtype=np.float32, ), } ) expected_slice = expected_slices.get_expectation() max_diff = numpy_cosine_similarity_distance(expected_slice.flatten(), image_slice.flatten()) assert max_diff < 1e-4
diffusers/tests/pipelines/omnigen/test_pipeline_omnigen.py/0
{ "file_path": "diffusers/tests/pipelines/omnigen/test_pipeline_omnigen.py", "repo_id": "diffusers", "token_count": 3642 }
199
# Copyright 2025 The HuggingFace Team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import inspect import unittest import numpy as np import torch from transformers import Gemma2Config, Gemma2Model, GemmaTokenizer from diffusers import AutoencoderDC, FlowMatchEulerDiscreteScheduler, SanaPipeline, SanaTransformer2DModel from diffusers.utils.testing_utils import ( backend_empty_cache, enable_full_determinism, require_torch_accelerator, slow, torch_device, ) from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin, to_np enable_full_determinism() class SanaPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = SanaPipeline params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs"} batch_params = TEXT_TO_IMAGE_BATCH_PARAMS image_params = TEXT_TO_IMAGE_IMAGE_PARAMS image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS required_optional_params = frozenset( [ "num_inference_steps", "generator", "latents", "return_dict", "callback_on_step_end", "callback_on_step_end_tensor_inputs", ] ) test_xformers_attention = False test_layerwise_casting = True test_group_offloading = True def get_dummy_components(self): torch.manual_seed(0) transformer = SanaTransformer2DModel( patch_size=1, in_channels=4, out_channels=4, num_layers=1, num_attention_heads=2, attention_head_dim=4, num_cross_attention_heads=2, cross_attention_head_dim=4, cross_attention_dim=8, caption_channels=8, sample_size=32, ) torch.manual_seed(0) vae = AutoencoderDC( in_channels=3, latent_channels=4, attention_head_dim=2, encoder_block_types=( "ResBlock", "EfficientViTBlock", ), decoder_block_types=( "ResBlock", "EfficientViTBlock", ), encoder_block_out_channels=(8, 8), decoder_block_out_channels=(8, 8), encoder_qkv_multiscales=((), (5,)), decoder_qkv_multiscales=((), (5,)), encoder_layers_per_block=(1, 1), decoder_layers_per_block=[1, 1], downsample_block_type="conv", upsample_block_type="interpolate", decoder_norm_types="rms_norm", decoder_act_fns="silu", scaling_factor=0.41407, ) torch.manual_seed(0) scheduler = FlowMatchEulerDiscreteScheduler(shift=7.0) torch.manual_seed(0) text_encoder_config = Gemma2Config( head_dim=16, hidden_size=8, initializer_range=0.02, intermediate_size=64, max_position_embeddings=8192, model_type="gemma2", num_attention_heads=2, num_hidden_layers=1, num_key_value_heads=2, vocab_size=8, attn_implementation="eager", ) text_encoder = Gemma2Model(text_encoder_config) tokenizer = GemmaTokenizer.from_pretrained("hf-internal-testing/dummy-gemma") components = { "transformer": transformer, "vae": vae, "scheduler": scheduler, "text_encoder": text_encoder, "tokenizer": tokenizer, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "", "negative_prompt": "", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, 
"height": 32, "width": 32, "max_sequence_length": 16, "output_type": "pt", "complex_human_instruction": None, } return inputs def test_inference(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = pipe(**inputs)[0] generated_image = image[0] self.assertEqual(generated_image.shape, (3, 32, 32)) expected_image = torch.randn(3, 32, 32) max_diff = np.abs(generated_image - expected_image).max() self.assertLessEqual(max_diff, 1e10) def test_callback_inputs(self): sig = inspect.signature(self.pipeline_class.__call__) has_callback_tensor_inputs = "callback_on_step_end_tensor_inputs" in sig.parameters has_callback_step_end = "callback_on_step_end" in sig.parameters if not (has_callback_tensor_inputs and has_callback_step_end): return components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) self.assertTrue( hasattr(pipe, "_callback_tensor_inputs"), f" {self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs", ) def callback_inputs_subset(pipe, i, t, callback_kwargs): # iterate over callback args for tensor_name, tensor_value in callback_kwargs.items(): # check that we're only passing in allowed tensor inputs assert tensor_name in pipe._callback_tensor_inputs return callback_kwargs def callback_inputs_all(pipe, i, t, callback_kwargs): for tensor_name in pipe._callback_tensor_inputs: assert tensor_name in callback_kwargs # iterate over callback args for tensor_name, tensor_value in callback_kwargs.items(): # check that we're only passing in allowed tensor inputs assert tensor_name in pipe._callback_tensor_inputs return callback_kwargs inputs = self.get_dummy_inputs(torch_device) # Test passing in a subset inputs["callback_on_step_end"] = callback_inputs_subset inputs["callback_on_step_end_tensor_inputs"] = ["latents"] output = pipe(**inputs)[0] # Test passing in a everything inputs["callback_on_step_end"] = callback_inputs_all inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs output = pipe(**inputs)[0] def callback_inputs_change_tensor(pipe, i, t, callback_kwargs): is_last = i == (pipe.num_timesteps - 1) if is_last: callback_kwargs["latents"] = torch.zeros_like(callback_kwargs["latents"]) return callback_kwargs inputs["callback_on_step_end"] = callback_inputs_change_tensor inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs output = pipe(**inputs)[0] assert output.abs().sum() < 1e10 def test_attention_slicing_forward_pass( self, test_max_difference=True, test_mean_pixel_difference=True, expected_max_diff=1e-3 ): if not self.test_attention_slicing: return components = self.get_dummy_components() pipe = self.pipeline_class(**components) for component in pipe.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) generator_device = "cpu" inputs = self.get_dummy_inputs(generator_device) output_without_slicing = pipe(**inputs)[0] pipe.enable_attention_slicing(slice_size=1) inputs = self.get_dummy_inputs(generator_device) output_with_slicing1 = pipe(**inputs)[0] pipe.enable_attention_slicing(slice_size=2) inputs = self.get_dummy_inputs(generator_device) output_with_slicing2 = pipe(**inputs)[0] if 
test_max_difference: max_diff1 = np.abs(to_np(output_with_slicing1) - to_np(output_without_slicing)).max() max_diff2 = np.abs(to_np(output_with_slicing2) - to_np(output_without_slicing)).max() self.assertLess( max(max_diff1, max_diff2), expected_max_diff, "Attention slicing should not affect the inference results", ) def test_vae_tiling(self, expected_diff_max: float = 0.2): generator_device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to("cpu") pipe.set_progress_bar_config(disable=None) # Without tiling inputs = self.get_dummy_inputs(generator_device) inputs["height"] = inputs["width"] = 128 output_without_tiling = pipe(**inputs)[0] # With tiling pipe.vae.enable_tiling( tile_sample_min_height=96, tile_sample_min_width=96, tile_sample_stride_height=64, tile_sample_stride_width=64, ) inputs = self.get_dummy_inputs(generator_device) inputs["height"] = inputs["width"] = 128 output_with_tiling = pipe(**inputs)[0] self.assertLess( (to_np(output_without_tiling) - to_np(output_with_tiling)).max(), expected_diff_max, "VAE tiling should not affect the inference results", ) # TODO(aryan): Create a dummy gemma model with smol vocab size @unittest.skip( "A very small vocab size is used for fast tests. So, any kind of prompt other than the empty default used in other tests will lead to a embedding lookup error. This test uses a long prompt that causes the error." ) def test_inference_batch_consistent(self): pass @unittest.skip( "A very small vocab size is used for fast tests. So, any kind of prompt other than the empty default used in other tests will lead to a embedding lookup error. This test uses a long prompt that causes the error." ) def test_inference_batch_single_identical(self): pass def test_float16_inference(self): # Requires higher tolerance as model seems very sensitive to dtype super().test_float16_inference(expected_max_diff=0.08) @slow @require_torch_accelerator class SanaPipelineIntegrationTests(unittest.TestCase): prompt = "A painting of a squirrel eating a burger." 
def setUp(self): super().setUp() gc.collect() backend_empty_cache(torch_device) def tearDown(self): super().tearDown() gc.collect() backend_empty_cache(torch_device) def test_sana_1024(self): generator = torch.Generator("cpu").manual_seed(0) pipe = SanaPipeline.from_pretrained( "Efficient-Large-Model/Sana_1600M_1024px_diffusers", torch_dtype=torch.float16 ) pipe.enable_model_cpu_offload(device=torch_device) image = pipe( prompt=self.prompt, height=1024, width=1024, generator=generator, num_inference_steps=20, output_type="np", ).images[0] image = image.flatten() output_slice = np.concatenate((image[:16], image[-16:])) # fmt: off expected_slice = np.array([0.0427, 0.0789, 0.0662, 0.0464, 0.082, 0.0574, 0.0535, 0.0886, 0.0647, 0.0549, 0.0872, 0.0605, 0.0593, 0.0942, 0.0674, 0.0581, 0.0076, 0.0168, 0.0027, 0.0063, 0.0159, 0.0, 0.0071, 0.0198, 0.0034, 0.0105, 0.0212, 0.0, 0.0, 0.0166, 0.0042, 0.0125]) # fmt: on self.assertTrue(np.allclose(output_slice, expected_slice, atol=1e-4)) def test_sana_512(self): generator = torch.Generator("cpu").manual_seed(0) pipe = SanaPipeline.from_pretrained( "Efficient-Large-Model/Sana_1600M_512px_diffusers", torch_dtype=torch.float16 ) pipe.enable_model_cpu_offload(device=torch_device) image = pipe( prompt=self.prompt, height=512, width=512, generator=generator, num_inference_steps=20, output_type="np", ).images[0] image = image.flatten() output_slice = np.concatenate((image[:16], image[-16:])) # fmt: off expected_slice = np.array([0.0803, 0.0774, 0.1108, 0.0872, 0.093, 0.1118, 0.0952, 0.0898, 0.1038, 0.0818, 0.0754, 0.0894, 0.074, 0.0691, 0.0906, 0.0671, 0.0154, 0.0254, 0.0203, 0.0178, 0.0283, 0.0193, 0.0215, 0.0273, 0.0188, 0.0212, 0.0273, 0.0151, 0.0061, 0.0244, 0.0212, 0.0259]) # fmt: on self.assertTrue(np.allclose(output_slice, expected_slice, atol=1e-4))
diffusers/tests/pipelines/sana/test_sana.py/0
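The file above exercises SanaPipeline both with tiny dummy components and, in the integration class, with the full Efficient-Large-Model/Sana_1600M_1024px_diffusers checkpoint. As a hedged illustration that is not part of the test suite, a standalone run mirroring that integration test might look like the sketch below; the checkpoint id and call arguments come from the test, while the output filename is arbitrary.

# Minimal sketch of SanaPipeline inference, mirroring SanaPipelineIntegrationTests.test_sana_1024.
import torch
from diffusers import SanaPipeline

pipe = SanaPipeline.from_pretrained(
    "Efficient-Large-Model/Sana_1600M_1024px_diffusers", torch_dtype=torch.float16
)
pipe.enable_model_cpu_offload()  # keep peak VRAM low, as the integration test does

image = pipe(
    prompt="A painting of a squirrel eating a burger.",
    height=1024,
    width=1024,
    num_inference_steps=20,
    generator=torch.Generator("cpu").manual_seed(0),
).images[0]
image.save("sana_1024.png")  # default output_type is PIL, so .save() is available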
{ "file_path": "diffusers/tests/pipelines/sana/test_sana.py", "repo_id": "diffusers", "token_count": 6414 }
200
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import DDPMWuerstchenScheduler, StableCascadeCombinedPipeline from diffusers.models import StableCascadeUNet from diffusers.pipelines.wuerstchen import PaellaVQModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch_accelerator, torch_device from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class StableCascadeCombinedPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = StableCascadeCombinedPipeline params = ["prompt"] batch_params = ["prompt", "negative_prompt"] required_optional_params = [ "generator", "height", "width", "latents", "prior_guidance_scale", "decoder_guidance_scale", "negative_prompt", "num_inference_steps", "return_dict", "prior_num_inference_steps", "output_type", ] test_xformers_attention = True @property def text_embedder_hidden_size(self): return 32 @property def dummy_prior(self): torch.manual_seed(0) model_kwargs = { "conditioning_dim": 128, "block_out_channels": (128, 128), "num_attention_heads": (2, 2), "down_num_layers_per_block": (1, 1), "up_num_layers_per_block": (1, 1), "clip_image_in_channels": 768, "switch_level": (False,), "clip_text_in_channels": self.text_embedder_hidden_size, "clip_text_pooled_in_channels": self.text_embedder_hidden_size, } model = StableCascadeUNet(**model_kwargs) return model.eval() @property def dummy_tokenizer(self): tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") return tokenizer @property def dummy_text_encoder(self): torch.manual_seed(0) config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, projection_dim=self.text_embedder_hidden_size, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) return CLIPTextModelWithProjection(config).eval() @property def dummy_vqgan(self): torch.manual_seed(0) model_kwargs = { "bottleneck_blocks": 1, "num_vq_embeddings": 2, } model = PaellaVQModel(**model_kwargs) return model.eval() @property def dummy_decoder(self): torch.manual_seed(0) model_kwargs = { "in_channels": 4, "out_channels": 4, "conditioning_dim": 128, "block_out_channels": (16, 32, 64, 128), "num_attention_heads": (-1, -1, 1, 2), "down_num_layers_per_block": (1, 1, 1, 1), "up_num_layers_per_block": (1, 1, 1, 1), "down_blocks_repeat_mappers": (1, 1, 1, 1), "up_blocks_repeat_mappers": (3, 3, 2, 2), "block_types_per_layer": ( ("SDCascadeResBlock", "SDCascadeTimestepBlock"), ("SDCascadeResBlock", "SDCascadeTimestepBlock"), ("SDCascadeResBlock", "SDCascadeTimestepBlock", "SDCascadeAttnBlock"), ("SDCascadeResBlock", "SDCascadeTimestepBlock", "SDCascadeAttnBlock"), ), "switch_level": None, "clip_text_pooled_in_channels": 32, "dropout": (0.1, 0.1, 0.1, 0.1), } model = 
StableCascadeUNet(**model_kwargs) return model.eval() def get_dummy_components(self): prior = self.dummy_prior scheduler = DDPMWuerstchenScheduler() tokenizer = self.dummy_tokenizer text_encoder = self.dummy_text_encoder decoder = self.dummy_decoder vqgan = self.dummy_vqgan prior_text_encoder = self.dummy_text_encoder prior_tokenizer = self.dummy_tokenizer components = { "text_encoder": text_encoder, "tokenizer": tokenizer, "decoder": decoder, "scheduler": scheduler, "vqgan": vqgan, "prior_text_encoder": prior_text_encoder, "prior_tokenizer": prior_tokenizer, "prior_prior": prior, "prior_scheduler": scheduler, "prior_feature_extractor": None, "prior_image_encoder": None, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "horse", "generator": generator, "prior_guidance_scale": 4.0, "decoder_guidance_scale": 4.0, "num_inference_steps": 2, "prior_num_inference_steps": 2, "output_type": "np", "height": 128, "width": 128, } return inputs def test_stable_cascade(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) output = pipe(**self.get_dummy_inputs(device)) image = output.images image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0] image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[-3:, -3:, -1] assert image.shape == (1, 128, 128, 3) expected_slice = np.array([0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2, ( f" expected_slice {expected_slice}, but got {image_slice.flatten()}" ) assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2, ( f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" ) @require_torch_accelerator def test_offloads(self): pipes = [] components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components).to(torch_device) pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe.enable_sequential_cpu_offload(device=torch_device) pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe.enable_model_cpu_offload(device=torch_device) pipes.append(sd_pipe) image_slices = [] for pipe in pipes: inputs = self.get_dummy_inputs(torch_device) image = pipe(**inputs).images image_slices.append(image[0, -3:, -3:, -1].flatten()) assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3 def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=2e-2) @unittest.skip(reason="fp16 not supported") def test_float16_inference(self): super().test_float16_inference() @unittest.skip(reason="no callback test for combined pipeline") def test_callback_inputs(self): super().test_callback_inputs()
diffusers/tests/pipelines/stable_cascade/test_stable_cascade_combined.py/0
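The combined Stable Cascade pipeline tested above bundles the prior and decoder stages behind a single call with separate prior_* and decoder guidance/step arguments. As a hedged sketch that is not part of the test file, full-scale usage could look like the following; the hub checkpoint id and dtype are assumptions, while the argument names mirror get_dummy_inputs above.

# Hedged sketch of StableCascadeCombinedPipeline usage. The checkpoint id and dtype below are
# assumptions (not taken from the test); argument names mirror the dummy inputs above.
import torch
from diffusers import StableCascadeCombinedPipeline

pipe = StableCascadeCombinedPipeline.from_pretrained(
    "stabilityai/stable-cascade",  # assumed checkpoint id
    torch_dtype=torch.bfloat16,    # assumed dtype
)
pipe.enable_model_cpu_offload()

image = pipe(
    prompt="an astronaut riding a horse",
    negative_prompt="",
    prior_guidance_scale=4.0,
    decoder_guidance_scale=4.0,
    prior_num_inference_steps=20,
    num_inference_steps=10,
    height=1024,
    width=1024,
    generator=torch.Generator("cpu").manual_seed(0),
).images[0]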
{ "file_path": "diffusers/tests/pipelines/stable_cascade/test_stable_cascade_combined.py", "repo_id": "diffusers", "token_count": 3914 }
201
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest from diffusers import FlaxStableDiffusionInpaintPipeline from diffusers.utils import is_flax_available, load_image from diffusers.utils.testing_utils import require_flax, slow if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class FlaxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() def test_stable_diffusion_inpaint_pipeline(self): init_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-inpaint/init_image.png" ) mask_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" ) model_id = "xvjiarui/stable-diffusion-2-inpainting" pipeline, params = FlaxStableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None) prompt = "Face of a yellow cat, high resolution, sitting on a park bench" prng_seed = jax.random.PRNGKey(0) num_inference_steps = 50 num_samples = jax.device_count() prompt = num_samples * [prompt] init_image = num_samples * [init_image] mask_image = num_samples * [mask_image] prompt_ids, processed_masked_images, processed_masks = pipeline.prepare_inputs(prompt, init_image, mask_image) # shard inputs and rng params = replicate(params) prng_seed = jax.random.split(prng_seed, jax.device_count()) prompt_ids = shard(prompt_ids) processed_masked_images = shard(processed_masked_images) processed_masks = shard(processed_masks) output = pipeline( prompt_ids, processed_masks, processed_masked_images, params, prng_seed, num_inference_steps, jit=True ) images = output.images.reshape(num_samples, 512, 512, 3) image_slice = images[0, 253:256, 253:256, -1] output_slice = jnp.asarray(jax.device_get(image_slice.flatten())) expected_slice = jnp.array( [0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084] ) assert jnp.abs(output_slice - expected_slice).max() < 1e-2
diffusers/tests/pipelines/stable_diffusion_2/test_stable_diffusion_flax_inpaint.py/0
{ "file_path": "diffusers/tests/pipelines/stable_diffusion_2/test_stable_diffusion_flax_inpaint.py", "repo_id": "diffusers", "token_count": 1237 }
202
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import random import unittest import numpy as np import torch from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import ( AutoencoderKL, AutoencoderTiny, EDMDPMSolverMultistepScheduler, EulerDiscreteScheduler, LCMScheduler, StableDiffusionXLImg2ImgPipeline, UNet2DConditionModel, ) from diffusers.utils.testing_utils import ( backend_empty_cache, enable_full_determinism, floats_tensor, load_image, require_torch_accelerator, slow, torch_device, ) from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS, ) from ..test_pipelines_common import ( IPAdapterTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, ) enable_full_determinism() class StableDiffusionXLImg2ImgPipelineFastTests( IPAdapterTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase ): pipeline_class = StableDiffusionXLImg2ImgPipeline params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"} required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"} batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS callback_cfg_params = TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS.union( {"add_text_embeds", "add_time_ids", "add_neg_time_ids"} ) supports_dduf = False def get_dummy_components(self, skip_first_text_encoder=False, time_cond_proj_dim=None): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, time_cond_proj_dim=time_cond_proj_dim, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), # SD2-specific config below attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=72, # 5 * 8 + 32 cross_attention_dim=64 if not skip_first_text_encoder else 32, ) scheduler = EulerDiscreteScheduler( beta_start=0.00085, beta_end=0.012, steps_offset=1, beta_schedule="scaled_linear", timestep_spacing="leading", ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, ) torch.manual_seed(0) image_encoder_config = CLIPVisionConfig( hidden_size=32, image_size=224, projection_dim=32, intermediate_size=37, num_attention_heads=4, num_channels=3, num_hidden_layers=5, patch_size=14, ) image_encoder = CLIPVisionModelWithProjection(image_encoder_config) feature_extractor = CLIPImageProcessor( 
crop_size=224, do_center_crop=True, do_normalize=True, do_resize=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], resample=3, size=224, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, # SD2-specific config below hidden_act="gelu", projection_dim=32, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config) tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder if not skip_first_text_encoder else None, "tokenizer": tokenizer if not skip_first_text_encoder else None, "text_encoder_2": text_encoder_2, "tokenizer_2": tokenizer_2, "requires_aesthetics_score": True, "image_encoder": image_encoder, "feature_extractor": feature_extractor, } return components def get_dummy_tiny_autoencoder(self): return AutoencoderTiny(in_channels=3, out_channels=3, latent_channels=4) def test_components_function(self): init_components = self.get_dummy_components() init_components.pop("requires_aesthetics_score") pipe = self.pipeline_class(**init_components) self.assertTrue(hasattr(pipe, "components")) self.assertTrue(set(pipe.components.keys()) == set(init_components.keys())) def get_dummy_inputs(self, device, seed=0): image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) image = image / 2 + 0.5 if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "image": image, "generator": generator, "num_inference_steps": 2, "guidance_scale": 5.0, "output_type": "np", "strength": 0.8, } return inputs def test_stable_diffusion_xl_img2img_euler(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionXLImg2ImgPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) expected_slice = np.array([0.4664, 0.4886, 0.4403, 0.6902, 0.5592, 0.4534, 0.5931, 0.5951, 0.5224]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_xl_img2img_euler_lcm(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components(time_cond_proj_dim=256) sd_pipe = StableDiffusionXLImg2ImgPipeline(**components) sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.config) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) expected_slice = np.array([0.5604, 0.4352, 0.4717, 0.5844, 0.5101, 0.6704, 0.6290, 0.5460, 0.5286]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_xl_img2img_euler_lcm_custom_timesteps(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = 
self.get_dummy_components(time_cond_proj_dim=256) sd_pipe = StableDiffusionXLImg2ImgPipeline(**components) sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.config) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) del inputs["num_inference_steps"] inputs["timesteps"] = [999, 499] image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) expected_slice = np.array([0.5604, 0.4352, 0.4717, 0.5844, 0.5101, 0.6704, 0.6290, 0.5460, 0.5286]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_attention_slicing_forward_pass(self): super().test_attention_slicing_forward_pass(expected_max_diff=3e-3) def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=3e-3) @unittest.skip("Skip for now.") def test_save_load_optional_components(self): pass def test_ip_adapter(self): expected_pipe_slice = None if torch_device == "cpu": expected_pipe_slice = np.array([0.5133, 0.4626, 0.4970, 0.6273, 0.5160, 0.6891, 0.6639, 0.5892, 0.5709]) return super().test_ip_adapter(expected_pipe_slice=expected_pipe_slice) def test_stable_diffusion_xl_img2img_tiny_autoencoder(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionXLImg2ImgPipeline(**components) sd_pipe.vae = self.get_dummy_tiny_autoencoder() sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 32, 32, 3) expected_slice = np.array([0.0, 0.0, 0.0106, 0.0, 0.0, 0.0087, 0.0052, 0.0062, 0.0177]) assert np.allclose(image_slice, expected_slice, atol=1e-4, rtol=1e-4) @require_torch_accelerator def test_stable_diffusion_xl_offloads(self): pipes = [] components = self.get_dummy_components() sd_pipe = StableDiffusionXLImg2ImgPipeline(**components).to(torch_device) pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = StableDiffusionXLImg2ImgPipeline(**components) sd_pipe.enable_model_cpu_offload(device=torch_device) pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = StableDiffusionXLImg2ImgPipeline(**components) sd_pipe.enable_sequential_cpu_offload(device=torch_device) pipes.append(sd_pipe) image_slices = [] for pipe in pipes: pipe.unet.set_default_attn_processor() generator_device = "cpu" inputs = self.get_dummy_inputs(generator_device) image = pipe(**inputs).images image_slices.append(image[0, -3:, -3:, -1].flatten()) assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3 def test_stable_diffusion_xl_multi_prompts(self): components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components).to(torch_device) # forward with single prompt generator_device = "cpu" inputs = self.get_dummy_inputs(generator_device) inputs["num_inference_steps"] = 5 output = sd_pipe(**inputs) image_slice_1 = output.images[0, -3:, -3:, -1] # forward with same prompt duplicated generator_device = "cpu" inputs = self.get_dummy_inputs(generator_device) inputs["num_inference_steps"] = 5 inputs["prompt_2"] = inputs["prompt"] output = sd_pipe(**inputs) image_slice_2 = output.images[0, -3:, -3:, -1] # ensure the results are equal assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 # forward with 
different prompt generator_device = "cpu" inputs = self.get_dummy_inputs(generator_device) inputs["num_inference_steps"] = 5 inputs["prompt_2"] = "different prompt" output = sd_pipe(**inputs) image_slice_3 = output.images[0, -3:, -3:, -1] # ensure the results are not equal assert np.abs(image_slice_1.flatten() - image_slice_3.flatten()).max() > 1e-4 # manually set a negative_prompt generator_device = "cpu" inputs = self.get_dummy_inputs(generator_device) inputs["num_inference_steps"] = 5 inputs["negative_prompt"] = "negative prompt" output = sd_pipe(**inputs) image_slice_1 = output.images[0, -3:, -3:, -1] # forward with same negative_prompt duplicated generator_device = "cpu" inputs = self.get_dummy_inputs(generator_device) inputs["num_inference_steps"] = 5 inputs["negative_prompt"] = "negative prompt" inputs["negative_prompt_2"] = inputs["negative_prompt"] output = sd_pipe(**inputs) image_slice_2 = output.images[0, -3:, -3:, -1] # ensure the results are equal assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 # forward with different negative_prompt generator_device = "cpu" inputs = self.get_dummy_inputs(generator_device) inputs["num_inference_steps"] = 5 inputs["negative_prompt"] = "negative prompt" inputs["negative_prompt_2"] = "different negative prompt" output = sd_pipe(**inputs) image_slice_3 = output.images[0, -3:, -3:, -1] # ensure the results are not equal assert np.abs(image_slice_1.flatten() - image_slice_3.flatten()).max() > 1e-4 def test_stable_diffusion_xl_img2img_negative_conditions(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice_with_no_neg_conditions = image[0, -3:, -3:, -1] image = sd_pipe( **inputs, negative_original_size=(512, 512), negative_crops_coords_top_left=( 0, 0, ), negative_target_size=(1024, 1024), ).images image_slice_with_neg_conditions = image[0, -3:, -3:, -1] assert ( np.abs(image_slice_with_no_neg_conditions.flatten() - image_slice_with_neg_conditions.flatten()).max() > 1e-4 ) def test_pipeline_interrupt(self): components = self.get_dummy_components() sd_pipe = StableDiffusionXLImg2ImgPipeline(**components) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) prompt = "hey" num_inference_steps = 5 # store intermediate latents from the generation process class PipelineState: def __init__(self): self.state = [] def apply(self, pipe, i, t, callback_kwargs): self.state.append(callback_kwargs["latents"]) return callback_kwargs pipe_state = PipelineState() sd_pipe( prompt, image=inputs["image"], strength=0.8, num_inference_steps=num_inference_steps, output_type="np", generator=torch.Generator("cpu").manual_seed(0), callback_on_step_end=pipe_state.apply, ).images # interrupt generation at step index interrupt_step_idx = 1 def callback_on_step_end(pipe, i, t, callback_kwargs): if i == interrupt_step_idx: pipe._interrupt = True return callback_kwargs output_interrupted = sd_pipe( prompt, image=inputs["image"], strength=0.8, num_inference_steps=num_inference_steps, output_type="latent", generator=torch.Generator("cpu").manual_seed(0), callback_on_step_end=callback_on_step_end, ).images # fetch intermediate latents at the interrupted step # from the completed generation process 
intermediate_latent = pipe_state.state[interrupt_step_idx] # compare the intermediate latent to the output of the interrupted process # they should be the same assert torch.allclose(intermediate_latent, output_interrupted, atol=1e-4) class StableDiffusionXLImg2ImgRefinerOnlyPipelineFastTests( PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase ): pipeline_class = StableDiffusionXLImg2ImgPipeline params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"} required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"} batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), # SD2-specific config below attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=72, # 5 * 8 + 32 cross_attention_dim=32, ) scheduler = EulerDiscreteScheduler( beta_start=0.00085, beta_end=0.012, steps_offset=1, beta_schedule="scaled_linear", timestep_spacing="leading", ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, # SD2-specific config below hidden_act="gelu", projection_dim=32, ) text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config) tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "scheduler": scheduler, "vae": vae, "tokenizer": None, "text_encoder": None, "text_encoder_2": text_encoder_2, "tokenizer_2": tokenizer_2, "requires_aesthetics_score": True, "image_encoder": None, "feature_extractor": None, } return components def test_components_function(self): init_components = self.get_dummy_components() init_components.pop("requires_aesthetics_score") pipe = self.pipeline_class(**init_components) self.assertTrue(hasattr(pipe, "components")) self.assertTrue(set(pipe.components.keys()) == set(init_components.keys())) def get_dummy_inputs(self, device, seed=0): image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) image = image / 2 + 0.5 if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "image": image, "generator": generator, "num_inference_steps": 2, "guidance_scale": 5.0, "output_type": "np", "strength": 0.8, } return inputs def test_stable_diffusion_xl_img2img_euler(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionXLImg2ImgPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images 
image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) expected_slice = np.array([0.4745, 0.4924, 0.4338, 0.6468, 0.5547, 0.4419, 0.5646, 0.5897, 0.5146]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 @require_torch_accelerator def test_stable_diffusion_xl_offloads(self): pipes = [] components = self.get_dummy_components() sd_pipe = StableDiffusionXLImg2ImgPipeline(**components).to(torch_device) pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = StableDiffusionXLImg2ImgPipeline(**components) sd_pipe.enable_model_cpu_offload(device=torch_device) pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = StableDiffusionXLImg2ImgPipeline(**components) sd_pipe.enable_sequential_cpu_offload(device=torch_device) pipes.append(sd_pipe) image_slices = [] for pipe in pipes: pipe.unet.set_default_attn_processor() generator_device = "cpu" inputs = self.get_dummy_inputs(generator_device) image = pipe(**inputs).images image_slices.append(image[0, -3:, -3:, -1].flatten()) assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3 def test_stable_diffusion_xl_img2img_negative_conditions(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice_with_no_neg_conditions = image[0, -3:, -3:, -1] image = sd_pipe( **inputs, negative_original_size=(512, 512), negative_crops_coords_top_left=( 0, 0, ), negative_target_size=(1024, 1024), ).images image_slice_with_neg_conditions = image[0, -3:, -3:, -1] assert ( np.abs(image_slice_with_no_neg_conditions.flatten() - image_slice_with_neg_conditions.flatten()).max() > 1e-4 ) def test_attention_slicing_forward_pass(self): super().test_attention_slicing_forward_pass(expected_max_diff=3e-3) def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=3e-3) @unittest.skip("We test this functionality elsewhere already.") def test_save_load_optional_components(self): pass @slow class StableDiffusionXLImg2ImgPipelineIntegrationTests(unittest.TestCase): def setUp(self): super().setUp() gc.collect() backend_empty_cache(torch_device) def tearDown(self): super().tearDown() gc.collect() backend_empty_cache(torch_device) def test_stable_diffusion_xl_img2img_playground(self): torch.manual_seed(0) model_path = "playgroundai/playground-v2.5-1024px-aesthetic" sd_pipe = StableDiffusionXLImg2ImgPipeline.from_pretrained( model_path, torch_dtype=torch.float16, variant="fp16", add_watermarker=False ) sd_pipe.enable_model_cpu_offload() sd_pipe.scheduler = EDMDPMSolverMultistepScheduler.from_config( sd_pipe.scheduler.config, use_karras_sigmas=True ) sd_pipe.set_progress_bar_config(disable=None) prompt = "a photo of an astronaut riding a horse on mars" url = "https://huggingface.co/datasets/patrickvonplaten/images/resolve/main/aa_xl/000000009.png" init_image = load_image(url).convert("RGB") image = sd_pipe( prompt, num_inference_steps=30, guidance_scale=8.0, image=init_image, height=1024, width=1024, output_type="np", ).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 1024, 1024, 3) expected_slice = np.array([0.3519, 0.3149, 0.3364, 0.3505, 0.3402, 0.3371, 0.3554, 0.3495, 0.3333]) assert np.abs(image_slice.flatten() - 
expected_slice).max() < 1e-2
diffusers/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_img2img.py/0
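The fast tests above run StableDiffusionXLImg2ImgPipeline on 32x32 dummy inputs, while the integration test uses a Playground checkpoint. As a hedged sketch outside the test suite, a typical image-to-image run could look like the following; the refiner checkpoint id is an assumption (it is not the one used in the test), and the init-image URL is the one the integration test loads.

# Hedged sketch of a StableDiffusionXLImg2ImgPipeline run. The checkpoint id is an assumption;
# the init-image URL and strength mirror the tests above.
import torch
from diffusers import StableDiffusionXLImg2ImgPipeline
from diffusers.utils import load_image

pipe = StableDiffusionXLImg2ImgPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-refiner-1.0",  # assumed checkpoint, not the test's
    torch_dtype=torch.float16,
    variant="fp16",
)
pipe.enable_model_cpu_offload()

init_image = load_image(
    "https://huggingface.co/datasets/patrickvonplaten/images/resolve/main/aa_xl/000000009.png"
).convert("RGB")

image = pipe(
    prompt="a photo of an astronaut riding a horse on mars",
    image=init_image,
    strength=0.8,              # same strength the fast tests use
    num_inference_steps=30,
    guidance_scale=8.0,
    generator=torch.Generator("cpu").manual_seed(0),
).images[0]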
{ "file_path": "diffusers/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_img2img.py", "repo_id": "diffusers", "token_count": 12509 }
203
import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import AutoTokenizer, CLIPTextConfig, CLIPTextModel, CLIPTokenizer, T5EncoderModel import diffusers from diffusers import AutoencoderKL, FlowMatchEulerDiscreteScheduler, FluxTransformer2DModel, VisualClozePipeline from diffusers.utils import logging from diffusers.utils.testing_utils import ( CaptureLogger, enable_full_determinism, floats_tensor, require_accelerator, torch_device, ) from ..test_pipelines_common import PipelineTesterMixin, to_np enable_full_determinism() class VisualClozePipelineFastTests(unittest.TestCase, PipelineTesterMixin): pipeline_class = VisualClozePipeline params = frozenset( [ "task_prompt", "content_prompt", "upsampling_height", "upsampling_width", "guidance_scale", "prompt_embeds", "pooled_prompt_embeds", "upsampling_strength", ] ) batch_params = frozenset(["task_prompt", "content_prompt", "image"]) test_xformers_attention = False test_layerwise_casting = True test_group_offloading = True supports_dduf = False def get_dummy_components(self): torch.manual_seed(0) transformer = FluxTransformer2DModel( patch_size=1, in_channels=12, out_channels=4, num_layers=1, num_single_layers=1, attention_head_dim=6, num_attention_heads=2, joint_attention_dim=32, pooled_projection_dim=32, axes_dims_rope=[2, 2, 2], ) clip_text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=32, ) torch.manual_seed(0) text_encoder = CLIPTextModel(clip_text_encoder_config) torch.manual_seed(0) text_encoder_2 = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") tokenizer_2 = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") torch.manual_seed(0) vae = AutoencoderKL( sample_size=32, in_channels=3, out_channels=3, block_out_channels=(4,), layers_per_block=1, latent_channels=1, norm_num_groups=1, use_quant_conv=False, use_post_quant_conv=False, shift_factor=0.0609, scaling_factor=1.5035, ) scheduler = FlowMatchEulerDiscreteScheduler() return { "scheduler": scheduler, "text_encoder": text_encoder, "text_encoder_2": text_encoder_2, "tokenizer": tokenizer, "tokenizer_2": tokenizer_2, "transformer": transformer, "vae": vae, "resolution": 32, } def get_dummy_inputs(self, device, seed=0): # Create example images to simulate the input format required by VisualCloze context_image = [ Image.fromarray(floats_tensor((32, 32, 3), rng=random.Random(seed), scale=255).numpy().astype(np.uint8)) for _ in range(2) ] query_image = [ Image.fromarray( floats_tensor((32, 32, 3), rng=random.Random(seed + 1), scale=255).numpy().astype(np.uint8) ), None, ] # Create an image list that conforms to the VisualCloze input format image = [ context_image, # In-Context example query_image, # Query image ] if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device="cpu").manual_seed(seed) inputs = { "task_prompt": "Each row outlines a logical process, starting from [IMAGE1] gray-based depth map with detailed object contours, to achieve [IMAGE2] an image with flawless clarity.", "content_prompt": "A beautiful landscape with mountains and a lake", "image": image, "generator": generator, "num_inference_steps": 2, "guidance_scale": 5.0, "upsampling_height": 32, 
"upsampling_width": 32, "max_sequence_length": 77, "output_type": "np", "upsampling_strength": 0.4, } return inputs def test_visualcloze_different_prompts(self): pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device) inputs = self.get_dummy_inputs(torch_device) output_same_prompt = pipe(**inputs).images[0] inputs = self.get_dummy_inputs(torch_device) inputs["task_prompt"] = "A different task to perform." output_different_prompts = pipe(**inputs).images[0] max_diff = np.abs(output_same_prompt - output_different_prompts).max() # Outputs should be different assert max_diff > 1e-6 def test_visualcloze_image_output_shape(self): pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device) inputs = self.get_dummy_inputs(torch_device) height_width_pairs = [(32, 32), (72, 57)] for height, width in height_width_pairs: expected_height = height - height % (pipe.generation_pipe.vae_scale_factor * 2) expected_width = width - width % (pipe.generation_pipe.vae_scale_factor * 2) inputs.update({"upsampling_height": height, "upsampling_width": width}) image = pipe(**inputs).images[0] output_height, output_width, _ = image.shape assert (output_height, output_width) == (expected_height, expected_width) def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(expected_max_diff=1e-3) def test_upsampling_strength(self, expected_min_diff=1e-1): pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device) inputs = self.get_dummy_inputs(torch_device) # Test different upsampling strengths inputs["upsampling_strength"] = 0.2 output_no_upsampling = pipe(**inputs).images[0] inputs["upsampling_strength"] = 0.8 output_full_upsampling = pipe(**inputs).images[0] # Different upsampling strengths should produce different outputs max_diff = np.abs(output_no_upsampling - output_full_upsampling).max() assert max_diff > expected_min_diff def test_different_task_prompts(self, expected_min_diff=1e-1): pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device) inputs = self.get_dummy_inputs(torch_device) output_original = pipe(**inputs).images[0] inputs["task_prompt"] = "A different task description for image generation" output_different_task = pipe(**inputs).images[0] # Different task prompts should produce different outputs max_diff = np.abs(output_original - output_different_task).max() assert max_diff > expected_min_diff @unittest.skip( "Test not applicable because the pipeline being tested is a wrapper pipeline. CFG tests should be done on the inner pipelines." 
) def test_callback_cfg(self): pass def test_save_load_local(self, expected_max_difference=5e-4): components = self.get_dummy_components() pipe = self.pipeline_class(**components) for component in pipe.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) output = pipe(**inputs)[0] logger = logging.get_logger("diffusers.pipelines.pipeline_utils") logger.setLevel(diffusers.logging.INFO) with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(tmpdir, safe_serialization=False) with CaptureLogger(logger) as cap_logger: # NOTE: Resolution must be set to 32 for loading otherwise will lead to OOM on CI hardware # This attribute is not serialized in the config of the pipeline pipe_loaded = self.pipeline_class.from_pretrained(tmpdir, resolution=32) for component in pipe_loaded.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() for name in pipe_loaded.components.keys(): if name not in pipe_loaded._optional_components: assert name in str(cap_logger) pipe_loaded.to(torch_device) pipe_loaded.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) output_loaded = pipe_loaded(**inputs)[0] max_diff = np.abs(to_np(output) - to_np(output_loaded)).max() self.assertLess(max_diff, expected_max_difference) def test_save_load_optional_components(self, expected_max_difference=1e-4): if not hasattr(self.pipeline_class, "_optional_components"): return components = self.get_dummy_components() pipe = self.pipeline_class(**components) for component in pipe.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) # set all optional components to None for optional_component in pipe._optional_components: setattr(pipe, optional_component, None) generator_device = "cpu" inputs = self.get_dummy_inputs(generator_device) torch.manual_seed(0) output = pipe(**inputs)[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(tmpdir, safe_serialization=False) # NOTE: Resolution must be set to 32 for loading otherwise will lead to OOM on CI hardware # This attribute is not serialized in the config of the pipeline pipe_loaded = self.pipeline_class.from_pretrained(tmpdir, resolution=32) for component in pipe_loaded.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe_loaded.to(torch_device) pipe_loaded.set_progress_bar_config(disable=None) for optional_component in pipe._optional_components: self.assertTrue( getattr(pipe_loaded, optional_component) is None, f"`{optional_component}` did not stay set to None after loading.", ) inputs = self.get_dummy_inputs(generator_device) torch.manual_seed(0) output_loaded = pipe_loaded(**inputs)[0] max_diff = np.abs(to_np(output) - to_np(output_loaded)).max() self.assertLess(max_diff, expected_max_difference) @unittest.skipIf(torch_device not in ["cuda", "xpu"], reason="float16 requires CUDA or XPU") @require_accelerator def test_save_load_float16(self, expected_max_diff=1e-2): components = self.get_dummy_components() for name, module in components.items(): if hasattr(module, "half"): components[name] = module.to(torch_device).half() pipe = self.pipeline_class(**components) for component in pipe.components.values(): if hasattr(component, 
"set_default_attn_processor"): component.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) output = pipe(**inputs)[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(tmpdir) # NOTE: Resolution must be set to 32 for loading otherwise will lead to OOM on CI hardware # This attribute is not serialized in the config of the pipeline pipe_loaded = self.pipeline_class.from_pretrained(tmpdir, torch_dtype=torch.float16, resolution=32) for component in pipe_loaded.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe_loaded.to(torch_device) pipe_loaded.set_progress_bar_config(disable=None) for name, component in pipe_loaded.components.items(): if hasattr(component, "dtype"): self.assertTrue( component.dtype == torch.float16, f"`{name}.dtype` switched from `float16` to {component.dtype} after loading.", ) inputs = self.get_dummy_inputs(torch_device) output_loaded = pipe_loaded(**inputs)[0] max_diff = np.abs(to_np(output) - to_np(output_loaded)).max() self.assertLess( max_diff, expected_max_diff, "The output of the fp16 pipeline changed after saving and loading." )
diffusers/tests/pipelines/visualcloze/test_pipeline_visualcloze_combined.py/0
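VisualClozePipeline, tested above, takes a nested image list: each in-context row holds an input/output example pair, and the final query row ends with None in place of the image to be generated. As a hedged sketch that is not part of the test file, usage could look like the following; the checkpoint id and local file names are hypothetical placeholders, while the prompt and argument structure mirror get_dummy_inputs above.

# Hedged sketch of VisualClozePipeline usage. The checkpoint id and file names are hypothetical;
# the nested image layout (in-context pair + query row ending in None) mirrors the tests above.
import torch
from diffusers import VisualClozePipeline
from diffusers.utils import load_image

pipe = VisualClozePipeline.from_pretrained(
    "your-org/visualcloze-checkpoint",  # hypothetical id, replace with a real checkpoint
    torch_dtype=torch.bfloat16,
).to("cuda")  # or any available accelerator

image = [
    [load_image("example_depth.png"), load_image("example_rgb.png")],  # in-context example pair
    [load_image("query_depth.png"), None],                             # query row, target is None
]

result = pipe(
    task_prompt="Each row outlines a logical process, starting from a depth map to achieve an image with flawless clarity.",
    content_prompt="A beautiful landscape with mountains and a lake",
    image=image,
    num_inference_steps=30,
    guidance_scale=5.0,
    upsampling_strength=0.4,
).images[0]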
{ "file_path": "diffusers/tests/pipelines/visualcloze/test_pipeline_visualcloze_combined.py", "repo_id": "diffusers", "token_count": 6136 }
204
import torch from diffusers import DDPMScheduler from .test_schedulers import SchedulerCommonTest class DDPMSchedulerTest(SchedulerCommonTest): scheduler_classes = (DDPMScheduler,) def get_scheduler_config(self, **kwargs): config = { "num_train_timesteps": 1000, "beta_start": 0.0001, "beta_end": 0.02, "beta_schedule": "linear", "variance_type": "fixed_small", "clip_sample": True, } config.update(**kwargs) return config def test_timesteps(self): for timesteps in [1, 5, 100, 1000]: self.check_over_configs(num_train_timesteps=timesteps) def test_betas(self): for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]): self.check_over_configs(beta_start=beta_start, beta_end=beta_end) def test_schedules(self): for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=schedule) def test_variance_type(self): for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=variance) def test_clip_sample(self): for clip_sample in [True, False]: self.check_over_configs(clip_sample=clip_sample) def test_thresholding(self): self.check_over_configs(thresholding=False) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=True, prediction_type=prediction_type, sample_max_value=threshold, ) def test_prediction_type(self): for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=prediction_type) def test_time_indices(self): for t in [0, 500, 999]: self.check_over_forward(time_step=t) def test_variance(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5 def test_rescale_betas_zero_snr(self): for rescale_betas_zero_snr in [True, False]: self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr) def test_full_loop_no_noise(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) num_trained_timesteps = len(scheduler) model = self.dummy_model() sample = self.dummy_sample_deter generator = torch.manual_seed(0) for t in reversed(range(num_trained_timesteps)): # 1. predict noise residual residual = model(sample, t) # 2. predict previous mean of sample x_t-1 pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance sample = pred_prev_sample result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) assert abs(result_sum.item() - 258.9606) < 1e-2 assert abs(result_mean.item() - 0.3372) < 1e-3 def test_full_loop_with_v_prediction(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config(prediction_type="v_prediction") scheduler = scheduler_class(**scheduler_config) num_trained_timesteps = len(scheduler) model = self.dummy_model() sample = self.dummy_sample_deter generator = torch.manual_seed(0) for t in reversed(range(num_trained_timesteps)): # 1. predict noise residual residual = model(sample, t) # 2. 
predict previous mean of sample x_t-1 pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance sample = pred_prev_sample result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) assert abs(result_sum.item() - 202.0296) < 1e-2 assert abs(result_mean.item() - 0.2631) < 1e-3 def test_custom_timesteps(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) timesteps = [100, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=timesteps) scheduler_timesteps = scheduler.timesteps for i, timestep in enumerate(scheduler_timesteps): if i == len(timesteps) - 1: expected_prev_t = -1 else: expected_prev_t = timesteps[i + 1] prev_t = scheduler.previous_timestep(timestep) prev_t = prev_t.item() self.assertEqual(prev_t, expected_prev_t) def test_custom_timesteps_increasing_order(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) timesteps = [100, 87, 50, 51, 0] with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."): scheduler.set_timesteps(timesteps=timesteps) def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) timesteps = [100, 87, 50, 1, 0] num_inference_steps = len(timesteps) with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."): scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps) def test_custom_timesteps_too_large(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) timesteps = [scheduler.config.num_train_timesteps] with self.assertRaises( ValueError, msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}", ): scheduler.set_timesteps(timesteps=timesteps) def test_full_loop_with_noise(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) num_trained_timesteps = len(scheduler) t_start = num_trained_timesteps - 2 model = self.dummy_model() sample = self.dummy_sample_deter generator = torch.manual_seed(0) # add noise noise = self.dummy_noise_deter timesteps = scheduler.timesteps[t_start * scheduler.order :] sample = scheduler.add_noise(sample, noise, timesteps[:1]) for t in timesteps: # 1. predict noise residual residual = model(sample, t) # 2. predict previous mean of sample x_t-1 pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample sample = pred_prev_sample result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) assert abs(result_sum.item() - 387.9466) < 1e-2, f" expected result sum 387.9466, but get {result_sum}" assert abs(result_mean.item() - 0.5051) < 1e-3, f" expected result mean 0.5051, but get {result_mean}"
diffusers/tests/schedulers/test_scheduler_ddpm.py/0
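The DDPM scheduler tests above drive the scheduler through full denoising loops with a dummy model. As a minimal standalone sketch of the same loop (a stand-in tensor replaces a real UNet's noise prediction), the pattern is:

# Minimal sketch of the sampling loop exercised by test_full_loop_no_noise, using DDPMScheduler
# on its own. The zero tensor stands in for a real model's noise prediction.
import torch
from diffusers import DDPMScheduler

scheduler = DDPMScheduler(num_train_timesteps=1000, beta_schedule="linear")
scheduler.set_timesteps(50)  # inference may use fewer steps than training

sample = torch.randn(1, 3, 32, 32)  # start from pure noise
generator = torch.manual_seed(0)

for t in scheduler.timesteps:
    noise_pred = torch.zeros_like(sample)  # stand-in for model(sample, t)
    sample = scheduler.step(noise_pred, t, sample, generator=generator).prev_sample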
{ "file_path": "diffusers/tests/schedulers/test_scheduler_ddpm.py", "repo_id": "diffusers", "token_count": 3860 }
205
import tempfile from typing import Dict, List, Tuple import torch from diffusers import LCMScheduler from diffusers.utils.testing_utils import torch_device from .test_schedulers import SchedulerCommonTest class LCMSchedulerTest(SchedulerCommonTest): scheduler_classes = (LCMScheduler,) forward_default_kwargs = (("num_inference_steps", 10),) def get_scheduler_config(self, **kwargs): config = { "num_train_timesteps": 1000, "beta_start": 0.00085, "beta_end": 0.0120, "beta_schedule": "scaled_linear", "prediction_type": "epsilon", } config.update(**kwargs) return config @property def default_valid_timestep(self): kwargs = dict(self.forward_default_kwargs) num_inference_steps = kwargs.pop("num_inference_steps", None) scheduler_config = self.get_scheduler_config() scheduler = self.scheduler_classes[0](**scheduler_config) scheduler.set_timesteps(num_inference_steps) timestep = scheduler.timesteps[-1] return timestep def test_timesteps(self): for timesteps in [100, 500, 1000]: # 0 is not guaranteed to be in the timestep schedule, but timesteps - 1 is self.check_over_configs(time_step=timesteps - 1, num_train_timesteps=timesteps) def test_betas(self): for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]): self.check_over_configs(time_step=self.default_valid_timestep, beta_start=beta_start, beta_end=beta_end) def test_schedules(self): for schedule in ["linear", "scaled_linear", "squaredcos_cap_v2"]: self.check_over_configs(time_step=self.default_valid_timestep, beta_schedule=schedule) def test_prediction_type(self): for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(time_step=self.default_valid_timestep, prediction_type=prediction_type) def test_clip_sample(self): for clip_sample in [True, False]: self.check_over_configs(time_step=self.default_valid_timestep, clip_sample=clip_sample) def test_thresholding(self): self.check_over_configs(time_step=self.default_valid_timestep, thresholding=False) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs( time_step=self.default_valid_timestep, thresholding=True, prediction_type=prediction_type, sample_max_value=threshold, ) def test_time_indices(self): # Get default timestep schedule. 
kwargs = dict(self.forward_default_kwargs) num_inference_steps = kwargs.pop("num_inference_steps", None) scheduler_config = self.get_scheduler_config() scheduler = self.scheduler_classes[0](**scheduler_config) scheduler.set_timesteps(num_inference_steps) timesteps = scheduler.timesteps for t in timesteps: self.check_over_forward(time_step=t) def test_inference_steps(self): # Hardcoded for now for t, num_inference_steps in zip([99, 39, 39, 19], [10, 25, 26, 50]): self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps) # Override test_add_noise_device because the hardcoded num_inference_steps of 100 doesn't work # for LCMScheduler under default settings def test_add_noise_device(self, num_inference_steps=10): for scheduler_class in self.scheduler_classes: scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) scheduler.set_timesteps(num_inference_steps) sample = self.dummy_sample.to(torch_device) scaled_sample = scheduler.scale_model_input(sample, 0.0) self.assertEqual(sample.shape, scaled_sample.shape) noise = torch.randn(scaled_sample.shape).to(torch_device) t = scheduler.timesteps[5][None] noised = scheduler.add_noise(scaled_sample, noise, t) self.assertEqual(noised.shape, scaled_sample.shape) # Override test_from_save_pretrained because it hardcodes a timestep of 1 def test_from_save_pretrained(self): kwargs = dict(self.forward_default_kwargs) num_inference_steps = kwargs.pop("num_inference_steps", None) for scheduler_class in self.scheduler_classes: timestep = self.default_valid_timestep scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) sample = self.dummy_sample residual = 0.1 * sample with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(tmpdirname) new_scheduler = scheduler_class.from_pretrained(tmpdirname) scheduler.set_timesteps(num_inference_steps) new_scheduler.set_timesteps(num_inference_steps) kwargs["generator"] = torch.manual_seed(0) output = scheduler.step(residual, timestep, sample, **kwargs).prev_sample kwargs["generator"] = torch.manual_seed(0) new_output = new_scheduler.step(residual, timestep, sample, **kwargs).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" # Override test_step_shape because uses 0 and 1 as hardcoded timesteps def test_step_shape(self): kwargs = dict(self.forward_default_kwargs) num_inference_steps = kwargs.pop("num_inference_steps", None) for scheduler_class in self.scheduler_classes: scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) sample = self.dummy_sample residual = 0.1 * sample scheduler.set_timesteps(num_inference_steps) timestep_0 = scheduler.timesteps[-2] timestep_1 = scheduler.timesteps[-1] output_0 = scheduler.step(residual, timestep_0, sample, **kwargs).prev_sample output_1 = scheduler.step(residual, timestep_1, sample, **kwargs).prev_sample self.assertEqual(output_0.shape, sample.shape) self.assertEqual(output_0.shape, output_1.shape) # Override test_set_scheduler_outputs_equivalence since it uses 0 as a hardcoded timestep def test_scheduler_outputs_equivalence(self): def set_nan_tensor_to_zero(t): t[t != t] = 0 return t def recursive_check(tuple_object, dict_object): if isinstance(tuple_object, (List, Tuple)): for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object.values()): recursive_check(tuple_iterable_value, dict_iterable_value) elif isinstance(tuple_object, Dict): for 
tuple_iterable_value, dict_iterable_value in zip(tuple_object.values(), dict_object.values()): recursive_check(tuple_iterable_value, dict_iterable_value) elif tuple_object is None: return else: self.assertTrue( torch.allclose( set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5 ), msg=( "Tuple and dict output are not equal. Difference:" f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:" f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has" f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}." ), ) kwargs = dict(self.forward_default_kwargs) num_inference_steps = kwargs.pop("num_inference_steps", 50) timestep = self.default_valid_timestep for scheduler_class in self.scheduler_classes: scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) sample = self.dummy_sample residual = 0.1 * sample scheduler.set_timesteps(num_inference_steps) kwargs["generator"] = torch.manual_seed(0) outputs_dict = scheduler.step(residual, timestep, sample, **kwargs) scheduler.set_timesteps(num_inference_steps) kwargs["generator"] = torch.manual_seed(0) outputs_tuple = scheduler.step(residual, timestep, sample, return_dict=False, **kwargs) recursive_check(outputs_tuple, outputs_dict) def full_loop(self, num_inference_steps=10, seed=0, **config): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config(**config) scheduler = scheduler_class(**scheduler_config) model = self.dummy_model() sample = self.dummy_sample_deter generator = torch.manual_seed(seed) scheduler.set_timesteps(num_inference_steps) for t in scheduler.timesteps: residual = model(sample, t) sample = scheduler.step(residual, t, sample, generator).prev_sample return sample def test_full_loop_onestep(self): sample = self.full_loop(num_inference_steps=1) result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) # TODO: get expected sum and mean assert abs(result_sum.item() - 18.7097) < 1e-3 assert abs(result_mean.item() - 0.0244) < 1e-3 def test_full_loop_multistep(self): sample = self.full_loop(num_inference_steps=10) result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) # TODO: get expected sum and mean assert abs(result_sum.item() - 197.7616) < 1e-3 assert abs(result_mean.item() - 0.2575) < 1e-3 def test_custom_timesteps(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) timesteps = [100, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=timesteps) scheduler_timesteps = scheduler.timesteps for i, timestep in enumerate(scheduler_timesteps): if i == len(timesteps) - 1: expected_prev_t = -1 else: expected_prev_t = timesteps[i + 1] prev_t = scheduler.previous_timestep(timestep) prev_t = prev_t.item() self.assertEqual(prev_t, expected_prev_t) def test_custom_timesteps_increasing_order(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) timesteps = [100, 87, 50, 51, 0] with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."): scheduler.set_timesteps(timesteps=timesteps) def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) timesteps = 
[100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.num_train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
diffusers/tests/schedulers/test_scheduler_lcm.py/0
{ "file_path": "diffusers/tests/schedulers/test_scheduler_lcm.py", "repo_id": "diffusers", "token_count": 5668 }
206
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from diffusers import ( MotionAdapter, ) from diffusers.utils.testing_utils import ( enable_full_determinism, ) enable_full_determinism() class MotionAdapterSingleFileTests(unittest.TestCase): model_class = MotionAdapter def test_single_file_components_version_v1_5(self): ckpt_path = "https://huggingface.co/guoyww/animatediff/blob/main/mm_sd_v15.ckpt" repo_id = "guoyww/animatediff-motion-adapter-v1-5" model = self.model_class.from_pretrained(repo_id) model_single_file = self.model_class.from_single_file(ckpt_path) PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"] for param_name, param_value in model_single_file.config.items(): if param_name in PARAMS_TO_IGNORE: continue assert model.config[param_name] == param_value, ( f"{param_name} differs between pretrained loading and single file loading" ) def test_single_file_components_version_v1_5_2(self): ckpt_path = "https://huggingface.co/guoyww/animatediff/blob/main/mm_sd_v15_v2.ckpt" repo_id = "guoyww/animatediff-motion-adapter-v1-5-2" model = self.model_class.from_pretrained(repo_id) model_single_file = self.model_class.from_single_file(ckpt_path) PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"] for param_name, param_value in model_single_file.config.items(): if param_name in PARAMS_TO_IGNORE: continue assert model.config[param_name] == param_value, ( f"{param_name} differs between pretrained loading and single file loading" ) def test_single_file_components_version_v1_5_3(self): ckpt_path = "https://huggingface.co/guoyww/animatediff/blob/main/v3_sd15_mm.ckpt" repo_id = "guoyww/animatediff-motion-adapter-v1-5-3" model = self.model_class.from_pretrained(repo_id) model_single_file = self.model_class.from_single_file(ckpt_path) PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"] for param_name, param_value in model_single_file.config.items(): if param_name in PARAMS_TO_IGNORE: continue assert model.config[param_name] == param_value, ( f"{param_name} differs between pretrained loading and single file loading" ) def test_single_file_components_version_sdxl_beta(self): ckpt_path = "https://huggingface.co/guoyww/animatediff/blob/main/mm_sdxl_v10_beta.ckpt" repo_id = "guoyww/animatediff-motion-adapter-sdxl-beta" model = self.model_class.from_pretrained(repo_id) model_single_file = self.model_class.from_single_file(ckpt_path) PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"] for param_name, param_value in model_single_file.config.items(): if param_name in PARAMS_TO_IGNORE: continue assert model.config[param_name] == param_value, ( f"{param_name} differs between pretrained loading and single file loading" )
diffusers/tests/single_file/test_model_motion_adapter_single_file.py/0
{ "file_path": "diffusers/tests/single_file/test_model_motion_adapter_single_file.py", "repo_id": "diffusers", "token_count": 1633 }
207
import gc
import unittest

import torch

from diffusers import StableDiffusionXLInstructPix2PixPipeline
from diffusers.utils.testing_utils import (
    backend_empty_cache,
    enable_full_determinism,
    require_torch_accelerator,
    slow,
    torch_device,
)

enable_full_determinism()


@slow
@require_torch_accelerator
class StableDiffusionXLInstructPix2PixPipelineSingleFileSlowTests(unittest.TestCase):
    # Named so it does not shadow the imported pipeline class
    pipeline_class = StableDiffusionXLInstructPix2PixPipeline
    ckpt_path = "https://huggingface.co/stabilityai/cosxl/blob/main/cosxl_edit.safetensors"
    original_config = None
    repo_id = "diffusers/sdxl-instructpix2pix-768"

    def setUp(self):
        super().setUp()
        gc.collect()
        backend_empty_cache(torch_device)

    def tearDown(self):
        super().tearDown()
        gc.collect()
        backend_empty_cache(torch_device)

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        inputs = {
            "prompt": "a fantasy landscape, concept art, high resolution",
            "generator": generator,
            "num_inference_steps": 2,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "np",
        }
        return inputs

    def test_single_file_setting_cosxl_edit(self):
        # Default is PNDM for this checkpoint
        pipe = self.pipeline_class.from_single_file(self.ckpt_path, config=self.repo_id, is_cosxl_edit=True)
        assert pipe.is_cosxl_edit is True
diffusers/tests/single_file/test_stable_diffusion_xl_instruct_pix2pix.py/0
{ "file_path": "diffusers/tests/single_file/test_stable_diffusion_xl_instruct_pix2pix.py", "repo_id": "diffusers", "token_count": 685 }
208
# Asynchronous Inference

With our [SmolVLA](https://huggingface.co/papers/2506.01844) we introduced a new way to run inference on real-world robots, **decoupling action prediction from action execution**. In this tutorial, we'll show how to use asynchronous inference (_async inference_) with a finetuned version of SmolVLA. **Try async inference with all the policies supported by LeRobot!**

**What you'll learn:**

1. Why asynchronous inference matters and how it compares to more traditional, sequential inference.
2. How to spin up a `PolicyServer` and connect a `RobotClient`, either on the same machine or over the network.
3. How to tune key parameters (`actions_per_chunk`, `chunk_size_threshold`) for your robot and policy.

If you get stuck, hop into our [Discord community](https://discord.gg/s3KuuzsPFb)!

In a nutshell: with _async inference_, your robot keeps acting while the policy server is already busy computing the next chunk of actions---eliminating "wait-for-inference" lags and unlocking smoother, more reactive behaviours. This is fundamentally different from synchronous inference (sync), where the robot stays idle while the policy computes the next chunk of actions.

---

## Getting started with async inference

You can read more about asynchronous inference in our [blogpost](https://huggingface.co/blog/async-robot-inference). This guide is designed to help you quickly set up and run asynchronous inference in your environment.

First, install `lerobot` with the `async` extra to get the additional dependencies required to run async inference:

```shell
pip install -e ".[async]"
```

Then, spin up a policy server (in another terminal, or on a separate machine), specifying the host address and port the client will connect to:

```shell
python src/lerobot/scripts/server/policy_server.py \
    --host=127.0.0.1 \
    --port=8080
```

This starts a policy server listening on `127.0.0.1:8080` (`localhost`, port 8080). At this stage, the policy server is empty: which policy to run, and with which parameters, is specified during the first handshake with the client.

Spin up a client with:

```shell
python src/lerobot/scripts/server/robot_client.py \
    --server_address=127.0.0.1:8080 \ # SERVER: the host address and port of the policy server
    --robot.type=so100_follower \ # ROBOT: your robot type
    --robot.port=/dev/tty.usbmodem585A0076841 \ # ROBOT: your robot port
    --robot.id=follower_so100 \ # ROBOT: your robot id, to load the calibration file
    --robot.cameras="{ laptop: {type: opencv, index_or_path: 0, width: 1920, height: 1080, fps: 30}, phone: {type: opencv, index_or_path: 0, width: 1920, height: 1080, fps: 30}}" \ # POLICY: the cameras used to acquire frames, with keys matching the keys expected by the policy
    --task="dummy" \ # POLICY: the task to run the policy on (`Fold my t-shirt`). Not necessarily defined for all policies, such as `act`
    --policy_type=your_policy_type \ # POLICY: the type of policy to run (smolvla, act, etc.)
    --pretrained_name_or_path=user/model \ # POLICY: the model name or path on the server pointing to the checkpoint to run (e.g., lerobot/smolvla_base)
    --policy_device=mps \ # POLICY: the device the policy runs on, on the server
    --actions_per_chunk=50 \ # POLICY: the number of actions to output at once
    --chunk_size_threshold=0.5 \ # CLIENT: the queue threshold below which a new observation is sent to the server
    --aggregate_fn_name=weighted_average \ # CLIENT: the function used to aggregate actions on overlapping portions
    --debug_visualize_queue_size=True # CLIENT: whether to visualize the queue size at runtime
```

In summary, you need to specify instructions for:

- `SERVER`: the address and port of the policy server.
- `ROBOT`: the type of robot to connect to, the port to connect to, and the local `id` of the robot.
- `POLICY`: the type of policy to run, and the model name or path on the server pointing to the checkpoint to run. You also need to specify which device the server should use, and how many actions to output at once (capped at the policy's max actions value).
- `CLIENT`: the queue threshold below which a new observation is sent to the server, and the function used to aggregate actions on overlapping portions. Optionally, you can also visualize the queue size at runtime, to help you tune the `CLIENT` parameters.

Importantly,

- `actions_per_chunk` and `chunk_size_threshold` are the key parameters to tune for your setup.
- `aggregate_fn_name` is the function used to aggregate actions on overlapping portions. You can either add a new one to the registry of functions, or add your own in `robot_client.py` (see [here](NOTE:addlinktoLOC)).
- `debug_visualize_queue_size` is a useful tool to tune the `CLIENT` parameters.

## Done!

You should see your robot moving around by now 😉

## Async vs. synchronous inference

Synchronous inference interleaves action chunk prediction and action execution. This inherently results in _idle frames_: frames where the robot sits idle, waiting for the policy to output a new action chunk. As a result, inference suffers from visible real-time lags, where the robot simply stops acting because no actions are available. With robotics models growing in size, this problem risks becoming only more severe.

<p align="center">
  <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/async-inference/sync.png" width="80%" ></img>
</p>
<p align="center">
  <i>Synchronous inference</i> makes the robot idle while the policy is computing the next chunk of actions.
</p>

To overcome this, we designed async inference, a paradigm where action planning and execution are decoupled, resulting in (1) higher adaptability and, most importantly, (2) no idle frames. Crucially, with async inference the next action chunk is computed _before_ the current one is exhausted, so the robot is never left without actions. Higher adaptability is ensured by aggregating the different action chunks on their overlapping portions, obtaining an up-to-date plan and a tighter control loop.

<p align="center">
  <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/async-inference/async.png" width="80%" ></img>
</p>
<p align="center">
  <i>Asynchronous inference</i> results in no idleness because the next chunk is computed before the current chunk is exhausted.
</p>
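To make the mechanism concrete, here is a minimal sketch of the client-side logic described above. It is purely illustrative: `robot`, `policy_client`, and the function names are hypothetical stand-ins, not the actual `RobotClient` API.

```python
from collections import deque

ACTIONS_PER_CHUNK = 50      # actions returned by the server per inference call
CHUNK_SIZE_THRESHOLD = 0.5  # queue fraction below which a new observation is sent

action_queue: deque = deque()


def control_step(robot, policy_client) -> None:
    # Request the next chunk *before* the current one is exhausted,
    # so inference on the server overlaps with execution on the robot.
    if len(action_queue) / ACTIONS_PER_CHUNK <= CHUNK_SIZE_THRESHOLD:
        policy_client.send_observation(robot.get_observation())  # non-blocking

    # Keep executing whatever is already queued: no idle frames.
    if action_queue:
        robot.send_action(action_queue.popleft())


def on_chunk_received(chunk: list) -> None:
    # In the real client, the overlap between the remaining queue and the new
    # chunk is aggregated (see `aggregate_fn_name`); here the queue is simply
    # refreshed with the newest predictions.
    action_queue.clear()
    action_queue.extend(chunk)
```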
---

## Start the Policy Server

Policy servers are wrappers around a `PreTrainedPolicy`, interfacing it with observations coming from a robot client. Policy servers are initialized as empty containers and are populated with the requested policy during the initial handshake between the robot client and the policy server. As such, spinning up a policy server is as easy as specifying a host address and port. If you're running the policy server on the same machine as the robot client, you can use `localhost` as the host address.

<hfoptions id="start_policy_server">
<hfoption id="Command">

```bash
python -m lerobot.scripts.server.policy_server \
    --host="localhost" \
    --port=8080
```

</hfoption>
<hfoption id="API example">

<!-- prettier-ignore-start -->
```python
from lerobot.scripts.server.configs import PolicyServerConfig
from lerobot.scripts.server.policy_server import serve

config = PolicyServerConfig(
    host="localhost",
    port=8080,
)
serve(config)
```
<!-- prettier-ignore-end -->

</hfoption>
</hfoptions>

This listens on `localhost:8080` for an incoming connection from the associated `RobotClient`, which will communicate which policy to run during the first client-server handshake.

---

## Launch the Robot Client

`RobotClient` is a wrapper around a `Robot` instance that connects to the (possibly remote) `PolicyServer`. The `RobotClient` streams observations to the `PolicyServer` and receives action chunks obtained by running inference on the server (which we assume has better computational resources than the robot controller).

<hfoptions id="start_robot_client">
<hfoption id="Command">

```bash
python src/lerobot/scripts/server/robot_client.py \
    --server_address=127.0.0.1:8080 \ # SERVER: the host address and port of the policy server
    --robot.type=so100_follower \ # ROBOT: your robot type
    --robot.port=/dev/tty.usbmodem585A0076841 \ # ROBOT: your robot port
    --robot.id=follower_so100 \ # ROBOT: your robot id, to load the calibration file
    --robot.cameras="{ laptop: {type: opencv, index_or_path: 0, width: 1920, height: 1080, fps: 30}, phone: {type: opencv, index_or_path: 0, width: 1920, height: 1080, fps: 30}}" \ # POLICY: the cameras used to acquire frames, with keys matching the keys expected by the policy
    --task="dummy" \ # POLICY: the task to run the policy on (`Fold my t-shirt`). Not necessarily defined for all policies, such as `act`
    --policy_type=your_policy_type \ # POLICY: the type of policy to run (smolvla, act, etc.)
    --pretrained_name_or_path=user/model \ # POLICY: the model name or path on the server pointing to the checkpoint to run (e.g., lerobot/smolvla_base)
    --policy_device=mps \ # POLICY: the device the policy runs on, on the server
    --actions_per_chunk=50 \ # POLICY: the number of actions to output at once
    --chunk_size_threshold=0.5 \ # CLIENT: the queue threshold below which a new observation is sent to the server
    --aggregate_fn_name=weighted_average \ # CLIENT: the function used to aggregate actions on overlapping portions
    --debug_visualize_queue_size=True # CLIENT: whether to visualize the queue size at runtime
```

</hfoption>
<hfoption id="API example">

<!-- prettier-ignore-start -->
```python
import threading

from lerobot.robots.so100_follower import SO100FollowerConfig
from lerobot.cameras.opencv.configuration_opencv import OpenCVCameraConfig
from lerobot.scripts.server.configs import RobotClientConfig
from lerobot.scripts.server.robot_client import RobotClient
from lerobot.scripts.server.helpers import visualize_action_queue_size

# 1. Create the robot instance
"""Check out the cameras available in your setup by running `python lerobot/find_cameras.py`"""
# these cameras must match the ones expected by the policy
# check the config.json on the Hub for the policy you are using
camera_cfg = {
    "top": OpenCVCameraConfig(index_or_path=0, width=640, height=480, fps=30),
    "side": OpenCVCameraConfig(index_or_path=1, width=640, height=480, fps=30)
}

robot_cfg = SO100FollowerConfig(
    port="/dev/tty.usbmodem585A0076841",
    id="follower_so100",
    cameras=camera_cfg
)

# 2. Create the client configuration
client_cfg = RobotClientConfig(
    robot=robot_cfg,
    server_address="localhost:8080",
    policy_device="mps",
    policy_type="smolvla",
    pretrained_name_or_path="fracapuano/smolvla_async",
    chunk_size_threshold=0.5,
    actions_per_chunk=50,  # make sure this is less than the max actions of the policy
)

# 3. Create and start the client
client = RobotClient(client_cfg)

# 4. Specify the task
task = "Don't do anything, stay still"

if client.start():
    # Start the action receiver thread
    action_receiver_thread = threading.Thread(target=client.receive_actions, daemon=True)
    action_receiver_thread.start()

    try:
        # Run the control loop
        client.control_loop(task)
    except KeyboardInterrupt:
        client.stop()
        action_receiver_thread.join()

        # (Optionally) plot the action queue size
        visualize_action_queue_size(client.action_queue_size)
```
<!-- prettier-ignore-end -->

</hfoption>
</hfoptions>

The following two parameters are key in every setup:

<table>
  <thead>
    <tr>
      <th>Hyperparameter</th>
      <th>Default</th>
      <th>What it does</th>
    </tr>
  </thead>
  <tbody>
    <tr>
      <td>
        <code>actions_per_chunk</code>
      </td>
      <td>50</td>
      <td>
        How many actions the policy outputs at once. Typical values: 10-50.
      </td>
    </tr>
    <tr>
      <td>
        <code>chunk_size_threshold</code>
      </td>
      <td>0.7</td>
      <td>
        When the queue is at most this fraction full (70% with the default), the client sends a fresh observation. Value in [0, 1].
      </td>
    </tr>
  </tbody>
</table>

<Tip>
Different values of `actions_per_chunk` and `chunk_size_threshold` result in noticeably different behaviours.
</Tip>

On the one hand, increasing `actions_per_chunk` reduces the likelihood of running out of actions to execute, as more actions are available when the new chunk is computed. However, larger values of `actions_per_chunk` may also result in less precise actions, due to the compounding errors that come with predicting actions over longer timespans.

On the other hand, increasing `chunk_size_threshold` means observations are sent to the `PolicyServer` for inference more often, resulting in a larger number of updated action chunks that overlap on significant portions. This yields high adaptability: in the limit, one action chunk is predicted for every observation, each only marginally consumed before a new one arrives. It also puts more pressure on the inference pipeline because of the many requests. Conversely, values of `chunk_size_threshold` close to 0.0 collapse to the synchronous edge case, in which new observations are only sent out once the current chunk is exhausted.

We found the default values of `actions_per_chunk` and `chunk_size_threshold` to work well in the experiments we developed for the [SmolVLA paper](https://huggingface.co/papers/2506.01844), but we recommend experimenting with different values to find the best fit for your setup.
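The aggregation of overlapping chunks mentioned above can be pictured with a small, self-contained sketch. This is an illustration of the idea, not the actual `weighted_average` implementation shipped with the client; the array shapes and the linear weighting scheme are assumptions.

```python
import numpy as np


def aggregate_chunks(old_chunk: np.ndarray, new_chunk: np.ndarray, overlap: int) -> np.ndarray:
    """Blend two (T, action_dim) chunks on their `overlap` shared timesteps."""
    # Weights go from 1 -> 0: trust the old plan at first, the new plan later.
    weights = np.linspace(1.0, 0.0, overlap)[:, None]
    blended = weights * old_chunk[-overlap:] + (1.0 - weights) * new_chunk[:overlap]
    # Keep the old prefix, the blended overlap, and the fresh tail of the new chunk.
    return np.concatenate([old_chunk[:-overlap], blended, new_chunk[overlap:]])


# Example: two chunks of 50 actions (6 motors) overlapping on 20 timesteps.
old = np.zeros((50, 6))
new = np.ones((50, 6))
merged = aggregate_chunks(old, new, overlap=20)
print(merged.shape)  # (80, 6)
```

A higher `chunk_size_threshold` simply makes such overlaps longer and more frequent.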
### Tuning async inference for your setup

1. **Choose your computational resources carefully.** [PI0](https://huggingface.co/lerobot/pi0) occupies 14GB of memory at inference time, while [SmolVLA](https://huggingface.co/lerobot/smolvla_base) requires only ~2GB. Identify the best computational resource for your use case, keeping in mind that smaller policies require fewer resources. The combination of policy and device used (CPU-intensive, using MPS, or the number of CUDA cores on a given NVIDIA GPU) directly impacts the average inference latency you should expect.

2. **Adjust your `fps` based on inference latency.** While the server generates a new action chunk, the client is not idle and keeps stepping through its current action queue. If the two processes run at fundamentally different speeds, the client might end up with an empty queue. You should therefore reduce your fps if you consistently run out of actions in the queue.

3. **Adjust `chunk_size_threshold`.**
   - Values closer to `0.0` result in almost sequential behaviour; values closer to `1.0` send an observation at nearly every step (more bandwidth, and more reliance on a good world model).
   - We found values around 0.5-0.6 to work well. If you want to tweak this, spin up a `RobotClient` with `--debug_visualize_queue_size=True`. This plots the evolution of the action queue size at runtime, which you can use to find the value of `chunk_size_threshold` that works best for your setup.

<p align="center">
  <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/async-inference/queues.png" width="80%" ></img>
</p>
<p align="center">
  <i>
  The action queue size is plotted at runtime when the `--debug_visualize_queue_size` flag is passed, for various levels of `chunk_size_threshold` (`g` in the SmolVLA paper).
  </i>
</p>

---

## Conclusion

Asynchronous inference represents a significant advancement in real-time robotics control, addressing the fundamental challenge of inference latency that has long plagued robotics applications. Through this tutorial, you've learned how to implement a complete async inference pipeline that eliminates idle frames and enables smoother, more reactive robot behaviors.

**Key Takeaways:**

- **Paradigm Shift**: Async inference decouples action prediction from execution, allowing robots to continue acting while new action chunks are computed in parallel
- **Performance Benefits**: Eliminates "wait-for-inference" lags that are inherent in synchronous approaches, which becomes increasingly important as policy models grow larger
- **Flexible Architecture**: The server-client design enables distributed computing, where inference can run on powerful remote hardware while maintaining real-time robot control
- **Tunable Parameters**: Success depends on properly configuring `actions_per_chunk` and `chunk_size_threshold` for your specific hardware, policy, and task requirements
- **Universal Compatibility**: Works with all LeRobot-supported policies, from lightweight ACT models to vision-language models like SmolVLA

Start experimenting with the default parameters, monitor your action queue sizes, and iteratively refine your setup to achieve optimal performance for your specific use case. If you want to discuss this further, hop into our [Discord community](https://discord.gg/s3KuuzsPFb), or open an issue on our [GitHub repository](https://github.com/lerobot/lerobot/issues).
lerobot/docs/source/async.mdx/0
{ "file_path": "lerobot/docs/source/async.mdx", "repo_id": "lerobot", "token_count": 5042 }
209
# Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import builtins import datetime as dt import os from dataclasses import dataclass, field from pathlib import Path import draccus from huggingface_hub import hf_hub_download from huggingface_hub.errors import HfHubHTTPError from lerobot import envs from lerobot.configs import parser from lerobot.configs.default import DatasetConfig, EvalConfig, WandBConfig from lerobot.configs.policies import PreTrainedConfig from lerobot.optim import OptimizerConfig from lerobot.optim.schedulers import LRSchedulerConfig from lerobot.utils.hub import HubMixin TRAIN_CONFIG_NAME = "train_config.json" @dataclass class TrainPipelineConfig(HubMixin): dataset: DatasetConfig env: envs.EnvConfig | None = None policy: PreTrainedConfig | None = None # Set `dir` to where you would like to save all of the run outputs. If you run another training session # with the same value for `dir` its contents will be overwritten unless you set `resume` to true. output_dir: Path | None = None job_name: str | None = None # Set `resume` to true to resume a previous run. In order for this to work, you will need to make sure # `dir` is the directory of an existing run with at least one checkpoint in it. # Note that when resuming a run, the default behavior is to use the configuration from the checkpoint, # regardless of what's provided with the training command at the time of resumption. resume: bool = False # `seed` is used for training (eg: model initialization, dataset shuffling) # AND for the evaluation environments. seed: int | None = 1000 # Number of workers for the dataloader. num_workers: int = 4 batch_size: int = 8 steps: int = 100_000 eval_freq: int = 20_000 log_freq: int = 200 save_checkpoint: bool = True # Checkpoint is saved every `save_freq` training iterations and after the last training step. save_freq: int = 20_000 use_policy_training_preset: bool = True optimizer: OptimizerConfig | None = None scheduler: LRSchedulerConfig | None = None eval: EvalConfig = field(default_factory=EvalConfig) wandb: WandBConfig = field(default_factory=WandBConfig) def __post_init__(self): self.checkpoint_path = None def validate(self): # HACK: We parse again the cli args here to get the pretrained paths if there was some. policy_path = parser.get_path_arg("policy") if policy_path: # Only load the policy config cli_overrides = parser.get_cli_overrides("policy") self.policy = PreTrainedConfig.from_pretrained(policy_path, cli_overrides=cli_overrides) self.policy.pretrained_path = policy_path elif self.resume: # The entire train config is already loaded, we just need to get the checkpoint dir config_path = parser.parse_arg("config_path") if not config_path: raise ValueError( f"A config_path is expected when resuming a run. Please specify path to {TRAIN_CONFIG_NAME}" ) if not Path(config_path).resolve().exists(): raise NotADirectoryError( f"{config_path=} is expected to be a local path. " "Resuming from the hub is not supported for now." 
) policy_path = Path(config_path).parent self.policy.pretrained_path = policy_path self.checkpoint_path = policy_path.parent if not self.job_name: if self.env is None: self.job_name = f"{self.policy.type}" else: self.job_name = f"{self.env.type}_{self.policy.type}" if not self.resume and isinstance(self.output_dir, Path) and self.output_dir.is_dir(): raise FileExistsError( f"Output directory {self.output_dir} already exists and resume is {self.resume}. " f"Please change your output directory so that {self.output_dir} is not overwritten." ) elif not self.output_dir: now = dt.datetime.now() train_dir = f"{now:%Y-%m-%d}/{now:%H-%M-%S}_{self.job_name}" self.output_dir = Path("outputs/train") / train_dir if isinstance(self.dataset.repo_id, list): raise NotImplementedError("LeRobotMultiDataset is not currently implemented.") if not self.use_policy_training_preset and (self.optimizer is None or self.scheduler is None): raise ValueError("Optimizer and Scheduler must be set when the policy presets are not used.") elif self.use_policy_training_preset and not self.resume: self.optimizer = self.policy.get_optimizer_preset() self.scheduler = self.policy.get_scheduler_preset() if self.policy.push_to_hub and not self.policy.repo_id: raise ValueError( "'policy.repo_id' argument missing. Please specify it to push the model to the hub." ) @classmethod def __get_path_fields__(cls) -> list[str]: """This enables the parser to load config from the policy using `--policy.path=local/dir`""" return ["policy"] def to_dict(self) -> dict: return draccus.encode(self) def _save_pretrained(self, save_directory: Path) -> None: with open(save_directory / TRAIN_CONFIG_NAME, "w") as f, draccus.config_type("json"): draccus.dump(self, f, indent=4) @classmethod def from_pretrained( cls: builtins.type["TrainPipelineConfig"], pretrained_name_or_path: str | Path, *, force_download: bool = False, resume_download: bool = None, proxies: dict | None = None, token: str | bool | None = None, cache_dir: str | Path | None = None, local_files_only: bool = False, revision: str | None = None, **kwargs, ) -> "TrainPipelineConfig": model_id = str(pretrained_name_or_path) config_file: str | None = None if Path(model_id).is_dir(): if TRAIN_CONFIG_NAME in os.listdir(model_id): config_file = os.path.join(model_id, TRAIN_CONFIG_NAME) else: print(f"{TRAIN_CONFIG_NAME} not found in {Path(model_id).resolve()}") elif Path(model_id).is_file(): config_file = model_id else: try: config_file = hf_hub_download( repo_id=model_id, filename=TRAIN_CONFIG_NAME, revision=revision, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, token=token, local_files_only=local_files_only, ) except HfHubHTTPError as e: raise FileNotFoundError( f"{TRAIN_CONFIG_NAME} not found on the HuggingFace Hub in {model_id}" ) from e cli_args = kwargs.pop("cli_args", []) with draccus.config_type("json"): return draccus.parse(cls, config_file, args=cli_args) @dataclass(kw_only=True) class TrainRLServerPipelineConfig(TrainPipelineConfig): dataset: DatasetConfig | None = None # NOTE: In RL, we don't need an offline dataset
lerobot/src/lerobot/configs/train.py/0
{ "file_path": "lerobot/src/lerobot/configs/train.py", "repo_id": "lerobot", "token_count": 3339 }
210
# Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import traceback from pathlib import Path from datasets import get_dataset_config_info from huggingface_hub import HfApi from lerobot import available_datasets from lerobot.datasets.lerobot_dataset import LeRobotDatasetMetadata from lerobot.datasets.utils import INFO_PATH, write_info from lerobot.datasets.v21.convert_dataset_v20_to_v21 import V20, SuppressWarnings LOCAL_DIR = Path("data/") hub_api = HfApi() def fix_dataset(repo_id: str) -> str: if not hub_api.revision_exists(repo_id, V20, repo_type="dataset"): return f"{repo_id}: skipped (not in {V20})." dataset_info = get_dataset_config_info(repo_id, "default") with SuppressWarnings(): lerobot_metadata = LeRobotDatasetMetadata(repo_id, revision=V20, force_cache_sync=True) meta_features = {key for key, ft in lerobot_metadata.features.items() if ft["dtype"] != "video"} parquet_features = set(dataset_info.features) diff_parquet_meta = parquet_features - meta_features diff_meta_parquet = meta_features - parquet_features if diff_parquet_meta: raise ValueError(f"In parquet not in info.json: {parquet_features - meta_features}") if not diff_meta_parquet: return f"{repo_id}: skipped (no diff)" if diff_meta_parquet: logging.warning(f"In info.json not in parquet: {meta_features - parquet_features}") assert diff_meta_parquet == {"language_instruction"} lerobot_metadata.features.pop("language_instruction") write_info(lerobot_metadata.info, lerobot_metadata.root) commit_info = hub_api.upload_file( path_or_fileobj=lerobot_metadata.root / INFO_PATH, path_in_repo=INFO_PATH, repo_id=repo_id, repo_type="dataset", revision=V20, commit_message="Remove 'language_instruction'", create_pr=True, ) return f"{repo_id}: success - PR: {commit_info.pr_url}" def batch_fix(): status = {} LOCAL_DIR.mkdir(parents=True, exist_ok=True) logfile = LOCAL_DIR / "fix_features_v20.txt" for num, repo_id in enumerate(available_datasets): print(f"\nConverting {repo_id} ({num}/{len(available_datasets)})") print("---------------------------------------------------------") try: status = fix_dataset(repo_id) except Exception: status = f"{repo_id}: failed\n {traceback.format_exc()}" logging.info(status) with open(logfile, "a") as file: file.write(status + "\n") if __name__ == "__main__": batch_fix()
lerobot/src/lerobot/datasets/v21/_remove_language_instruction.py/0
{ "file_path": "lerobot/src/lerobot/datasets/v21/_remove_language_instruction.py", "repo_id": "lerobot", "token_count": 1268 }
211
# Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # TODO(aliberts): Should we implement FastSyncRead/Write? # https://github.com/ROBOTIS-GIT/DynamixelSDK/pull/643 # https://github.com/ROBOTIS-GIT/DynamixelSDK/releases/tag/3.8.2 # https://emanual.robotis.com/docs/en/dxl/protocol2/#fast-sync-read-0x8a # -> Need to check compatibility across models import logging from copy import deepcopy from enum import Enum from lerobot.utils.encoding_utils import decode_twos_complement, encode_twos_complement from ..motors_bus import Motor, MotorCalibration, MotorsBus, NameOrID, Value, get_address from .tables import ( AVAILABLE_BAUDRATES, MODEL_BAUDRATE_TABLE, MODEL_CONTROL_TABLE, MODEL_ENCODING_TABLE, MODEL_NUMBER_TABLE, MODEL_RESOLUTION, ) PROTOCOL_VERSION = 2.0 DEFAULT_BAUDRATE = 1_000_000 DEFAULT_TIMEOUT_MS = 1000 NORMALIZED_DATA = ["Goal_Position", "Present_Position"] logger = logging.getLogger(__name__) class OperatingMode(Enum): # DYNAMIXEL only controls current(torque) regardless of speed and position. This mode is ideal for a # gripper or a system that only uses current(torque) control or a system that has additional # velocity/position controllers. CURRENT = 0 # This mode controls velocity. This mode is identical to the Wheel Mode(endless) from existing DYNAMIXEL. # This mode is ideal for wheel-type robots. VELOCITY = 1 # This mode controls position. This mode is identical to the Joint Mode from existing DYNAMIXEL. Operating # position range is limited by the Max Position Limit(48) and the Min Position Limit(52). This mode is # ideal for articulated robots that each joint rotates less than 360 degrees. POSITION = 3 # This mode controls position. This mode is identical to the Multi-turn Position Control from existing # DYNAMIXEL. 512 turns are supported(-256[rev] ~ 256[rev]). This mode is ideal for multi-turn wrists or # conveyer systems or a system that requires an additional reduction gear. Note that Max Position # Limit(48), Min Position Limit(52) are not used on Extended Position Control Mode. EXTENDED_POSITION = 4 # This mode controls both position and current(torque). Up to 512 turns are supported (-256[rev] ~ # 256[rev]). This mode is ideal for a system that requires both position and current control such as # articulated robots or grippers. CURRENT_POSITION = 5 # This mode directly controls PWM output. (Voltage Control Mode) PWM = 16 class DriveMode(Enum): NON_INVERTED = 0 INVERTED = 1 class TorqueMode(Enum): ENABLED = 1 DISABLED = 0 def _split_into_byte_chunks(value: int, length: int) -> list[int]: import dynamixel_sdk as dxl if length == 1: data = [value] elif length == 2: data = [dxl.DXL_LOBYTE(value), dxl.DXL_HIBYTE(value)] elif length == 4: data = [ dxl.DXL_LOBYTE(dxl.DXL_LOWORD(value)), dxl.DXL_HIBYTE(dxl.DXL_LOWORD(value)), dxl.DXL_LOBYTE(dxl.DXL_HIWORD(value)), dxl.DXL_HIBYTE(dxl.DXL_HIWORD(value)), ] return data class DynamixelMotorsBus(MotorsBus): """ The Dynamixel implementation for a MotorsBus. 
It relies on the python dynamixel sdk to communicate with the motors. For more info, see the Dynamixel SDK Documentation: https://emanual.robotis.com/docs/en/software/dynamixel/dynamixel_sdk/sample_code/python_read_write_protocol_2_0/#python-read-write-protocol-20 """ apply_drive_mode = False available_baudrates = deepcopy(AVAILABLE_BAUDRATES) default_baudrate = DEFAULT_BAUDRATE default_timeout = DEFAULT_TIMEOUT_MS model_baudrate_table = deepcopy(MODEL_BAUDRATE_TABLE) model_ctrl_table = deepcopy(MODEL_CONTROL_TABLE) model_encoding_table = deepcopy(MODEL_ENCODING_TABLE) model_number_table = deepcopy(MODEL_NUMBER_TABLE) model_resolution_table = deepcopy(MODEL_RESOLUTION) normalized_data = deepcopy(NORMALIZED_DATA) def __init__( self, port: str, motors: dict[str, Motor], calibration: dict[str, MotorCalibration] | None = None, ): super().__init__(port, motors, calibration) import dynamixel_sdk as dxl self.port_handler = dxl.PortHandler(self.port) self.packet_handler = dxl.PacketHandler(PROTOCOL_VERSION) self.sync_reader = dxl.GroupSyncRead(self.port_handler, self.packet_handler, 0, 0) self.sync_writer = dxl.GroupSyncWrite(self.port_handler, self.packet_handler, 0, 0) self._comm_success = dxl.COMM_SUCCESS self._no_error = 0x00 def _assert_protocol_is_compatible(self, instruction_name: str) -> None: pass def _handshake(self) -> None: self._assert_motors_exist() def _find_single_motor(self, motor: str, initial_baudrate: int | None = None) -> tuple[int, int]: model = self.motors[motor].model search_baudrates = ( [initial_baudrate] if initial_baudrate is not None else self.model_baudrate_table[model] ) for baudrate in search_baudrates: self.set_baudrate(baudrate) id_model = self.broadcast_ping() if id_model: found_id, found_model = next(iter(id_model.items())) expected_model_nb = self.model_number_table[model] if found_model != expected_model_nb: raise RuntimeError( f"Found one motor on {baudrate=} with id={found_id} but it has a " f"model number '{found_model}' different than the one expected: '{expected_model_nb}'. " f"Make sure you are connected only connected to the '{motor}' motor (model '{model}')." ) return baudrate, found_id raise RuntimeError(f"Motor '{motor}' (model '{model}') was not found. Make sure it is connected.") def configure_motors(self, return_delay_time=0) -> None: # By default, Dynamixel motors have a 500µs delay response time (corresponding to a value of 250 on # the 'Return_Delay_Time' address). We ensure this is reduced to the minimum of 2µs (value of 0). 
for motor in self.motors: self.write("Return_Delay_Time", motor, return_delay_time) @property def is_calibrated(self) -> bool: return self.calibration == self.read_calibration() def read_calibration(self) -> dict[str, MotorCalibration]: offsets = self.sync_read("Homing_Offset", normalize=False) mins = self.sync_read("Min_Position_Limit", normalize=False) maxes = self.sync_read("Max_Position_Limit", normalize=False) drive_modes = self.sync_read("Drive_Mode", normalize=False) calibration = {} for motor, m in self.motors.items(): calibration[motor] = MotorCalibration( id=m.id, drive_mode=drive_modes[motor], homing_offset=offsets[motor], range_min=mins[motor], range_max=maxes[motor], ) return calibration def write_calibration(self, calibration_dict: dict[str, MotorCalibration], cache: bool = True) -> None: for motor, calibration in calibration_dict.items(): self.write("Homing_Offset", motor, calibration.homing_offset) self.write("Min_Position_Limit", motor, calibration.range_min) self.write("Max_Position_Limit", motor, calibration.range_max) if cache: self.calibration = calibration_dict def disable_torque(self, motors: str | list[str] | None = None, num_retry: int = 0) -> None: for motor in self._get_motors_list(motors): self.write("Torque_Enable", motor, TorqueMode.DISABLED.value, num_retry=num_retry) def _disable_torque(self, motor_id: int, model: str, num_retry: int = 0) -> None: addr, length = get_address(self.model_ctrl_table, model, "Torque_Enable") self._write(addr, length, motor_id, TorqueMode.DISABLED.value, num_retry=num_retry) def enable_torque(self, motors: str | list[str] | None = None, num_retry: int = 0) -> None: for motor in self._get_motors_list(motors): self.write("Torque_Enable", motor, TorqueMode.ENABLED.value, num_retry=num_retry) def _encode_sign(self, data_name: str, ids_values: dict[int, int]) -> dict[int, int]: for id_ in ids_values: model = self._id_to_model(id_) encoding_table = self.model_encoding_table.get(model) if encoding_table and data_name in encoding_table: n_bytes = encoding_table[data_name] ids_values[id_] = encode_twos_complement(ids_values[id_], n_bytes) return ids_values def _decode_sign(self, data_name: str, ids_values: dict[int, int]) -> dict[int, int]: for id_ in ids_values: model = self._id_to_model(id_) encoding_table = self.model_encoding_table.get(model) if encoding_table and data_name in encoding_table: n_bytes = encoding_table[data_name] ids_values[id_] = decode_twos_complement(ids_values[id_], n_bytes) return ids_values def _get_half_turn_homings(self, positions: dict[NameOrID, Value]) -> dict[NameOrID, Value]: """ On Dynamixel Motors: Present_Position = Actual_Position + Homing_Offset """ half_turn_homings = {} for motor, pos in positions.items(): model = self._get_motor_model(motor) max_res = self.model_resolution_table[model] - 1 half_turn_homings[motor] = int(max_res / 2) - pos return half_turn_homings def _split_into_byte_chunks(self, value: int, length: int) -> list[int]: return _split_into_byte_chunks(value, length) def broadcast_ping(self, num_retry: int = 0, raise_on_error: bool = False) -> dict[int, int] | None: for n_try in range(1 + num_retry): data_list, comm = self.packet_handler.broadcastPing(self.port_handler) if self._is_comm_success(comm): break logger.debug(f"Broadcast ping failed on port '{self.port}' ({n_try=})") logger.debug(self.packet_handler.getTxRxResult(comm)) if not self._is_comm_success(comm): if raise_on_error: raise ConnectionError(self.packet_handler.getTxRxResult(comm)) return return {id_: data[0] for id_, data 
in data_list.items()}
lerobot/src/lerobot/motors/dynamixel/dynamixel.py/0
{ "file_path": "lerobot/src/lerobot/motors/dynamixel/dynamixel.py", "repo_id": "lerobot", "token_count": 4555 }
212
#!/usr/bin/env python # Copyright 2024 Columbia Artificial Intelligence, Robotics Lab, # and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Diffusion Policy as per "Diffusion Policy: Visuomotor Policy Learning via Action Diffusion" TODO(alexander-soare): - Remove reliance on diffusers for DDPMScheduler and LR scheduler. """ import math from collections import deque from collections.abc import Callable import einops import numpy as np import torch import torch.nn.functional as F # noqa: N812 import torchvision from diffusers.schedulers.scheduling_ddim import DDIMScheduler from diffusers.schedulers.scheduling_ddpm import DDPMScheduler from torch import Tensor, nn from lerobot.constants import ACTION, OBS_ENV_STATE, OBS_IMAGES, OBS_STATE from lerobot.policies.diffusion.configuration_diffusion import DiffusionConfig from lerobot.policies.normalize import Normalize, Unnormalize from lerobot.policies.pretrained import PreTrainedPolicy from lerobot.policies.utils import ( get_device_from_parameters, get_dtype_from_parameters, get_output_shape, populate_queues, ) class DiffusionPolicy(PreTrainedPolicy): """ Diffusion Policy as per "Diffusion Policy: Visuomotor Policy Learning via Action Diffusion" (paper: https://huggingface.co/papers/2303.04137, code: https://github.com/real-stanford/diffusion_policy). """ config_class = DiffusionConfig name = "diffusion" def __init__( self, config: DiffusionConfig, dataset_stats: dict[str, dict[str, Tensor]] | None = None, ): """ Args: config: Policy configuration class instance or None, in which case the default instantiation of the configuration class is used. dataset_stats: Dataset statistics to be used for normalization. If not passed here, it is expected that they will be passed with a call to `load_state_dict` before the policy is used. """ super().__init__(config) config.validate_features() self.config = config self.normalize_inputs = Normalize(config.input_features, config.normalization_mapping, dataset_stats) self.normalize_targets = Normalize( config.output_features, config.normalization_mapping, dataset_stats ) self.unnormalize_outputs = Unnormalize( config.output_features, config.normalization_mapping, dataset_stats ) # queues are populated during rollout of the policy, they contain the n latest observations and actions self._queues = None self.diffusion = DiffusionModel(config) self.reset() def get_optim_params(self) -> dict: return self.diffusion.parameters() def reset(self): """Clear observation and action queues. 
Should be called on `env.reset()`""" self._queues = { "observation.state": deque(maxlen=self.config.n_obs_steps), "action": deque(maxlen=self.config.n_action_steps), } if self.config.image_features: self._queues["observation.images"] = deque(maxlen=self.config.n_obs_steps) if self.config.env_state_feature: self._queues["observation.environment_state"] = deque(maxlen=self.config.n_obs_steps) @torch.no_grad() def predict_action_chunk(self, batch: dict[str, Tensor]) -> Tensor: """Predict a chunk of actions given environment observations.""" # stack n latest observations from the queue batch = {k: torch.stack(list(self._queues[k]), dim=1) for k in batch if k in self._queues} actions = self.diffusion.generate_actions(batch) # TODO(rcadene): make above methods return output dictionary? actions = self.unnormalize_outputs({ACTION: actions})[ACTION] return actions @torch.no_grad() def select_action(self, batch: dict[str, Tensor]) -> Tensor: """Select a single action given environment observations. This method handles caching a history of observations and an action trajectory generated by the underlying diffusion model. Here's how it works: - `n_obs_steps` steps worth of observations are cached (for the first steps, the observation is copied `n_obs_steps` times to fill the cache). - The diffusion model generates `horizon` steps worth of actions. - `n_action_steps` worth of actions are actually kept for execution, starting from the current step. Schematically this looks like: ---------------------------------------------------------------------------------------------- (legend: o = n_obs_steps, h = horizon, a = n_action_steps) |timestep | n-o+1 | n-o+2 | ..... | n | ..... | n+a-1 | n+a | ..... | n-o+h | |observation is used | YES | YES | YES | YES | NO | NO | NO | NO | NO | |action is generated | YES | YES | YES | YES | YES | YES | YES | YES | YES | |action is used | NO | NO | NO | YES | YES | YES | NO | NO | NO | ---------------------------------------------------------------------------------------------- Note that this means we require: `n_action_steps <= horizon - n_obs_steps + 1`. Also, note that "horizon" may not the best name to describe what the variable actually means, because this period is actually measured from the first observation which (if `n_obs_steps` > 1) happened in the past. """ # NOTE: for offline evaluation, we have action in the batch, so we need to pop it out if ACTION in batch: batch.pop(ACTION) batch = self.normalize_inputs(batch) if self.config.image_features: batch = dict(batch) # shallow copy so that adding a key doesn't modify the original batch[OBS_IMAGES] = torch.stack([batch[key] for key in self.config.image_features], dim=-4) # NOTE: It's important that this happens after stacking the images into a single key. 
self._queues = populate_queues(self._queues, batch) if len(self._queues[ACTION]) == 0: actions = self.predict_action_chunk(batch) self._queues[ACTION].extend(actions.transpose(0, 1)) action = self._queues[ACTION].popleft() return action def forward(self, batch: dict[str, Tensor]) -> tuple[Tensor, None]: """Run the batch through the model and compute the loss for training or validation.""" batch = self.normalize_inputs(batch) if self.config.image_features: batch = dict(batch) # shallow copy so that adding a key doesn't modify the original batch[OBS_IMAGES] = torch.stack([batch[key] for key in self.config.image_features], dim=-4) batch = self.normalize_targets(batch) loss = self.diffusion.compute_loss(batch) # no output_dict so returning None return loss, None def _make_noise_scheduler(name: str, **kwargs: dict) -> DDPMScheduler | DDIMScheduler: """ Factory for noise scheduler instances of the requested type. All kwargs are passed to the scheduler. """ if name == "DDPM": return DDPMScheduler(**kwargs) elif name == "DDIM": return DDIMScheduler(**kwargs) else: raise ValueError(f"Unsupported noise scheduler type {name}") class DiffusionModel(nn.Module): def __init__(self, config: DiffusionConfig): super().__init__() self.config = config # Build observation encoders (depending on which observations are provided). global_cond_dim = self.config.robot_state_feature.shape[0] if self.config.image_features: num_images = len(self.config.image_features) if self.config.use_separate_rgb_encoder_per_camera: encoders = [DiffusionRgbEncoder(config) for _ in range(num_images)] self.rgb_encoder = nn.ModuleList(encoders) global_cond_dim += encoders[0].feature_dim * num_images else: self.rgb_encoder = DiffusionRgbEncoder(config) global_cond_dim += self.rgb_encoder.feature_dim * num_images if self.config.env_state_feature: global_cond_dim += self.config.env_state_feature.shape[0] self.unet = DiffusionConditionalUnet1d(config, global_cond_dim=global_cond_dim * config.n_obs_steps) self.noise_scheduler = _make_noise_scheduler( config.noise_scheduler_type, num_train_timesteps=config.num_train_timesteps, beta_start=config.beta_start, beta_end=config.beta_end, beta_schedule=config.beta_schedule, clip_sample=config.clip_sample, clip_sample_range=config.clip_sample_range, prediction_type=config.prediction_type, ) if config.num_inference_steps is None: self.num_inference_steps = self.noise_scheduler.config.num_train_timesteps else: self.num_inference_steps = config.num_inference_steps # ========= inference ============ def conditional_sample( self, batch_size: int, global_cond: Tensor | None = None, generator: torch.Generator | None = None ) -> Tensor: device = get_device_from_parameters(self) dtype = get_dtype_from_parameters(self) # Sample prior. sample = torch.randn( size=(batch_size, self.config.horizon, self.config.action_feature.shape[0]), dtype=dtype, device=device, generator=generator, ) self.noise_scheduler.set_timesteps(self.num_inference_steps) for t in self.noise_scheduler.timesteps: # Predict model output. 
model_output = self.unet( sample, torch.full(sample.shape[:1], t, dtype=torch.long, device=sample.device), global_cond=global_cond, ) # Compute previous image: x_t -> x_t-1 sample = self.noise_scheduler.step(model_output, t, sample, generator=generator).prev_sample return sample def _prepare_global_conditioning(self, batch: dict[str, Tensor]) -> Tensor: """Encode image features and concatenate them all together along with the state vector.""" batch_size, n_obs_steps = batch[OBS_STATE].shape[:2] global_cond_feats = [batch[OBS_STATE]] # Extract image features. if self.config.image_features: if self.config.use_separate_rgb_encoder_per_camera: # Combine batch and sequence dims while rearranging to make the camera index dimension first. images_per_camera = einops.rearrange(batch["observation.images"], "b s n ... -> n (b s) ...") img_features_list = torch.cat( [ encoder(images) for encoder, images in zip(self.rgb_encoder, images_per_camera, strict=True) ] ) # Separate batch and sequence dims back out. The camera index dim gets absorbed into the # feature dim (effectively concatenating the camera features). img_features = einops.rearrange( img_features_list, "(n b s) ... -> b s (n ...)", b=batch_size, s=n_obs_steps ) else: # Combine batch, sequence, and "which camera" dims before passing to shared encoder. img_features = self.rgb_encoder( einops.rearrange(batch["observation.images"], "b s n ... -> (b s n) ...") ) # Separate batch dim and sequence dim back out. The camera index dim gets absorbed into the # feature dim (effectively concatenating the camera features). img_features = einops.rearrange( img_features, "(b s n) ... -> b s (n ...)", b=batch_size, s=n_obs_steps ) global_cond_feats.append(img_features) if self.config.env_state_feature: global_cond_feats.append(batch[OBS_ENV_STATE]) # Concatenate features then flatten to (B, global_cond_dim). return torch.cat(global_cond_feats, dim=-1).flatten(start_dim=1) def generate_actions(self, batch: dict[str, Tensor]) -> Tensor: """ This function expects `batch` to have: { "observation.state": (B, n_obs_steps, state_dim) "observation.images": (B, n_obs_steps, num_cameras, C, H, W) AND/OR "observation.environment_state": (B, n_obs_steps, environment_dim) } """ batch_size, n_obs_steps = batch["observation.state"].shape[:2] assert n_obs_steps == self.config.n_obs_steps # Encode image features and concatenate them all together along with the state vector. global_cond = self._prepare_global_conditioning(batch) # (B, global_cond_dim) # run sampling actions = self.conditional_sample(batch_size, global_cond=global_cond) # Extract `n_action_steps` steps worth of actions (from the current observation). start = n_obs_steps - 1 end = start + self.config.n_action_steps actions = actions[:, start:end] return actions def compute_loss(self, batch: dict[str, Tensor]) -> Tensor: """ This function expects `batch` to have (at least): { "observation.state": (B, n_obs_steps, state_dim) "observation.images": (B, n_obs_steps, num_cameras, C, H, W) AND/OR "observation.environment_state": (B, n_obs_steps, environment_dim) "action": (B, horizon, action_dim) "action_is_pad": (B, horizon) } """ # Input validation. 
assert set(batch).issuperset({"observation.state", "action", "action_is_pad"})
        assert "observation.images" in batch or "observation.environment_state" in batch
        n_obs_steps = batch["observation.state"].shape[1]
        horizon = batch["action"].shape[1]
        assert horizon == self.config.horizon
        assert n_obs_steps == self.config.n_obs_steps

        # Encode image features and concatenate them all together along with the state vector.
        global_cond = self._prepare_global_conditioning(batch)  # (B, global_cond_dim)

        # Forward diffusion.
        trajectory = batch["action"]
        # Sample noise to add to the trajectory.
        eps = torch.randn(trajectory.shape, device=trajectory.device)
        # Sample a random noising timestep for each item in the batch.
        timesteps = torch.randint(
            low=0,
            high=self.noise_scheduler.config.num_train_timesteps,
            size=(trajectory.shape[0],),
            device=trajectory.device,
        ).long()
        # Add noise to the clean trajectories according to the noise magnitude at each timestep.
        noisy_trajectory = self.noise_scheduler.add_noise(trajectory, eps, timesteps)

        # Run the denoising network (that might denoise the trajectory, or attempt to predict the noise).
        pred = self.unet(noisy_trajectory, timesteps, global_cond=global_cond)

        # Compute the loss.
        # The target is either the original trajectory, or the noise.
        if self.config.prediction_type == "epsilon":
            target = eps
        elif self.config.prediction_type == "sample":
            target = batch["action"]
        else:
            raise ValueError(f"Unsupported prediction type {self.config.prediction_type}")

        loss = F.mse_loss(pred, target, reduction="none")

        # Mask loss wherever the action is padded with copies (edges of the dataset trajectory).
        if self.config.do_mask_loss_for_padding:
            if "action_is_pad" not in batch:
                raise ValueError(
                    "You need to provide 'action_is_pad' in the batch when "
                    f"{self.config.do_mask_loss_for_padding=}."
                )
            in_episode_bound = ~batch["action_is_pad"]
            loss = loss * in_episode_bound.unsqueeze(-1)

        return loss.mean()


class SpatialSoftmax(nn.Module):
    """
    Spatial Soft Argmax operation described in "Deep Spatial Autoencoders for Visuomotor Learning" by Finn
    et al. (https://huggingface.co/papers/1509.06113). A minimal port of the robomimic implementation.

    At a high level, this takes 2D feature maps (from a convnet/ViT) and returns the "center of mass" of
    activations of each channel, i.e., keypoints in the image space for the policy to focus on.

    Example: take feature maps of size (512x10x12). We generate a grid of normalized coordinates (10x12x2):
    -----------------------------------------------------
    | (-1., -1.)   | (-0.82, -1.)   | ... | (1., -1.)   |
    | (-1., -0.78) | (-0.82, -0.78) | ... | (1., -0.78) |
    | ...          | ...            | ... | ...         |
    | (-1., 1.)    | (-0.82, 1.)    | ... | (1., 1.)    |
    -----------------------------------------------------
    This is achieved by applying channel-wise softmax over the activations (512x120) and computing the dot
    product with the coordinates (120x2) to get expected points of maximal activation (512x2).

    The example above results in 512 keypoints (corresponding to the 512 input channels). We can optionally
    provide num_kp != None to control the number of keypoints. This is achieved by first applying a
    learnable linear mapping (in_channels, H, W) -> (num_kp, H, W).
    """

    def __init__(self, input_shape, num_kp=None):
        """
        Args:
            input_shape (list): (C, H, W) input feature map shape.
            num_kp (int): number of keypoints in output. If None, output will have the same number of channels as input.
""" super().__init__() assert len(input_shape) == 3 self._in_c, self._in_h, self._in_w = input_shape if num_kp is not None: self.nets = torch.nn.Conv2d(self._in_c, num_kp, kernel_size=1) self._out_c = num_kp else: self.nets = None self._out_c = self._in_c # we could use torch.linspace directly but that seems to behave slightly differently than numpy # and causes a small degradation in pc_success of pre-trained models. pos_x, pos_y = np.meshgrid(np.linspace(-1.0, 1.0, self._in_w), np.linspace(-1.0, 1.0, self._in_h)) pos_x = torch.from_numpy(pos_x.reshape(self._in_h * self._in_w, 1)).float() pos_y = torch.from_numpy(pos_y.reshape(self._in_h * self._in_w, 1)).float() # register as buffer so it's moved to the correct device. self.register_buffer("pos_grid", torch.cat([pos_x, pos_y], dim=1)) def forward(self, features: Tensor) -> Tensor: """ Args: features: (B, C, H, W) input feature maps. Returns: (B, K, 2) image-space coordinates of keypoints. """ if self.nets is not None: features = self.nets(features) # [B, K, H, W] -> [B * K, H * W] where K is number of keypoints features = features.reshape(-1, self._in_h * self._in_w) # 2d softmax normalization attention = F.softmax(features, dim=-1) # [B * K, H * W] x [H * W, 2] -> [B * K, 2] for spatial coordinate mean in x and y dimensions expected_xy = attention @ self.pos_grid # reshape to [B, K, 2] feature_keypoints = expected_xy.view(-1, self._out_c, 2) return feature_keypoints class DiffusionRgbEncoder(nn.Module): """Encodes an RGB image into a 1D feature vector. Includes the ability to normalize and crop the image first. """ def __init__(self, config: DiffusionConfig): super().__init__() # Set up optional preprocessing. if config.crop_shape is not None: self.do_crop = True # Always use center crop for eval self.center_crop = torchvision.transforms.CenterCrop(config.crop_shape) if config.crop_is_random: self.maybe_random_crop = torchvision.transforms.RandomCrop(config.crop_shape) else: self.maybe_random_crop = self.center_crop else: self.do_crop = False # Set up backbone. backbone_model = getattr(torchvision.models, config.vision_backbone)( weights=config.pretrained_backbone_weights ) # Note: This assumes that the layer4 feature map is children()[-3] # TODO(alexander-soare): Use a safer alternative. self.backbone = nn.Sequential(*(list(backbone_model.children())[:-2])) if config.use_group_norm: if config.pretrained_backbone_weights: raise ValueError( "You can't replace BatchNorm in a pretrained model without ruining the weights!" ) self.backbone = _replace_submodules( root_module=self.backbone, predicate=lambda x: isinstance(x, nn.BatchNorm2d), func=lambda x: nn.GroupNorm(num_groups=x.num_features // 16, num_channels=x.num_features), ) # Set up pooling and final layers. # Use a dry run to get the feature map shape. # The dummy input should take the number of image channels from `config.image_features` and it should # use the height and width from `config.crop_shape` if it is provided, otherwise it should use the # height and width from `config.image_features`. # Note: we have a check in the config class to make sure all images have the same shape. 
images_shape = next(iter(config.image_features.values())).shape dummy_shape_h_w = config.crop_shape if config.crop_shape is not None else images_shape[1:] dummy_shape = (1, images_shape[0], *dummy_shape_h_w) feature_map_shape = get_output_shape(self.backbone, dummy_shape)[1:] self.pool = SpatialSoftmax(feature_map_shape, num_kp=config.spatial_softmax_num_keypoints) self.feature_dim = config.spatial_softmax_num_keypoints * 2 self.out = nn.Linear(config.spatial_softmax_num_keypoints * 2, self.feature_dim) self.relu = nn.ReLU() def forward(self, x: Tensor) -> Tensor: """ Args: x: (B, C, H, W) image tensor with pixel values in [0, 1]. Returns: (B, D) image feature. """ # Preprocess: maybe crop (if it was set up in the __init__). if self.do_crop: if self.training: # noqa: SIM108 x = self.maybe_random_crop(x) else: # Always use center crop for eval. x = self.center_crop(x) # Extract backbone feature. x = torch.flatten(self.pool(self.backbone(x)), start_dim=1) # Final linear layer with non-linearity. x = self.relu(self.out(x)) return x def _replace_submodules( root_module: nn.Module, predicate: Callable[[nn.Module], bool], func: Callable[[nn.Module], nn.Module] ) -> nn.Module: """ Args: root_module: The module for which the submodules need to be replaced predicate: Takes a module as an argument and must return True if the that module is to be replaced. func: Takes a module as an argument and returns a new module to replace it with. Returns: The root module with its submodules replaced. """ if predicate(root_module): return func(root_module) replace_list = [k.split(".") for k, m in root_module.named_modules(remove_duplicate=True) if predicate(m)] for *parents, k in replace_list: parent_module = root_module if len(parents) > 0: parent_module = root_module.get_submodule(".".join(parents)) if isinstance(parent_module, nn.Sequential): src_module = parent_module[int(k)] else: src_module = getattr(parent_module, k) tgt_module = func(src_module) if isinstance(parent_module, nn.Sequential): parent_module[int(k)] = tgt_module else: setattr(parent_module, k, tgt_module) # verify that all BN are replaced assert not any(predicate(m) for _, m in root_module.named_modules(remove_duplicate=True)) return root_module class DiffusionSinusoidalPosEmb(nn.Module): """1D sinusoidal positional embeddings as in Attention is All You Need.""" def __init__(self, dim: int): super().__init__() self.dim = dim def forward(self, x: Tensor) -> Tensor: device = x.device half_dim = self.dim // 2 emb = math.log(10000) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, device=device) * -emb) emb = x.unsqueeze(-1) * emb.unsqueeze(0) emb = torch.cat((emb.sin(), emb.cos()), dim=-1) return emb class DiffusionConv1dBlock(nn.Module): """Conv1d --> GroupNorm --> Mish""" def __init__(self, inp_channels, out_channels, kernel_size, n_groups=8): super().__init__() self.block = nn.Sequential( nn.Conv1d(inp_channels, out_channels, kernel_size, padding=kernel_size // 2), nn.GroupNorm(n_groups, out_channels), nn.Mish(), ) def forward(self, x): return self.block(x) class DiffusionConditionalUnet1d(nn.Module): """A 1D convolutional UNet with FiLM modulation for conditioning. Note: this removes local conditioning as compared to the original diffusion policy code. """ def __init__(self, config: DiffusionConfig, global_cond_dim: int): super().__init__() self.config = config # Encoder for the diffusion timestep. 
self.diffusion_step_encoder = nn.Sequential( DiffusionSinusoidalPosEmb(config.diffusion_step_embed_dim), nn.Linear(config.diffusion_step_embed_dim, config.diffusion_step_embed_dim * 4), nn.Mish(), nn.Linear(config.diffusion_step_embed_dim * 4, config.diffusion_step_embed_dim), ) # The FiLM conditioning dimension. cond_dim = config.diffusion_step_embed_dim + global_cond_dim # In channels / out channels for each downsampling block in the Unet's encoder. For the decoder, we # just reverse these. in_out = [(config.action_feature.shape[0], config.down_dims[0])] + list( zip(config.down_dims[:-1], config.down_dims[1:], strict=True) ) # Unet encoder. common_res_block_kwargs = { "cond_dim": cond_dim, "kernel_size": config.kernel_size, "n_groups": config.n_groups, "use_film_scale_modulation": config.use_film_scale_modulation, } self.down_modules = nn.ModuleList([]) for ind, (dim_in, dim_out) in enumerate(in_out): is_last = ind >= (len(in_out) - 1) self.down_modules.append( nn.ModuleList( [ DiffusionConditionalResidualBlock1d(dim_in, dim_out, **common_res_block_kwargs), DiffusionConditionalResidualBlock1d(dim_out, dim_out, **common_res_block_kwargs), # Downsample as long as it is not the last block. nn.Conv1d(dim_out, dim_out, 3, 2, 1) if not is_last else nn.Identity(), ] ) ) # Processing in the middle of the auto-encoder. self.mid_modules = nn.ModuleList( [ DiffusionConditionalResidualBlock1d( config.down_dims[-1], config.down_dims[-1], **common_res_block_kwargs ), DiffusionConditionalResidualBlock1d( config.down_dims[-1], config.down_dims[-1], **common_res_block_kwargs ), ] ) # Unet decoder. self.up_modules = nn.ModuleList([]) for ind, (dim_out, dim_in) in enumerate(reversed(in_out[1:])): is_last = ind >= (len(in_out) - 1) self.up_modules.append( nn.ModuleList( [ # dim_in * 2, because it takes the encoder's skip connection as well DiffusionConditionalResidualBlock1d(dim_in * 2, dim_out, **common_res_block_kwargs), DiffusionConditionalResidualBlock1d(dim_out, dim_out, **common_res_block_kwargs), # Upsample as long as it is not the last block. nn.ConvTranspose1d(dim_out, dim_out, 4, 2, 1) if not is_last else nn.Identity(), ] ) ) self.final_conv = nn.Sequential( DiffusionConv1dBlock(config.down_dims[0], config.down_dims[0], kernel_size=config.kernel_size), nn.Conv1d(config.down_dims[0], config.action_feature.shape[0], 1), ) def forward(self, x: Tensor, timestep: Tensor | int, global_cond=None) -> Tensor: """ Args: x: (B, T, input_dim) tensor for input to the Unet. timestep: (B,) tensor of (timestep_we_are_denoising_from - 1). global_cond: (B, global_cond_dim) output: (B, T, input_dim) Returns: (B, T, input_dim) diffusion model prediction. """ # For 1D convolutions we'll need feature dimension first. x = einops.rearrange(x, "b t d -> b d t") timesteps_embed = self.diffusion_step_encoder(timestep) # If there is a global conditioning feature, concatenate it to the timestep embedding. if global_cond is not None: global_feature = torch.cat([timesteps_embed, global_cond], axis=-1) else: global_feature = timesteps_embed # Run encoder, keeping track of skip features to pass to the decoder. encoder_skip_features: list[Tensor] = [] for resnet, resnet2, downsample in self.down_modules: x = resnet(x, global_feature) x = resnet2(x, global_feature) encoder_skip_features.append(x) x = downsample(x) for mid_module in self.mid_modules: x = mid_module(x, global_feature) # Run decoder, using the skip features from the encoder. 
for resnet, resnet2, upsample in self.up_modules: x = torch.cat((x, encoder_skip_features.pop()), dim=1) x = resnet(x, global_feature) x = resnet2(x, global_feature) x = upsample(x) x = self.final_conv(x) x = einops.rearrange(x, "b d t -> b t d") return x class DiffusionConditionalResidualBlock1d(nn.Module): """ResNet style 1D convolutional block with FiLM modulation for conditioning.""" def __init__( self, in_channels: int, out_channels: int, cond_dim: int, kernel_size: int = 3, n_groups: int = 8, # Set to True to do scale modulation with FiLM as well as bias modulation (defaults to False meaning # FiLM just modulates bias). use_film_scale_modulation: bool = False, ): super().__init__() self.use_film_scale_modulation = use_film_scale_modulation self.out_channels = out_channels self.conv1 = DiffusionConv1dBlock(in_channels, out_channels, kernel_size, n_groups=n_groups) # FiLM modulation (https://huggingface.co/papers/1709.07871) outputs per-channel bias and (maybe) scale. cond_channels = out_channels * 2 if use_film_scale_modulation else out_channels self.cond_encoder = nn.Sequential(nn.Mish(), nn.Linear(cond_dim, cond_channels)) self.conv2 = DiffusionConv1dBlock(out_channels, out_channels, kernel_size, n_groups=n_groups) # A final convolution for dimension matching the residual (if needed). self.residual_conv = ( nn.Conv1d(in_channels, out_channels, 1) if in_channels != out_channels else nn.Identity() ) def forward(self, x: Tensor, cond: Tensor) -> Tensor: """ Args: x: (B, in_channels, T) cond: (B, cond_dim) Returns: (B, out_channels, T) """ out = self.conv1(x) # Get condition embedding. Unsqueeze for broadcasting to `out`, resulting in (B, out_channels, 1). cond_embed = self.cond_encoder(cond).unsqueeze(-1) if self.use_film_scale_modulation: # Treat the embedding as a list of scales and biases. scale = cond_embed[:, : self.out_channels] bias = cond_embed[:, self.out_channels :] out = scale * out + bias else: # Treat the embedding as biases. out = out + cond_embed out = self.conv2(out) out = out + self.residual_conv(x) return out
lerobot/src/lerobot/policies/diffusion/modeling_diffusion.py/0
{ "file_path": "lerobot/src/lerobot/policies/diffusion/modeling_diffusion.py", "repo_id": "lerobot", "token_count": 14553 }
213
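The `DiffusionModel` above pairs a `diffusers` noise scheduler with a FiLM-conditioned 1D UNet: `compute_loss` noises the action trajectory at a random timestep and regresses either the noise or the clean sample, while `conditional_sample` runs the reverse process from pure noise. The sketch below reproduces that training/sampling pattern with a toy MLP standing in for the UNet; the network, dimensions, batch size, and number of inference steps are illustrative assumptions, not lerobot's actual architecture.

```python
import torch
import torch.nn.functional as F
from diffusers import DDPMScheduler

horizon, action_dim, cond_dim, batch_size = 16, 7, 32, 8
denoiser = torch.nn.Sequential(
    torch.nn.Linear(horizon * action_dim + cond_dim + 1, 256),
    torch.nn.Mish(),
    torch.nn.Linear(256, horizon * action_dim),
)

def predict_eps(noisy, t, cond):
    # Stand-in for the FiLM-conditioned UNet: flatten the trajectory and
    # concatenate the timestep and the global conditioning vector.
    inp = torch.cat([noisy.flatten(1), t.float().unsqueeze(-1), cond], dim=-1)
    return denoiser(inp).view_as(noisy)

scheduler = DDPMScheduler(num_train_timesteps=100, prediction_type="epsilon")

# Training step (mirrors DiffusionModel.compute_loss).
trajectory = torch.randn(batch_size, horizon, action_dim)  # clean action chunk
global_cond = torch.randn(batch_size, cond_dim)            # encoded observations
eps = torch.randn_like(trajectory)
timesteps = torch.randint(0, scheduler.config.num_train_timesteps, (batch_size,))
noisy = scheduler.add_noise(trajectory, eps, timesteps)
loss = F.mse_loss(predict_eps(noisy, timesteps, global_cond), eps)

# Sampling (mirrors DiffusionModel.conditional_sample).
sample = torch.randn(batch_size, horizon, action_dim)
scheduler.set_timesteps(10)
for t in scheduler.timesteps:
    t_batch = torch.full((batch_size,), t, dtype=torch.long)
    model_out = predict_eps(sample, t_batch, global_cond)
    sample = scheduler.step(model_out, t, sample).prev_sample
```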
# !/usr/bin/env python # Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass, field from lerobot.configs.policies import PreTrainedConfig from lerobot.configs.types import NormalizationMode from lerobot.optim.optimizers import AdamWConfig, OptimizerConfig from lerobot.optim.schedulers import LRSchedulerConfig @PreTrainedConfig.register_subclass(name="reward_classifier") @dataclass class RewardClassifierConfig(PreTrainedConfig): """Configuration for the Reward Classifier model.""" name: str = "reward_classifier" num_classes: int = 2 hidden_dim: int = 256 latent_dim: int = 256 image_embedding_pooling_dim: int = 8 dropout_rate: float = 0.1 model_name: str = "helper2424/resnet10" device: str = "cpu" model_type: str = "cnn" # "transformer" or "cnn" num_cameras: int = 2 learning_rate: float = 1e-4 weight_decay: float = 0.01 grad_clip_norm: float = 1.0 normalization_mapping: dict[str, NormalizationMode] = field( default_factory=lambda: { "VISUAL": NormalizationMode.MEAN_STD, } ) @property def observation_delta_indices(self) -> list | None: return None @property def action_delta_indices(self) -> list | None: return None @property def reward_delta_indices(self) -> list | None: return None def get_optimizer_preset(self) -> OptimizerConfig: return AdamWConfig( lr=self.learning_rate, weight_decay=self.weight_decay, grad_clip_norm=self.grad_clip_norm, ) def get_scheduler_preset(self) -> LRSchedulerConfig | None: return None def validate_features(self) -> None: """Validate feature configurations.""" has_image = any(key.startswith("observation.image") for key in self.input_features) if not has_image: raise ValueError( "You must provide an image observation (key starting with 'observation.image') in the input features" )
lerobot/src/lerobot/policies/sac/reward_model/configuration_classifier.py/0
{ "file_path": "lerobot/src/lerobot/policies/sac/reward_model/configuration_classifier.py", "repo_id": "lerobot", "token_count": 973 }
214
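`RewardClassifierConfig` is registered under the string name `"reward_classifier"` so it can later be looked up and rebuilt by name. The snippet below is a deliberately stripped-down sketch of that registry-plus-dataclass pattern in plain Python; it is not lerobot's `PreTrainedConfig` machinery, and the toy config class and field values are made up for illustration.

```python
from dataclasses import dataclass, asdict

_CONFIG_REGISTRY: dict[str, type] = {}

def register_subclass(name: str):
    # Decorator that records a config class under a string name.
    def decorator(cls):
        _CONFIG_REGISTRY[name] = cls
        return cls
    return decorator

@register_subclass("reward_classifier")
@dataclass
class ToyRewardClassifierConfig:
    num_classes: int = 2
    hidden_dim: int = 256
    learning_rate: float = 1e-4

def config_from_dict(name: str, payload: dict):
    # Look up the registered dataclass and instantiate it from a plain dict.
    return _CONFIG_REGISTRY[name](**payload)

cfg = config_from_dict("reward_classifier", {"num_classes": 3, "learning_rate": 3e-4})
print(asdict(cfg))  # {'num_classes': 3, 'hidden_dim': 256, 'learning_rate': 0.0003}
```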
from __future__ import annotations from collections.abc import Mapping from dataclasses import dataclass, field from typing import Any import numpy as np import torch from torch import Tensor from lerobot.configs.types import FeatureType, NormalizationMode, PolicyFeature from lerobot.datasets.lerobot_dataset import LeRobotDataset from lerobot.processor.pipeline import EnvTransition, ProcessorStepRegistry, TransitionKey def _convert_stats_to_tensors(stats: dict[str, dict[str, Any]]) -> dict[str, dict[str, Tensor]]: """Convert numpy arrays and other types to torch tensors.""" tensor_stats: dict[str, dict[str, Tensor]] = {} for key, sub in stats.items(): tensor_stats[key] = {} for stat_name, value in sub.items(): if isinstance(value, np.ndarray): tensor_val = torch.from_numpy(value.astype(np.float32)) elif isinstance(value, torch.Tensor): tensor_val = value.to(dtype=torch.float32) elif isinstance(value, (int, float, list, tuple)): tensor_val = torch.tensor(value, dtype=torch.float32) else: raise TypeError(f"Unsupported type for stats['{key}']['{stat_name}']: {type(value)}") tensor_stats[key][stat_name] = tensor_val return tensor_stats @dataclass @ProcessorStepRegistry.register(name="normalizer_processor") class NormalizerProcessor: """Normalizes observations and actions in a single processor step. This processor handles normalization of both observation and action tensors using either mean/std normalization or min/max scaling to a [-1, 1] range. For each tensor key in the stats dictionary, the processor will: - Use mean/std normalization if those statistics are provided: (x - mean) / std - Use min/max scaling if those statistics are provided: 2 * (x - min) / (max - min) - 1 The processor can be configured to normalize only specific keys by setting the normalize_keys parameter. """ # Features and normalisation map are mandatory to match the design of normalize.py features: dict[str, PolicyFeature] norm_map: dict[FeatureType, NormalizationMode] # Pre-computed statistics coming from dataset.meta.stats for instance. stats: dict[str, dict[str, Any]] | None = None # Explicit subset of keys to normalise. If ``None`` every key (except # "action") found in ``stats`` will be normalised. Using a ``set`` makes # membership checks O(1). normalize_keys: set[str] | None = None eps: float = 1e-8 _tensor_stats: dict[str, dict[str, Tensor]] = field(default_factory=dict, init=False, repr=False) @classmethod def from_lerobot_dataset( cls, dataset: LeRobotDataset, features: dict[str, PolicyFeature], norm_map: dict[FeatureType, NormalizationMode], *, normalize_keys: set[str] | None = None, eps: float = 1e-8, ) -> NormalizerProcessor: """Factory helper that pulls statistics from a :class:`LeRobotDataset`. The features and norm_map parameters are mandatory to match the design pattern used in normalize.py. 
""" return cls( features=features, norm_map=norm_map, stats=dataset.meta.stats, normalize_keys=normalize_keys, eps=eps, ) def __post_init__(self): # Handle deserialization from JSON config if self.features and isinstance(list(self.features.values())[0], dict): # Features came from JSON - need to reconstruct PolicyFeature objects reconstructed_features = {} for key, ft_dict in self.features.items(): reconstructed_features[key] = PolicyFeature( type=FeatureType(ft_dict["type"]), shape=tuple(ft_dict["shape"]) ) self.features = reconstructed_features if self.norm_map and isinstance(list(self.norm_map.keys())[0], str): # norm_map came from JSON - need to reconstruct enum keys and values reconstructed_norm_map = {} for ft_type_str, norm_mode_str in self.norm_map.items(): reconstructed_norm_map[FeatureType(ft_type_str)] = NormalizationMode(norm_mode_str) self.norm_map = reconstructed_norm_map # Convert statistics once so we avoid repeated numpy→Tensor conversions # during runtime. self.stats = self.stats or {} self._tensor_stats = _convert_stats_to_tensors(self.stats) # Ensure *normalize_keys* is a set for fast look-ups and compare by # value later when returning the configuration. if self.normalize_keys is not None and not isinstance(self.normalize_keys, set): self.normalize_keys = set(self.normalize_keys) def _normalize_obs(self, observation): if observation is None: return None # Decide which keys should be normalised for this call. if self.normalize_keys is not None: keys_to_norm = self.normalize_keys else: # Use feature map to skip action keys. keys_to_norm = {k for k, ft in self.features.items() if ft.type is not FeatureType.ACTION} processed = dict(observation) for key in keys_to_norm: if key not in processed or key not in self._tensor_stats: continue orig_val = processed[key] tensor = ( orig_val.to(dtype=torch.float32) if isinstance(orig_val, torch.Tensor) else torch.as_tensor(orig_val, dtype=torch.float32) ) stats = {k: v.to(tensor.device) for k, v in self._tensor_stats[key].items()} if "mean" in stats and "std" in stats: mean, std = stats["mean"], stats["std"] processed[key] = (tensor - mean) / (std + self.eps) elif "min" in stats and "max" in stats: min_val, max_val = stats["min"], stats["max"] processed[key] = 2 * (tensor - min_val) / (max_val - min_val + self.eps) - 1 return processed def _normalize_action(self, action): if action is None or "action" not in self._tensor_stats: return action tensor = ( action.to(dtype=torch.float32) if isinstance(action, torch.Tensor) else torch.as_tensor(action, dtype=torch.float32) ) stats = {k: v.to(tensor.device) for k, v in self._tensor_stats["action"].items()} if "mean" in stats and "std" in stats: mean, std = stats["mean"], stats["std"] return (tensor - mean) / (std + self.eps) if "min" in stats and "max" in stats: min_val, max_val = stats["min"], stats["max"] return 2 * (tensor - min_val) / (max_val - min_val + self.eps) - 1 raise ValueError("Action stats must contain either ('mean','std') or ('min','max')") def __call__(self, transition: EnvTransition) -> EnvTransition: observation = self._normalize_obs(transition.get(TransitionKey.OBSERVATION)) action = self._normalize_action(transition.get(TransitionKey.ACTION)) # Create a new transition with normalized values new_transition = transition.copy() new_transition[TransitionKey.OBSERVATION] = observation new_transition[TransitionKey.ACTION] = action return new_transition def get_config(self) -> dict[str, Any]: config = { "eps": self.eps, "features": { key: {"type": ft.type.value, "shape": 
ft.shape} for key, ft in self.features.items() }, "norm_map": {ft_type.value: norm_mode.value for ft_type, norm_mode in self.norm_map.items()}, } if self.normalize_keys is not None: # Serialise as a list for YAML / JSON friendliness config["normalize_keys"] = sorted(self.normalize_keys) return config def state_dict(self) -> dict[str, Tensor]: flat = {} for key, sub in self._tensor_stats.items(): for stat_name, tensor in sub.items(): flat[f"{key}.{stat_name}"] = tensor return flat def load_state_dict(self, state: Mapping[str, Tensor]) -> None: self._tensor_stats.clear() for flat_key, tensor in state.items(): key, stat_name = flat_key.rsplit(".", 1) self._tensor_stats.setdefault(key, {})[stat_name] = tensor def reset(self): pass def feature_contract(self, features: dict[str, PolicyFeature]) -> dict[str, PolicyFeature]: return features @dataclass @ProcessorStepRegistry.register(name="unnormalizer_processor") class UnnormalizerProcessor: """Inverse normalisation for observations and actions. Exactly mirrors :class:`NormalizerProcessor` but applies the inverse transform. """ features: dict[str, PolicyFeature] norm_map: dict[FeatureType, NormalizationMode] stats: dict[str, dict[str, Any]] | None = None _tensor_stats: dict[str, dict[str, Tensor]] = field(default_factory=dict, init=False, repr=False) @classmethod def from_lerobot_dataset( cls, dataset: LeRobotDataset, features: dict[str, PolicyFeature], norm_map: dict[FeatureType, NormalizationMode], ) -> UnnormalizerProcessor: return cls(features=features, norm_map=norm_map, stats=dataset.meta.stats) def __post_init__(self): # Handle deserialization from JSON config if self.features and isinstance(list(self.features.values())[0], dict): # Features came from JSON - need to reconstruct PolicyFeature objects reconstructed_features = {} for key, ft_dict in self.features.items(): reconstructed_features[key] = PolicyFeature( type=FeatureType(ft_dict["type"]), shape=tuple(ft_dict["shape"]) ) self.features = reconstructed_features if self.norm_map and isinstance(list(self.norm_map.keys())[0], str): # norm_map came from JSON - need to reconstruct enum keys and values reconstructed_norm_map = {} for ft_type_str, norm_mode_str in self.norm_map.items(): reconstructed_norm_map[FeatureType(ft_type_str)] = NormalizationMode(norm_mode_str) self.norm_map = reconstructed_norm_map self.stats = self.stats or {} self._tensor_stats = _convert_stats_to_tensors(self.stats) def _unnormalize_obs(self, observation): if observation is None: return None keys = [k for k, ft in self.features.items() if ft.type is not FeatureType.ACTION] processed = dict(observation) for key in keys: if key not in processed or key not in self._tensor_stats: continue orig_val = processed[key] tensor = ( orig_val.to(dtype=torch.float32) if isinstance(orig_val, torch.Tensor) else torch.as_tensor(orig_val, dtype=torch.float32) ) stats = {k: v.to(tensor.device) for k, v in self._tensor_stats[key].items()} if "mean" in stats and "std" in stats: mean, std = stats["mean"], stats["std"] processed[key] = tensor * std + mean elif "min" in stats and "max" in stats: min_val, max_val = stats["min"], stats["max"] processed[key] = (tensor + 1) / 2 * (max_val - min_val) + min_val return processed def _unnormalize_action(self, action): if action is None or "action" not in self._tensor_stats: return action tensor = ( action.to(dtype=torch.float32) if isinstance(action, torch.Tensor) else torch.as_tensor(action, dtype=torch.float32) ) stats = {k: v.to(tensor.device) for k, v in 
self._tensor_stats["action"].items()} if "mean" in stats and "std" in stats: mean, std = stats["mean"], stats["std"] return tensor * std + mean if "min" in stats and "max" in stats: min_val, max_val = stats["min"], stats["max"] return (tensor + 1) / 2 * (max_val - min_val) + min_val raise ValueError("Action stats must contain either ('mean','std') or ('min','max')") def __call__(self, transition: EnvTransition) -> EnvTransition: observation = self._unnormalize_obs(transition.get(TransitionKey.OBSERVATION)) action = self._unnormalize_action(transition.get(TransitionKey.ACTION)) # Create a new transition with unnormalized values new_transition = transition.copy() new_transition[TransitionKey.OBSERVATION] = observation new_transition[TransitionKey.ACTION] = action return new_transition def get_config(self) -> dict[str, Any]: return { "features": { key: {"type": ft.type.value, "shape": ft.shape} for key, ft in self.features.items() }, "norm_map": {ft_type.value: norm_mode.value for ft_type, norm_mode in self.norm_map.items()}, } def state_dict(self) -> dict[str, Tensor]: flat = {} for key, sub in self._tensor_stats.items(): for stat_name, tensor in sub.items(): flat[f"{key}.{stat_name}"] = tensor return flat def load_state_dict(self, state: Mapping[str, Tensor]) -> None: self._tensor_stats.clear() for flat_key, tensor in state.items(): key, stat_name = flat_key.rsplit(".", 1) self._tensor_stats.setdefault(key, {})[stat_name] = tensor def reset(self): pass def feature_contract(self, features: dict[str, PolicyFeature]) -> dict[str, PolicyFeature]: return features
lerobot/src/lerobot/processor/normalize_processor.py/0
{ "file_path": "lerobot/src/lerobot/processor/normalize_processor.py", "repo_id": "lerobot", "token_count": 5938 }
215
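The processor applies one of two invertible transforms per key: mean/std normalization or min/max scaling to [-1, 1]. A quick numerical check of both formulas and the inverses applied by `UnnormalizerProcessor` (the sample statistics below are made up for illustration):

```python
import torch

eps = 1e-8
x = torch.tensor([[0.5, 10.0, -3.0]])

# MEAN_STD: (x - mean) / (std + eps), inverted as x_norm * std + mean
mean = torch.tensor([0.0, 5.0, -1.0])
std = torch.tensor([1.0, 2.0, 4.0])
x_norm = (x - mean) / (std + eps)
assert torch.allclose(x_norm * std + mean, x, atol=1e-5)

# MIN_MAX: scale to [-1, 1], inverted as (x_norm + 1) / 2 * (max - min) + min
min_v = torch.tensor([-1.0, 0.0, -5.0])
max_v = torch.tensor([1.0, 20.0, 5.0])
x_norm = 2 * (x - min_v) / (max_v - min_v + eps) - 1
assert torch.allclose((x_norm + 1) / 2 * (max_v - min_v) + min_v, x, atol=1e-5)
```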
#!/usr/bin/env python # Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Actor server runner for distributed HILSerl robot policy training. This script implements the actor component of the distributed HILSerl architecture. It executes the policy in the robot environment, collects experience, and sends transitions to the learner server for policy updates. Examples of usage: - Start an actor server for real robot training with human-in-the-loop intervention: ```bash python -m lerobot.scripts.rl.actor --config_path src/lerobot/configs/train_config_hilserl_so100.json ``` **NOTE**: The actor server requires a running learner server to connect to. Ensure the learner server is started before launching the actor. **NOTE**: Human intervention is key to HILSerl training. Press the upper right trigger button on the gamepad to take control of the robot during training. Initially intervene frequently, then gradually reduce interventions as the policy improves. **WORKFLOW**: 1. Determine robot workspace bounds using `find_joint_limits.py` 2. Record demonstrations with `gym_manipulator.py` in record mode 3. Process the dataset and determine camera crops with `crop_dataset_roi.py` 4. Start the learner server with the training configuration 5. Start this actor server with the same configuration 6. 
Use human interventions to guide policy learning For more details on the complete HILSerl training workflow, see: https://github.com/michel-aractingi/lerobot-hilserl-guide """ import logging import os import time from functools import lru_cache from queue import Empty import grpc import torch from torch import nn from torch.multiprocessing import Event, Queue from lerobot.cameras import opencv # noqa: F401 from lerobot.configs import parser from lerobot.configs.train import TrainRLServerPipelineConfig from lerobot.policies.factory import make_policy from lerobot.policies.sac.modeling_sac import SACPolicy from lerobot.robots import so100_follower # noqa: F401 from lerobot.scripts.rl.gym_manipulator import make_robot_env from lerobot.teleoperators import gamepad, so101_leader # noqa: F401 from lerobot.transport import services_pb2, services_pb2_grpc from lerobot.transport.utils import ( bytes_to_state_dict, grpc_channel_options, python_object_to_bytes, receive_bytes_in_chunks, send_bytes_in_chunks, transitions_to_bytes, ) from lerobot.utils.process import ProcessSignalHandler from lerobot.utils.queue import get_last_item_from_queue from lerobot.utils.random_utils import set_seed from lerobot.utils.robot_utils import busy_wait from lerobot.utils.transition import ( Transition, move_state_dict_to_device, move_transition_to_device, ) from lerobot.utils.utils import ( TimerManager, get_safe_torch_device, init_logging, ) ACTOR_SHUTDOWN_TIMEOUT = 30 ################################################# # Main entry point # ################################################# @parser.wrap() def actor_cli(cfg: TrainRLServerPipelineConfig): cfg.validate() display_pid = False if not use_threads(cfg): import torch.multiprocessing as mp mp.set_start_method("spawn") display_pid = True # Create logs directory to ensure it exists log_dir = os.path.join(cfg.output_dir, "logs") os.makedirs(log_dir, exist_ok=True) log_file = os.path.join(log_dir, f"actor_{cfg.job_name}.log") # Initialize logging with explicit log file init_logging(log_file=log_file, display_pid=display_pid) logging.info(f"Actor logging initialized, writing to {log_file}") is_threaded = use_threads(cfg) shutdown_event = ProcessSignalHandler(is_threaded, display_pid=display_pid).shutdown_event learner_client, grpc_channel = learner_service_client( host=cfg.policy.actor_learner_config.learner_host, port=cfg.policy.actor_learner_config.learner_port, ) logging.info("[ACTOR] Establishing connection with Learner") if not establish_learner_connection(learner_client, shutdown_event): logging.error("[ACTOR] Failed to establish connection with Learner") return if not use_threads(cfg): # If we use multithreading, we can reuse the channel grpc_channel.close() grpc_channel = None logging.info("[ACTOR] Connection with Learner established") parameters_queue = Queue() transitions_queue = Queue() interactions_queue = Queue() concurrency_entity = None if use_threads(cfg): from threading import Thread concurrency_entity = Thread else: from multiprocessing import Process concurrency_entity = Process receive_policy_process = concurrency_entity( target=receive_policy, args=(cfg, parameters_queue, shutdown_event, grpc_channel), daemon=True, ) transitions_process = concurrency_entity( target=send_transitions, args=(cfg, transitions_queue, shutdown_event, grpc_channel), daemon=True, ) interactions_process = concurrency_entity( target=send_interactions, args=(cfg, interactions_queue, shutdown_event, grpc_channel), daemon=True, ) transitions_process.start() 
interactions_process.start() receive_policy_process.start() act_with_policy( cfg=cfg, shutdown_event=shutdown_event, parameters_queue=parameters_queue, transitions_queue=transitions_queue, interactions_queue=interactions_queue, ) logging.info("[ACTOR] Policy process joined") logging.info("[ACTOR] Closing queues") transitions_queue.close() interactions_queue.close() parameters_queue.close() transitions_process.join() logging.info("[ACTOR] Transitions process joined") interactions_process.join() logging.info("[ACTOR] Interactions process joined") receive_policy_process.join() logging.info("[ACTOR] Receive policy process joined") logging.info("[ACTOR] join queues") transitions_queue.cancel_join_thread() interactions_queue.cancel_join_thread() parameters_queue.cancel_join_thread() logging.info("[ACTOR] queues closed") ################################################# # Core algorithm functions # ################################################# def act_with_policy( cfg: TrainRLServerPipelineConfig, shutdown_event: any, # Event, parameters_queue: Queue, transitions_queue: Queue, interactions_queue: Queue, ): """ Executes policy interaction within the environment. This function rolls out the policy in the environment, collecting interaction data and pushing it to a queue for streaming to the learner. Once an episode is completed, updated network parameters received from the learner are retrieved from a queue and loaded into the network. Args: cfg: Configuration settings for the interaction process. shutdown_event: Event to check if the process should shutdown. parameters_queue: Queue to receive updated network parameters from the learner. transitions_queue: Queue to send transitions to the learner. interactions_queue: Queue to send interactions to the learner. 
""" # Initialize logging for multiprocessing if not use_threads(cfg): log_dir = os.path.join(cfg.output_dir, "logs") os.makedirs(log_dir, exist_ok=True) log_file = os.path.join(log_dir, f"actor_policy_{os.getpid()}.log") init_logging(log_file=log_file, display_pid=True) logging.info("Actor policy process logging initialized") logging.info("make_env online") online_env = make_robot_env(cfg=cfg.env) set_seed(cfg.seed) device = get_safe_torch_device(cfg.policy.device, log=True) torch.backends.cudnn.benchmark = True torch.backends.cuda.matmul.allow_tf32 = True logging.info("make_policy") ### Instantiate the policy in both the actor and learner processes ### To avoid sending a SACPolicy object through the port, we create a policy instance ### on both sides, the learner sends the updated parameters every n steps to update the actor's parameters policy: SACPolicy = make_policy( cfg=cfg.policy, env_cfg=cfg.env, ) policy = policy.eval() assert isinstance(policy, nn.Module) obs, info = online_env.reset() # NOTE: For the moment we will solely handle the case of a single environment sum_reward_episode = 0 list_transition_to_send_to_learner = [] episode_intervention = False # Add counters for intervention rate calculation episode_intervention_steps = 0 episode_total_steps = 0 policy_timer = TimerManager("Policy inference", log=False) for interaction_step in range(cfg.policy.online_steps): start_time = time.perf_counter() if shutdown_event.is_set(): logging.info("[ACTOR] Shutting down act_with_policy") return if interaction_step >= cfg.policy.online_step_before_learning: # Time policy inference and check if it meets FPS requirement with policy_timer: action = policy.select_action(batch=obs) policy_fps = policy_timer.fps_last log_policy_frequency_issue(policy_fps=policy_fps, cfg=cfg, interaction_step=interaction_step) else: action = online_env.action_space.sample() next_obs, reward, done, truncated, info = online_env.step(action) sum_reward_episode += float(reward) # Increment total steps counter for intervention rate episode_total_steps += 1 # NOTE: We override the action if the intervention is True, because the action applied is the intervention action if "is_intervention" in info and info["is_intervention"]: # NOTE: The action space for demonstration before hand is with the full action space # but sometimes for example we want to deactivate the gripper action = info["action_intervention"] episode_intervention = True # Increment intervention steps counter episode_intervention_steps += 1 list_transition_to_send_to_learner.append( Transition( state=obs, action=action, reward=reward, next_state=next_obs, done=done, truncated=truncated, # TODO: (azouitine) Handle truncation properly complementary_info=info, ) ) # assign obs to the next obs and continue the rollout obs = next_obs if done or truncated: logging.info(f"[ACTOR] Global step {interaction_step}: Episode reward: {sum_reward_episode}") update_policy_parameters(policy=policy, parameters_queue=parameters_queue, device=device) if len(list_transition_to_send_to_learner) > 0: push_transitions_to_transport_queue( transitions=list_transition_to_send_to_learner, transitions_queue=transitions_queue, ) list_transition_to_send_to_learner = [] stats = get_frequency_stats(policy_timer) policy_timer.reset() # Calculate intervention rate intervention_rate = 0.0 if episode_total_steps > 0: intervention_rate = episode_intervention_steps / episode_total_steps # Send episodic reward to the learner interactions_queue.put( python_object_to_bytes( { "Episodic reward": 
sum_reward_episode, "Interaction step": interaction_step, "Episode intervention": int(episode_intervention), "Intervention rate": intervention_rate, **stats, } ) ) # Reset intervention counters sum_reward_episode = 0.0 episode_intervention = False episode_intervention_steps = 0 episode_total_steps = 0 obs, info = online_env.reset() if cfg.env.fps is not None: dt_time = time.perf_counter() - start_time busy_wait(1 / cfg.env.fps - dt_time) ################################################# # Communication Functions - Group all gRPC/messaging functions # ################################################# def establish_learner_connection( stub: services_pb2_grpc.LearnerServiceStub, shutdown_event: Event, # type: ignore attempts: int = 30, ): """Establish a connection with the learner. Args: stub (services_pb2_grpc.LearnerServiceStub): The stub to use for the connection. shutdown_event (Event): The event to check if the connection should be established. attempts (int): The number of attempts to establish the connection. Returns: bool: True if the connection is established, False otherwise. """ for _ in range(attempts): if shutdown_event.is_set(): logging.info("[ACTOR] Shutting down establish_learner_connection") return False # Force a connection attempt and check state try: logging.info("[ACTOR] Send ready message to Learner") if stub.Ready(services_pb2.Empty()) == services_pb2.Empty(): return True except grpc.RpcError as e: logging.error(f"[ACTOR] Waiting for Learner to be ready... {e}") time.sleep(2) return False @lru_cache(maxsize=1) def learner_service_client( host: str = "127.0.0.1", port: int = 50051, ) -> tuple[services_pb2_grpc.LearnerServiceStub, grpc.Channel]: """ Returns a client for the learner service. GRPC uses HTTP/2, which is a binary protocol and multiplexes requests over a single connection. So we need to create only one client and reuse it. """ channel = grpc.insecure_channel( f"{host}:{port}", grpc_channel_options(), ) stub = services_pb2_grpc.LearnerServiceStub(channel) logging.info("[ACTOR] Learner service client created") return stub, channel def receive_policy( cfg: TrainRLServerPipelineConfig, parameters_queue: Queue, shutdown_event: Event, # type: ignore learner_client: services_pb2_grpc.LearnerServiceStub | None = None, grpc_channel: grpc.Channel | None = None, ): """Receive parameters from the learner. Args: cfg (TrainRLServerPipelineConfig): The configuration for the actor. parameters_queue (Queue): The queue to receive the parameters. shutdown_event (Event): The event to check if the process should shutdown. 
""" logging.info("[ACTOR] Start receiving parameters from the Learner") if not use_threads(cfg): # Create a process-specific log file log_dir = os.path.join(cfg.output_dir, "logs") os.makedirs(log_dir, exist_ok=True) log_file = os.path.join(log_dir, f"actor_receive_policy_{os.getpid()}.log") # Initialize logging with explicit log file init_logging(log_file=log_file, display_pid=True) logging.info("Actor receive policy process logging initialized") # Setup process handlers to handle shutdown signal # But use shutdown event from the main process _ = ProcessSignalHandler(use_threads=False, display_pid=True) if grpc_channel is None or learner_client is None: learner_client, grpc_channel = learner_service_client( host=cfg.policy.actor_learner_config.learner_host, port=cfg.policy.actor_learner_config.learner_port, ) try: iterator = learner_client.StreamParameters(services_pb2.Empty()) receive_bytes_in_chunks( iterator, parameters_queue, shutdown_event, log_prefix="[ACTOR] parameters", ) except grpc.RpcError as e: logging.error(f"[ACTOR] gRPC error: {e}") if not use_threads(cfg): grpc_channel.close() logging.info("[ACTOR] Received policy loop stopped") def send_transitions( cfg: TrainRLServerPipelineConfig, transitions_queue: Queue, shutdown_event: any, # Event, learner_client: services_pb2_grpc.LearnerServiceStub | None = None, grpc_channel: grpc.Channel | None = None, ) -> services_pb2.Empty: """ Sends transitions to the learner. This function continuously retrieves messages from the queue and processes: - Transition Data: - A batch of transitions (observation, action, reward, next observation) is collected. - Transitions are moved to the CPU and serialized using PyTorch. - The serialized data is wrapped in a `services_pb2.Transition` message and sent to the learner. """ if not use_threads(cfg): # Create a process-specific log file log_dir = os.path.join(cfg.output_dir, "logs") os.makedirs(log_dir, exist_ok=True) log_file = os.path.join(log_dir, f"actor_transitions_{os.getpid()}.log") # Initialize logging with explicit log file init_logging(log_file=log_file, display_pid=True) logging.info("Actor transitions process logging initialized") if grpc_channel is None or learner_client is None: learner_client, grpc_channel = learner_service_client( host=cfg.policy.actor_learner_config.learner_host, port=cfg.policy.actor_learner_config.learner_port, ) try: learner_client.SendTransitions( transitions_stream( shutdown_event, transitions_queue, cfg.policy.actor_learner_config.queue_get_timeout ) ) except grpc.RpcError as e: logging.error(f"[ACTOR] gRPC error: {e}") logging.info("[ACTOR] Finished streaming transitions") if not use_threads(cfg): grpc_channel.close() logging.info("[ACTOR] Transitions process stopped") def send_interactions( cfg: TrainRLServerPipelineConfig, interactions_queue: Queue, shutdown_event: Event, # type: ignore learner_client: services_pb2_grpc.LearnerServiceStub | None = None, grpc_channel: grpc.Channel | None = None, ) -> services_pb2.Empty: """ Sends interactions to the learner. This function continuously retrieves messages from the queue and processes: - Interaction Messages: - Contains useful statistics about episodic rewards and policy timings. - The message is serialized using `pickle` and sent to the learner. 
""" if not use_threads(cfg): # Create a process-specific log file log_dir = os.path.join(cfg.output_dir, "logs") os.makedirs(log_dir, exist_ok=True) log_file = os.path.join(log_dir, f"actor_interactions_{os.getpid()}.log") # Initialize logging with explicit log file init_logging(log_file=log_file, display_pid=True) logging.info("Actor interactions process logging initialized") # Setup process handlers to handle shutdown signal # But use shutdown event from the main process _ = ProcessSignalHandler(use_threads=False, display_pid=True) if grpc_channel is None or learner_client is None: learner_client, grpc_channel = learner_service_client( host=cfg.policy.actor_learner_config.learner_host, port=cfg.policy.actor_learner_config.learner_port, ) try: learner_client.SendInteractions( interactions_stream( shutdown_event, interactions_queue, cfg.policy.actor_learner_config.queue_get_timeout ) ) except grpc.RpcError as e: logging.error(f"[ACTOR] gRPC error: {e}") logging.info("[ACTOR] Finished streaming interactions") if not use_threads(cfg): grpc_channel.close() logging.info("[ACTOR] Interactions process stopped") def transitions_stream(shutdown_event: Event, transitions_queue: Queue, timeout: float) -> services_pb2.Empty: # type: ignore while not shutdown_event.is_set(): try: message = transitions_queue.get(block=True, timeout=timeout) except Empty: logging.debug("[ACTOR] Transition queue is empty") continue yield from send_bytes_in_chunks( message, services_pb2.Transition, log_prefix="[ACTOR] Send transitions" ) return services_pb2.Empty() def interactions_stream( shutdown_event: Event, interactions_queue: Queue, timeout: float, # type: ignore ) -> services_pb2.Empty: while not shutdown_event.is_set(): try: message = interactions_queue.get(block=True, timeout=timeout) except Empty: logging.debug("[ACTOR] Interaction queue is empty") continue yield from send_bytes_in_chunks( message, services_pb2.InteractionMessage, log_prefix="[ACTOR] Send interactions", ) return services_pb2.Empty() ################################################# # Policy functions # ################################################# def update_policy_parameters(policy: SACPolicy, parameters_queue: Queue, device): bytes_state_dict = get_last_item_from_queue(parameters_queue, block=False) if bytes_state_dict is not None: logging.info("[ACTOR] Load new parameters from Learner.") state_dicts = bytes_to_state_dict(bytes_state_dict) # TODO: check encoder parameter synchronization possible issues: # 1. When shared_encoder=True, we're loading stale encoder params from actor's state_dict # instead of the updated encoder params from critic (which is optimized separately) # 2. When freeze_vision_encoder=True, we waste bandwidth sending/loading frozen params # 3. 
Need to handle encoder params correctly for both actor and discrete_critic # Potential fixes: # - Send critic's encoder state when shared_encoder=True # - Skip encoder params entirely when freeze_vision_encoder=True # - Ensure discrete_critic gets correct encoder state (currently uses encoder_critic) # Load actor state dict actor_state_dict = move_state_dict_to_device(state_dicts["policy"], device=device) policy.actor.load_state_dict(actor_state_dict) # Load discrete critic if present if hasattr(policy, "discrete_critic") and "discrete_critic" in state_dicts: discrete_critic_state_dict = move_state_dict_to_device( state_dicts["discrete_critic"], device=device ) policy.discrete_critic.load_state_dict(discrete_critic_state_dict) logging.info("[ACTOR] Loaded discrete critic parameters from Learner.") ################################################# # Utilities functions # ################################################# def push_transitions_to_transport_queue(transitions: list, transitions_queue): """Send transitions to learner in smaller chunks to avoid network issues. Args: transitions: List of transitions to send message_queue: Queue to send messages to learner chunk_size: Size of each chunk to send """ transition_to_send_to_learner = [] for transition in transitions: tr = move_transition_to_device(transition=transition, device="cpu") for key, value in tr["state"].items(): if torch.isnan(value).any(): logging.warning(f"Found NaN values in transition {key}") transition_to_send_to_learner.append(tr) transitions_queue.put(transitions_to_bytes(transition_to_send_to_learner)) def get_frequency_stats(timer: TimerManager) -> dict[str, float]: """Get the frequency statistics of the policy. Args: timer (TimerManager): The timer with collected metrics. Returns: dict[str, float]: The frequency statistics of the policy. """ stats = {} if timer.count > 1: avg_fps = timer.fps_avg p90_fps = timer.fps_percentile(90) logging.debug(f"[ACTOR] Average policy frame rate: {avg_fps}") logging.debug(f"[ACTOR] Policy frame rate 90th percentile: {p90_fps}") stats = { "Policy frequency [Hz]": avg_fps, "Policy frequency 90th-p [Hz]": p90_fps, } return stats def log_policy_frequency_issue(policy_fps: float, cfg: TrainRLServerPipelineConfig, interaction_step: int): if policy_fps < cfg.env.fps: logging.warning( f"[ACTOR] Policy FPS {policy_fps:.1f} below required {cfg.env.fps} at step {interaction_step}" ) def use_threads(cfg: TrainRLServerPipelineConfig) -> bool: return cfg.policy.concurrency.actor == "threads" if __name__ == "__main__": actor_cli()
lerobot/src/lerobot/scripts/rl/actor.py/0
{ "file_path": "lerobot/src/lerobot/scripts/rl/actor.py", "repo_id": "lerobot", "token_count": 9592 }
216
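`transitions_stream` and `interactions_stream` above are generators that keep draining a queue until the shutdown event fires, which is what lets the gRPC client stream data to the learner as it is produced. A minimal, dependency-free sketch of that producer/consumer pattern (the toy producer thread and message payloads are assumptions, and the gRPC chunking is omitted):

```python
import queue
import threading
import time

def message_stream(shutdown_event: threading.Event, messages: queue.Queue, timeout: float):
    # Yield queued messages until the shutdown event is set; an empty queue just retries.
    while not shutdown_event.is_set():
        try:
            yield messages.get(block=True, timeout=timeout)
        except queue.Empty:
            continue

shutdown = threading.Event()
msgs: queue.Queue = queue.Queue()

def producer():
    for i in range(3):
        msgs.put({"Episodic reward": float(i), "Interaction step": i})
        time.sleep(0.05)
    time.sleep(0.2)  # let the consumer drain the queue before signalling shutdown
    shutdown.set()

threading.Thread(target=producer, daemon=True).start()
for message in message_stream(shutdown, msgs, timeout=0.1):
    print(message)
```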
# Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Simple script to control a robot from teleoperation. Example: ```shell lerobot-teleoperate \ --robot.type=so101_follower \ --robot.port=/dev/tty.usbmodem58760431541 \ --robot.cameras="{ front: {type: opencv, index_or_path: 0, width: 1920, height: 1080, fps: 30}}" \ --robot.id=black \ --teleop.type=so101_leader \ --teleop.port=/dev/tty.usbmodem58760431551 \ --teleop.id=blue \ --display_data=true ``` Example teleoperation with bimanual so100: ```shell lerobot-teleoperate \ --robot.type=bi_so100_follower \ --robot.left_arm_port=/dev/tty.usbmodem5A460851411 \ --robot.right_arm_port=/dev/tty.usbmodem5A460812391 \ --robot.id=bimanual_follower \ --robot.cameras='{ left: {"type": "opencv", "index_or_path": 0, "width": 1920, "height": 1080, "fps": 30}, top: {"type": "opencv", "index_or_path": 1, "width": 1920, "height": 1080, "fps": 30}, right: {"type": "opencv", "index_or_path": 2, "width": 1920, "height": 1080, "fps": 30} }' \ --teleop.type=bi_so100_leader \ --teleop.left_arm_port=/dev/tty.usbmodem5A460828611 \ --teleop.right_arm_port=/dev/tty.usbmodem5A460826981 \ --teleop.id=bimanual_leader \ --display_data=true ``` """ import logging import time from dataclasses import asdict, dataclass from pprint import pformat import draccus import rerun as rr from lerobot.cameras.opencv.configuration_opencv import OpenCVCameraConfig # noqa: F401 from lerobot.cameras.realsense.configuration_realsense import RealSenseCameraConfig # noqa: F401 from lerobot.robots import ( # noqa: F401 Robot, RobotConfig, bi_so100_follower, hope_jr, koch_follower, make_robot_from_config, so100_follower, so101_follower, ) from lerobot.teleoperators import ( # noqa: F401 Teleoperator, TeleoperatorConfig, bi_so100_leader, gamepad, homunculus, koch_leader, make_teleoperator_from_config, so100_leader, so101_leader, ) from lerobot.utils.robot_utils import busy_wait from lerobot.utils.utils import init_logging, move_cursor_up from lerobot.utils.visualization_utils import _init_rerun, log_rerun_data @dataclass class TeleoperateConfig: # TODO: pepijn, steven: if more robots require multiple teleoperators (like lekiwi) its good to make this possibele in teleop.py and record.py with List[Teleoperator] teleop: TeleoperatorConfig robot: RobotConfig # Limit the maximum frames per second. 
fps: int = 60 teleop_time_s: float | None = None # Display all cameras on screen display_data: bool = False def teleop_loop( teleop: Teleoperator, robot: Robot, fps: int, display_data: bool = False, duration: float | None = None ): display_len = max(len(key) for key in robot.action_features) start = time.perf_counter() while True: loop_start = time.perf_counter() action = teleop.get_action() if display_data: observation = robot.get_observation() log_rerun_data(observation, action) robot.send_action(action) dt_s = time.perf_counter() - loop_start busy_wait(1 / fps - dt_s) loop_s = time.perf_counter() - loop_start print("\n" + "-" * (display_len + 10)) print(f"{'NAME':<{display_len}} | {'NORM':>7}") for motor, value in action.items(): print(f"{motor:<{display_len}} | {value:>7.2f}") print(f"\ntime: {loop_s * 1e3:.2f}ms ({1 / loop_s:.0f} Hz)") if duration is not None and time.perf_counter() - start >= duration: return move_cursor_up(len(action) + 5) @draccus.wrap() def teleoperate(cfg: TeleoperateConfig): init_logging() logging.info(pformat(asdict(cfg))) if cfg.display_data: _init_rerun(session_name="teleoperation") teleop = make_teleoperator_from_config(cfg.teleop) robot = make_robot_from_config(cfg.robot) teleop.connect() robot.connect() try: teleop_loop(teleop, robot, cfg.fps, display_data=cfg.display_data, duration=cfg.teleop_time_s) except KeyboardInterrupt: pass finally: if cfg.display_data: rr.rerun_shutdown() teleop.disconnect() robot.disconnect() def main(): teleoperate() if __name__ == "__main__": main()
lerobot/src/lerobot/teleoperate.py/0
{ "file_path": "lerobot/src/lerobot/teleoperate.py", "repo_id": "lerobot", "token_count": 1977 }
217
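`teleop_loop` holds a fixed control rate by timing the work done each iteration and then waiting out whatever remains of the 1/fps budget. A minimal sketch of that loop structure, with `time.sleep` standing in for lerobot's `busy_wait` and a fake 2 ms workload as the only assumption:

```python
import time

def rate_limited_loop(fps: int, n_iters: int):
    for _ in range(n_iters):
        loop_start = time.perf_counter()

        # ... get action from the leader arm and send it to the follower ...
        time.sleep(0.002)  # pretend the work took ~2 ms

        dt = time.perf_counter() - loop_start
        if (remaining := 1 / fps - dt) > 0:
            time.sleep(remaining)

        loop_s = time.perf_counter() - loop_start
        print(f"loop: {loop_s * 1e3:.1f} ms ({1 / loop_s:.0f} Hz)")

rate_limited_loop(fps=60, n_iters=3)
```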
#!/usr/bin/env python # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import warnings from pathlib import Path from typing import TypeVar import imageio JsonLike = str | int | float | bool | None | list["JsonLike"] | dict[str, "JsonLike"] | tuple["JsonLike", ...] T = TypeVar("T", bound=JsonLike) def write_video(video_path, stacked_frames, fps): # Filter out DeprecationWarnings raised from pkg_resources with warnings.catch_warnings(): warnings.filterwarnings( "ignore", "pkg_resources is deprecated as an API", category=DeprecationWarning ) imageio.mimsave(video_path, stacked_frames, fps=fps) def deserialize_json_into_object(fpath: Path, obj: T) -> T: """ Loads the JSON data from `fpath` and recursively fills `obj` with the corresponding values (strictly matching structure and types). Tuples in `obj` are expected to be lists in the JSON data, which will be converted back into tuples. """ with open(fpath, encoding="utf-8") as f: data = json.load(f) def _deserialize(target, source): """ Recursively overwrite the structure in `target` with data from `source`, performing strict checks on structure and type. Returns the updated version of `target` (especially important for tuples). """ # If the target is a dictionary, source must be a dictionary as well. if isinstance(target, dict): if not isinstance(source, dict): raise TypeError(f"Type mismatch: expected dict, got {type(source)}") # Check that they have exactly the same set of keys. if target.keys() != source.keys(): raise ValueError( f"Dictionary keys do not match.\nExpected: {target.keys()}, got: {source.keys()}" ) # Recursively update each key. for k in target: target[k] = _deserialize(target[k], source[k]) return target # If the target is a list, source must be a list as well. elif isinstance(target, list): if not isinstance(source, list): raise TypeError(f"Type mismatch: expected list, got {type(source)}") # Check length if len(target) != len(source): raise ValueError(f"List length mismatch: expected {len(target)}, got {len(source)}") # Recursively update each element. for i in range(len(target)): target[i] = _deserialize(target[i], source[i]) return target # If the target is a tuple, the source must be a list in JSON, # which we'll convert back to a tuple. elif isinstance(target, tuple): if not isinstance(source, list): raise TypeError(f"Type mismatch: expected list (for tuple), got {type(source)}") if len(target) != len(source): raise ValueError(f"Tuple length mismatch: expected {len(target)}, got {len(source)}") # Convert each element, forming a new tuple. converted_items = [] for t_item, s_item in zip(target, source, strict=False): converted_items.append(_deserialize(t_item, s_item)) # Return a brand new tuple (tuples are immutable in Python). return tuple(converted_items) # Otherwise, we're dealing with a "primitive" (int, float, str, bool, None). else: # Check the exact type. 
If these must match 1:1, do: if type(target) is not type(source): raise TypeError(f"Type mismatch: expected {type(target)}, got {type(source)}") return source # Perform the in-place/recursive deserialization updated_obj = _deserialize(obj, data) return updated_obj
lerobot/src/lerobot/utils/io_utils.py/0
{ "file_path": "lerobot/src/lerobot/utils/io_utils.py", "repo_id": "lerobot", "token_count": 1707 }
218
#!/usr/bin/env python # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Example of running a specific test: # ```bash # pytest tests/cameras/test_opencv.py::test_connect # ``` from pathlib import Path import numpy as np import pytest from lerobot.cameras.configs import Cv2Rotation from lerobot.cameras.opencv import OpenCVCamera, OpenCVCameraConfig from lerobot.errors import DeviceAlreadyConnectedError, DeviceNotConnectedError # NOTE(Steven): more tests + assertions? TEST_ARTIFACTS_DIR = Path(__file__).parent.parent / "artifacts" / "cameras" DEFAULT_PNG_FILE_PATH = TEST_ARTIFACTS_DIR / "image_160x120.png" TEST_IMAGE_SIZES = ["128x128", "160x120", "320x180", "480x270"] TEST_IMAGE_PATHS = [TEST_ARTIFACTS_DIR / f"image_{size}.png" for size in TEST_IMAGE_SIZES] def test_abc_implementation(): """Instantiation should raise an error if the class doesn't implement abstract methods/properties.""" config = OpenCVCameraConfig(index_or_path=0) _ = OpenCVCamera(config) def test_connect(): config = OpenCVCameraConfig(index_or_path=DEFAULT_PNG_FILE_PATH) camera = OpenCVCamera(config) camera.connect(warmup=False) assert camera.is_connected def test_connect_already_connected(): config = OpenCVCameraConfig(index_or_path=DEFAULT_PNG_FILE_PATH) camera = OpenCVCamera(config) camera.connect(warmup=False) with pytest.raises(DeviceAlreadyConnectedError): camera.connect(warmup=False) def test_connect_invalid_camera_path(): config = OpenCVCameraConfig(index_or_path="nonexistent/camera.png") camera = OpenCVCamera(config) with pytest.raises(ConnectionError): camera.connect(warmup=False) def test_invalid_width_connect(): config = OpenCVCameraConfig( index_or_path=DEFAULT_PNG_FILE_PATH, width=99999, # Invalid width to trigger error height=480, ) camera = OpenCVCamera(config) with pytest.raises(RuntimeError): camera.connect(warmup=False) @pytest.mark.parametrize("index_or_path", TEST_IMAGE_PATHS, ids=TEST_IMAGE_SIZES) def test_read(index_or_path): config = OpenCVCameraConfig(index_or_path=index_or_path) camera = OpenCVCamera(config) camera.connect(warmup=False) img = camera.read() assert isinstance(img, np.ndarray) def test_read_before_connect(): config = OpenCVCameraConfig(index_or_path=DEFAULT_PNG_FILE_PATH) camera = OpenCVCamera(config) with pytest.raises(DeviceNotConnectedError): _ = camera.read() def test_disconnect(): config = OpenCVCameraConfig(index_or_path=DEFAULT_PNG_FILE_PATH) camera = OpenCVCamera(config) camera.connect(warmup=False) camera.disconnect() assert not camera.is_connected def test_disconnect_before_connect(): config = OpenCVCameraConfig(index_or_path=DEFAULT_PNG_FILE_PATH) camera = OpenCVCamera(config) with pytest.raises(DeviceNotConnectedError): _ = camera.disconnect() @pytest.mark.parametrize("index_or_path", TEST_IMAGE_PATHS, ids=TEST_IMAGE_SIZES) def test_async_read(index_or_path): config = OpenCVCameraConfig(index_or_path=index_or_path) camera = OpenCVCamera(config) camera.connect(warmup=False) try: img = camera.async_read() assert 
camera.thread is not None assert camera.thread.is_alive() assert isinstance(img, np.ndarray) finally: if camera.is_connected: camera.disconnect() # To stop/join the thread. Otherwise get warnings when the test ends def test_async_read_timeout(): config = OpenCVCameraConfig(index_or_path=DEFAULT_PNG_FILE_PATH) camera = OpenCVCamera(config) camera.connect(warmup=False) try: with pytest.raises(TimeoutError): camera.async_read(timeout_ms=0) finally: if camera.is_connected: camera.disconnect() def test_async_read_before_connect(): config = OpenCVCameraConfig(index_or_path=DEFAULT_PNG_FILE_PATH) camera = OpenCVCamera(config) with pytest.raises(DeviceNotConnectedError): _ = camera.async_read() @pytest.mark.parametrize("index_or_path", TEST_IMAGE_PATHS, ids=TEST_IMAGE_SIZES) @pytest.mark.parametrize( "rotation", [ Cv2Rotation.NO_ROTATION, Cv2Rotation.ROTATE_90, Cv2Rotation.ROTATE_180, Cv2Rotation.ROTATE_270, ], ids=["no_rot", "rot90", "rot180", "rot270"], ) def test_rotation(rotation, index_or_path): filename = Path(index_or_path).name dimensions = filename.split("_")[-1].split(".")[0] # Assumes filenames format (_wxh.png) original_width, original_height = map(int, dimensions.split("x")) config = OpenCVCameraConfig(index_or_path=index_or_path, rotation=rotation) camera = OpenCVCamera(config) camera.connect(warmup=False) img = camera.read() assert isinstance(img, np.ndarray) if rotation in (Cv2Rotation.ROTATE_90, Cv2Rotation.ROTATE_270): assert camera.width == original_height assert camera.height == original_width assert img.shape[:2] == (original_width, original_height) else: assert camera.width == original_width assert camera.height == original_height assert img.shape[:2] == (original_height, original_width)
lerobot/tests/cameras/test_opencv.py/0
{ "file_path": "lerobot/tests/cameras/test_opencv.py", "repo_id": "lerobot", "token_count": 2238 }
219
# Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import random from functools import partial from pathlib import Path from typing import Protocol from unittest.mock import patch import datasets import numpy as np import PIL.Image import pytest import torch from lerobot.datasets.lerobot_dataset import CODEBASE_VERSION, LeRobotDataset, LeRobotDatasetMetadata from lerobot.datasets.utils import ( DEFAULT_CHUNK_SIZE, DEFAULT_FEATURES, DEFAULT_PARQUET_PATH, DEFAULT_VIDEO_PATH, get_hf_features_from_features, hf_transform_to_torch, ) from tests.fixtures.constants import ( DEFAULT_FPS, DUMMY_CAMERA_FEATURES, DUMMY_MOTOR_FEATURES, DUMMY_REPO_ID, DUMMY_ROBOT_TYPE, DUMMY_VIDEO_INFO, ) class LeRobotDatasetFactory(Protocol): def __call__(self, *args, **kwargs) -> LeRobotDataset: ... def get_task_index(task_dicts: dict, task: str) -> int: tasks = {d["task_index"]: d["task"] for d in task_dicts.values()} task_to_task_index = {task: task_idx for task_idx, task in tasks.items()} return task_to_task_index[task] @pytest.fixture(scope="session") def img_tensor_factory(): def _create_img_tensor(height=100, width=100, channels=3, dtype=torch.float32) -> torch.Tensor: return torch.rand((channels, height, width), dtype=dtype) return _create_img_tensor @pytest.fixture(scope="session") def img_array_factory(): def _create_img_array(height=100, width=100, channels=3, dtype=np.uint8) -> np.ndarray: if np.issubdtype(dtype, np.unsignedinteger): # Int array in [0, 255] range img_array = np.random.randint(0, 256, size=(height, width, channels), dtype=dtype) elif np.issubdtype(dtype, np.floating): # Float array in [0, 1] range img_array = np.random.rand(height, width, channels).astype(dtype) else: raise ValueError(dtype) return img_array return _create_img_array @pytest.fixture(scope="session") def img_factory(img_array_factory): def _create_img(height=100, width=100) -> PIL.Image.Image: img_array = img_array_factory(height=height, width=width) return PIL.Image.fromarray(img_array) return _create_img @pytest.fixture(scope="session") def features_factory(): def _create_features( motor_features: dict = DUMMY_MOTOR_FEATURES, camera_features: dict = DUMMY_CAMERA_FEATURES, use_videos: bool = True, ) -> dict: if use_videos: camera_ft = { key: {"dtype": "video", **ft, **DUMMY_VIDEO_INFO} for key, ft in camera_features.items() } else: camera_ft = {key: {"dtype": "image", **ft} for key, ft in camera_features.items()} return { **motor_features, **camera_ft, **DEFAULT_FEATURES, } return _create_features @pytest.fixture(scope="session") def info_factory(features_factory): def _create_info( codebase_version: str = CODEBASE_VERSION, fps: int = DEFAULT_FPS, robot_type: str = DUMMY_ROBOT_TYPE, total_episodes: int = 0, total_frames: int = 0, total_tasks: int = 0, total_videos: int = 0, total_chunks: int = 0, chunks_size: int = DEFAULT_CHUNK_SIZE, data_path: str = DEFAULT_PARQUET_PATH, video_path: str = DEFAULT_VIDEO_PATH, motor_features: dict = DUMMY_MOTOR_FEATURES, camera_features: dict = 
DUMMY_CAMERA_FEATURES, use_videos: bool = True, ) -> dict: features = features_factory(motor_features, camera_features, use_videos) return { "codebase_version": codebase_version, "robot_type": robot_type, "total_episodes": total_episodes, "total_frames": total_frames, "total_tasks": total_tasks, "total_videos": total_videos, "total_chunks": total_chunks, "chunks_size": chunks_size, "fps": fps, "splits": {}, "data_path": data_path, "video_path": video_path if use_videos else None, "features": features, } return _create_info @pytest.fixture(scope="session") def stats_factory(): def _create_stats( features: dict[str] | None = None, ) -> dict: stats = {} for key, ft in features.items(): shape = ft["shape"] dtype = ft["dtype"] if dtype in ["image", "video"]: stats[key] = { "max": np.full((3, 1, 1), 1, dtype=np.float32).tolist(), "mean": np.full((3, 1, 1), 0.5, dtype=np.float32).tolist(), "min": np.full((3, 1, 1), 0, dtype=np.float32).tolist(), "std": np.full((3, 1, 1), 0.25, dtype=np.float32).tolist(), "count": [10], } else: stats[key] = { "max": np.full(shape, 1, dtype=dtype).tolist(), "mean": np.full(shape, 0.5, dtype=dtype).tolist(), "min": np.full(shape, 0, dtype=dtype).tolist(), "std": np.full(shape, 0.25, dtype=dtype).tolist(), "count": [10], } return stats return _create_stats @pytest.fixture(scope="session") def episodes_stats_factory(stats_factory): def _create_episodes_stats( features: dict[str], total_episodes: int = 3, ) -> dict: episodes_stats = {} for episode_index in range(total_episodes): episodes_stats[episode_index] = { "episode_index": episode_index, "stats": stats_factory(features), } return episodes_stats return _create_episodes_stats @pytest.fixture(scope="session") def tasks_factory(): def _create_tasks(total_tasks: int = 3) -> int: tasks = {} for task_index in range(total_tasks): task_dict = {"task_index": task_index, "task": f"Perform action {task_index}."} tasks[task_index] = task_dict return tasks return _create_tasks @pytest.fixture(scope="session") def episodes_factory(tasks_factory): def _create_episodes( total_episodes: int = 3, total_frames: int = 400, tasks: dict | None = None, multi_task: bool = False, ): if total_episodes <= 0 or total_frames <= 0: raise ValueError("num_episodes and total_length must be positive integers.") if total_frames < total_episodes: raise ValueError("total_length must be greater than or equal to num_episodes.") if not tasks: min_tasks = 2 if multi_task else 1 total_tasks = random.randint(min_tasks, total_episodes) tasks = tasks_factory(total_tasks) if total_episodes < len(tasks) and not multi_task: raise ValueError("The number of tasks should be less than the number of episodes.") # Generate random lengths that sum up to total_length lengths = np.random.multinomial(total_frames, [1 / total_episodes] * total_episodes).tolist() tasks_list = [task_dict["task"] for task_dict in tasks.values()] num_tasks_available = len(tasks_list) episodes = {} remaining_tasks = tasks_list.copy() for ep_idx in range(total_episodes): num_tasks_in_episode = random.randint(1, min(3, num_tasks_available)) if multi_task else 1 tasks_to_sample = remaining_tasks if remaining_tasks else tasks_list episode_tasks = random.sample(tasks_to_sample, min(num_tasks_in_episode, len(tasks_to_sample))) if remaining_tasks: for task in episode_tasks: remaining_tasks.remove(task) episodes[ep_idx] = { "episode_index": ep_idx, "tasks": episode_tasks, "length": lengths[ep_idx], } return episodes return _create_episodes @pytest.fixture(scope="session") def 
hf_dataset_factory(features_factory, tasks_factory, episodes_factory, img_array_factory): def _create_hf_dataset( features: dict | None = None, tasks: list[dict] | None = None, episodes: list[dict] | None = None, fps: int = DEFAULT_FPS, ) -> datasets.Dataset: if not tasks: tasks = tasks_factory() if not episodes: episodes = episodes_factory() if not features: features = features_factory() timestamp_col = np.array([], dtype=np.float32) frame_index_col = np.array([], dtype=np.int64) episode_index_col = np.array([], dtype=np.int64) task_index = np.array([], dtype=np.int64) for ep_dict in episodes.values(): timestamp_col = np.concatenate((timestamp_col, np.arange(ep_dict["length"]) / fps)) frame_index_col = np.concatenate((frame_index_col, np.arange(ep_dict["length"], dtype=int))) episode_index_col = np.concatenate( (episode_index_col, np.full(ep_dict["length"], ep_dict["episode_index"], dtype=int)) ) ep_task_index = get_task_index(tasks, ep_dict["tasks"][0]) task_index = np.concatenate((task_index, np.full(ep_dict["length"], ep_task_index, dtype=int))) index_col = np.arange(len(episode_index_col)) robot_cols = {} for key, ft in features.items(): if ft["dtype"] == "image": robot_cols[key] = [ img_array_factory(height=ft["shapes"][1], width=ft["shapes"][0]) for _ in range(len(index_col)) ] elif ft["shape"][0] > 1 and ft["dtype"] != "video": robot_cols[key] = np.random.random((len(index_col), ft["shape"][0])).astype(ft["dtype"]) hf_features = get_hf_features_from_features(features) dataset = datasets.Dataset.from_dict( { **robot_cols, "timestamp": timestamp_col, "frame_index": frame_index_col, "episode_index": episode_index_col, "index": index_col, "task_index": task_index, }, features=hf_features, ) dataset.set_transform(hf_transform_to_torch) return dataset return _create_hf_dataset @pytest.fixture(scope="session") def lerobot_dataset_metadata_factory( info_factory, stats_factory, episodes_stats_factory, tasks_factory, episodes_factory, mock_snapshot_download_factory, ): def _create_lerobot_dataset_metadata( root: Path, repo_id: str = DUMMY_REPO_ID, info: dict | None = None, stats: dict | None = None, episodes_stats: list[dict] | None = None, tasks: list[dict] | None = None, episodes: list[dict] | None = None, ) -> LeRobotDatasetMetadata: if not info: info = info_factory() if not stats: stats = stats_factory(features=info["features"]) if not episodes_stats: episodes_stats = episodes_stats_factory( features=info["features"], total_episodes=info["total_episodes"] ) if not tasks: tasks = tasks_factory(total_tasks=info["total_tasks"]) if not episodes: episodes = episodes_factory( total_episodes=info["total_episodes"], total_frames=info["total_frames"], tasks=tasks ) mock_snapshot_download = mock_snapshot_download_factory( info=info, stats=stats, episodes_stats=episodes_stats, tasks=tasks, episodes=episodes, ) with ( patch("lerobot.datasets.lerobot_dataset.get_safe_version") as mock_get_safe_version_patch, patch("lerobot.datasets.lerobot_dataset.snapshot_download") as mock_snapshot_download_patch, ): mock_get_safe_version_patch.side_effect = lambda repo_id, version: version mock_snapshot_download_patch.side_effect = mock_snapshot_download return LeRobotDatasetMetadata(repo_id=repo_id, root=root) return _create_lerobot_dataset_metadata @pytest.fixture(scope="session") def lerobot_dataset_factory( info_factory, stats_factory, episodes_stats_factory, tasks_factory, episodes_factory, hf_dataset_factory, mock_snapshot_download_factory, lerobot_dataset_metadata_factory, ) -> LeRobotDatasetFactory: def 
_create_lerobot_dataset( root: Path, repo_id: str = DUMMY_REPO_ID, total_episodes: int = 3, total_frames: int = 150, total_tasks: int = 1, multi_task: bool = False, info: dict | None = None, stats: dict | None = None, episodes_stats: list[dict] | None = None, tasks: list[dict] | None = None, episode_dicts: list[dict] | None = None, hf_dataset: datasets.Dataset | None = None, **kwargs, ) -> LeRobotDataset: if not info: info = info_factory( total_episodes=total_episodes, total_frames=total_frames, total_tasks=total_tasks ) if not stats: stats = stats_factory(features=info["features"]) if not episodes_stats: episodes_stats = episodes_stats_factory(features=info["features"], total_episodes=total_episodes) if not tasks: tasks = tasks_factory(total_tasks=info["total_tasks"]) if not episode_dicts: episode_dicts = episodes_factory( total_episodes=info["total_episodes"], total_frames=info["total_frames"], tasks=tasks, multi_task=multi_task, ) if not hf_dataset: hf_dataset = hf_dataset_factory(tasks=tasks, episodes=episode_dicts, fps=info["fps"]) mock_snapshot_download = mock_snapshot_download_factory( info=info, stats=stats, episodes_stats=episodes_stats, tasks=tasks, episodes=episode_dicts, hf_dataset=hf_dataset, ) mock_metadata = lerobot_dataset_metadata_factory( root=root, repo_id=repo_id, info=info, stats=stats, episodes_stats=episodes_stats, tasks=tasks, episodes=episode_dicts, ) with ( patch("lerobot.datasets.lerobot_dataset.LeRobotDatasetMetadata") as mock_metadata_patch, patch("lerobot.datasets.lerobot_dataset.get_safe_version") as mock_get_safe_version_patch, patch("lerobot.datasets.lerobot_dataset.snapshot_download") as mock_snapshot_download_patch, ): mock_metadata_patch.return_value = mock_metadata mock_get_safe_version_patch.side_effect = lambda repo_id, version: version mock_snapshot_download_patch.side_effect = mock_snapshot_download return LeRobotDataset(repo_id=repo_id, root=root, **kwargs) return _create_lerobot_dataset @pytest.fixture(scope="session") def empty_lerobot_dataset_factory() -> LeRobotDatasetFactory: return partial(LeRobotDataset.create, repo_id=DUMMY_REPO_ID, fps=DEFAULT_FPS)
lerobot/tests/fixtures/dataset_factories.py/0
{ "file_path": "lerobot/tests/fixtures/dataset_factories.py", "repo_id": "lerobot", "token_count": 7537 }
220
#!/usr/bin/env python # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect from copy import deepcopy from pathlib import Path import einops import pytest import torch from packaging import version from safetensors.torch import load_file from lerobot import available_policies from lerobot.configs.default import DatasetConfig from lerobot.configs.train import TrainPipelineConfig from lerobot.configs.types import FeatureType, NormalizationMode, PolicyFeature from lerobot.constants import ACTION, OBS_STATE from lerobot.datasets.factory import make_dataset from lerobot.datasets.utils import cycle, dataset_to_policy_features from lerobot.envs.factory import make_env, make_env_config from lerobot.envs.utils import preprocess_observation from lerobot.optim.factory import make_optimizer_and_scheduler from lerobot.policies.act.configuration_act import ACTConfig from lerobot.policies.act.modeling_act import ACTTemporalEnsembler from lerobot.policies.factory import ( get_policy_class, make_policy, make_policy_config, ) from lerobot.policies.normalize import Normalize, Unnormalize from lerobot.policies.pretrained import PreTrainedPolicy from lerobot.utils.random_utils import seeded_context from tests.artifacts.policies.save_policy_to_safetensors import get_policy_stats from tests.utils import DEVICE, require_cpu, require_env, require_x86_64_kernel @pytest.fixture def dummy_dataset_metadata(lerobot_dataset_metadata_factory, info_factory, tmp_path): # Create only one camera input which is squared to fit all current policy constraints # e.g. 
vqbet and tdmpc works with one camera only, and tdmpc requires it to be squared camera_features = { "observation.images.laptop": { "shape": (84, 84, 3), "names": ["height", "width", "channels"], "info": None, }, } motor_features = { "action": { "dtype": "float32", "shape": (6,), "names": ["shoulder_pan", "shoulder_lift", "elbow_flex", "wrist_flex", "wrist_roll", "gripper"], }, "observation.state": { "dtype": "float32", "shape": (6,), "names": ["shoulder_pan", "shoulder_lift", "elbow_flex", "wrist_flex", "wrist_roll", "gripper"], }, } info = info_factory( total_episodes=1, total_frames=1, camera_features=camera_features, motor_features=motor_features ) ds_meta = lerobot_dataset_metadata_factory(root=tmp_path / "init", info=info) return ds_meta @pytest.mark.parametrize("policy_name", available_policies) def test_get_policy_and_config_classes(policy_name: str): """Check that the correct policy and config classes are returned.""" policy_cls = get_policy_class(policy_name) policy_cfg = make_policy_config(policy_name) assert policy_cls.name == policy_name assert issubclass( policy_cfg.__class__, inspect.signature(policy_cls.__init__).parameters["config"].annotation ) @pytest.mark.parametrize( "ds_repo_id,env_name,env_kwargs,policy_name,policy_kwargs", [ ("lerobot/xarm_lift_medium", "xarm", {}, "tdmpc", {"use_mpc": True}), ("lerobot/pusht", "pusht", {}, "diffusion", {}), ("lerobot/pusht", "pusht", {}, "vqbet", {}), ("lerobot/pusht", "pusht", {}, "act", {}), ("lerobot/aloha_sim_insertion_human", "aloha", {"task": "AlohaInsertion-v0"}, "act", {}), ( "lerobot/aloha_sim_insertion_scripted", "aloha", {"task": "AlohaInsertion-v0"}, "act", {}, ), ( "lerobot/aloha_sim_insertion_human", "aloha", {"task": "AlohaInsertion-v0"}, "diffusion", {}, ), ( "lerobot/aloha_sim_transfer_cube_human", "aloha", {"task": "AlohaTransferCube-v0"}, "act", {}, ), ( "lerobot/aloha_sim_transfer_cube_scripted", "aloha", {"task": "AlohaTransferCube-v0"}, "act", {}, ), ], ) @require_env def test_policy(ds_repo_id, env_name, env_kwargs, policy_name, policy_kwargs): """ Tests: - Making the policy object. - Checking that the policy follows the correct protocol and subclasses nn.Module and PyTorchModelHubMixin. - Updating the policy. - Using the policy to select actions at inference time. - Test the action can be applied to the policy Note: We test various combinations of policy and dataset. The combinations are by no means exhaustive, and for now we add tests as we see fit. """ train_cfg = TrainPipelineConfig( # TODO(rcadene, aliberts): remove dataset download dataset=DatasetConfig(repo_id=ds_repo_id, episodes=[0]), policy=make_policy_config(policy_name, push_to_hub=False, **policy_kwargs), env=make_env_config(env_name, **env_kwargs), ) train_cfg.validate() # Check that we can make the policy object. dataset = make_dataset(train_cfg) policy = make_policy(train_cfg.policy, ds_meta=dataset.meta) assert isinstance(policy, PreTrainedPolicy) # Check that we run select_actions and get the appropriate output. 
env = make_env(train_cfg.env, n_envs=2) dataloader = torch.utils.data.DataLoader( dataset, num_workers=0, batch_size=2, shuffle=True, pin_memory=DEVICE != "cpu", drop_last=True, ) dl_iter = cycle(dataloader) batch = next(dl_iter) for key in batch: if isinstance(batch[key], torch.Tensor): batch[key] = batch[key].to(DEVICE, non_blocking=True) # Test updating the policy (and test that it does not mutate the batch) batch_ = deepcopy(batch) policy.forward(batch) assert set(batch) == set(batch_), "Batch keys are not the same after a forward pass." assert all( torch.equal(batch[k], batch_[k]) if isinstance(batch[k], torch.Tensor) else batch[k] == batch_[k] for k in batch ), "Batch values are not the same after a forward pass." # reset the policy and environment policy.reset() observation, _ = env.reset(seed=train_cfg.seed) # apply transform to normalize the observations observation = preprocess_observation(observation) # send observation to device/gpu observation = {key: observation[key].to(DEVICE, non_blocking=True) for key in observation} # get the next action for the environment (also check that the observation batch is not modified) observation_ = deepcopy(observation) with torch.inference_mode(): action = policy.select_action(observation).cpu().numpy() assert set(observation) == set(observation_), ( "Observation batch keys are not the same after a forward pass." ) assert all(torch.equal(observation[k], observation_[k]) for k in observation), ( "Observation batch values are not the same after a forward pass." ) # Test step through policy env.step(action) # TODO(rcadene, aliberts): This test is quite end-to-end. Move this test in test_optimizer? def test_act_backbone_lr(): """ Test that the ACT policy can be instantiated with a different learning rate for the backbone. 
""" cfg = TrainPipelineConfig( # TODO(rcadene, aliberts): remove dataset download dataset=DatasetConfig(repo_id="lerobot/aloha_sim_insertion_scripted", episodes=[0]), policy=make_policy_config("act", optimizer_lr=0.01, optimizer_lr_backbone=0.001, push_to_hub=False), ) cfg.validate() # Needed for auto-setting some parameters assert cfg.policy.optimizer_lr == 0.01 assert cfg.policy.optimizer_lr_backbone == 0.001 dataset = make_dataset(cfg) policy = make_policy(cfg.policy, ds_meta=dataset.meta) optimizer, _ = make_optimizer_and_scheduler(cfg, policy) assert len(optimizer.param_groups) == 2 assert optimizer.param_groups[0]["lr"] == cfg.policy.optimizer_lr assert optimizer.param_groups[1]["lr"] == cfg.policy.optimizer_lr_backbone assert len(optimizer.param_groups[0]["params"]) == 133 assert len(optimizer.param_groups[1]["params"]) == 20 @pytest.mark.parametrize("policy_name", available_policies) def test_policy_defaults(dummy_dataset_metadata, policy_name: str): """Check that the policy can be instantiated with defaults.""" policy_cls = get_policy_class(policy_name) policy_cfg = make_policy_config(policy_name) features = dataset_to_policy_features(dummy_dataset_metadata.features) policy_cfg.output_features = {key: ft for key, ft in features.items() if ft.type is FeatureType.ACTION} policy_cfg.input_features = { key: ft for key, ft in features.items() if key not in policy_cfg.output_features } policy_cls(policy_cfg) @pytest.mark.parametrize("policy_name", available_policies) def test_save_and_load_pretrained(dummy_dataset_metadata, tmp_path, policy_name: str): policy_cls = get_policy_class(policy_name) policy_cfg = make_policy_config(policy_name) features = dataset_to_policy_features(dummy_dataset_metadata.features) policy_cfg.output_features = {key: ft for key, ft in features.items() if ft.type is FeatureType.ACTION} policy_cfg.input_features = { key: ft for key, ft in features.items() if key not in policy_cfg.output_features } policy = policy_cls(policy_cfg) policy.to(policy_cfg.device) save_dir = tmp_path / f"test_save_and_load_pretrained_{policy_cls.__name__}" policy.save_pretrained(save_dir) loaded_policy = policy_cls.from_pretrained(save_dir, config=policy_cfg) torch.testing.assert_close(list(policy.parameters()), list(loaded_policy.parameters()), rtol=0, atol=0) @pytest.mark.parametrize("insert_temporal_dim", [False, True]) def test_normalize(insert_temporal_dim): """ Test that normalize/unnormalize can run without exceptions when properly set up, and that they raise an exception when the forward pass is called without the stats having been provided. TODO(rcadene, alexander-soare): This should also test that the normalization / unnormalization works as expected. 
""" input_features = { "observation.image": PolicyFeature( type=FeatureType.VISUAL, shape=(3, 96, 96), ), "observation.state": PolicyFeature( type=FeatureType.STATE, shape=(10,), ), } output_features = { "action": PolicyFeature( type=FeatureType.ACTION, shape=(5,), ), } norm_map = { "VISUAL": NormalizationMode.MEAN_STD, "STATE": NormalizationMode.MIN_MAX, "ACTION": NormalizationMode.MIN_MAX, } dataset_stats = { "observation.image": { "mean": torch.randn(3, 1, 1), "std": torch.randn(3, 1, 1), "min": torch.randn(3, 1, 1), "max": torch.randn(3, 1, 1), }, "observation.state": { "mean": torch.randn(10), "std": torch.randn(10), "min": torch.randn(10), "max": torch.randn(10), }, "action": { "mean": torch.randn(5), "std": torch.randn(5), "min": torch.randn(5), "max": torch.randn(5), }, } bsize = 2 input_batch = { "observation.image": torch.randn(bsize, 3, 96, 96), "observation.state": torch.randn(bsize, 10), } output_batch = { "action": torch.randn(bsize, 5), } if insert_temporal_dim: tdim = 4 for key in input_batch: # [2,3,96,96] -> [2,tdim,3,96,96] input_batch[key] = torch.stack([input_batch[key]] * tdim, dim=1) for key in output_batch: output_batch[key] = torch.stack([output_batch[key]] * tdim, dim=1) # test without stats normalize = Normalize(input_features, norm_map, stats=None) with pytest.raises(AssertionError): normalize(input_batch) # test with stats normalize = Normalize(input_features, norm_map, stats=dataset_stats) normalize(input_batch) # test loading pretrained models new_normalize = Normalize(input_features, norm_map, stats=None) new_normalize.load_state_dict(normalize.state_dict()) new_normalize(input_batch) # test without stats unnormalize = Unnormalize(output_features, norm_map, stats=None) with pytest.raises(AssertionError): unnormalize(output_batch) # test with stats unnormalize = Unnormalize(output_features, norm_map, stats=dataset_stats) unnormalize(output_batch) # test loading pretrained models new_unnormalize = Unnormalize(output_features, norm_map, stats=None) new_unnormalize.load_state_dict(unnormalize.state_dict()) unnormalize(output_batch) @pytest.mark.parametrize("multikey", [True, False]) def test_multikey_construction(multikey: bool): """ Asserts that multiple keys with type State/Action are correctly processed by the policy constructor, preventing erroneous creation of the policy object. 
""" input_features = { "observation.state": PolicyFeature( type=FeatureType.STATE, shape=(10,), ), } output_features = { "action": PolicyFeature( type=FeatureType.ACTION, shape=(5,), ), } if multikey: """Simulates the complete state/action is constructed from more granular multiple keys, of the same type as the overall state/action""" input_features = {} input_features["observation.state.subset1"] = PolicyFeature(type=FeatureType.STATE, shape=(5,)) input_features["observation.state.subset2"] = PolicyFeature(type=FeatureType.STATE, shape=(5,)) input_features["observation.state"] = PolicyFeature(type=FeatureType.STATE, shape=(10,)) output_features = {} output_features["action.first_three_motors"] = PolicyFeature(type=FeatureType.ACTION, shape=(3,)) output_features["action.last_two_motors"] = PolicyFeature(type=FeatureType.ACTION, shape=(2,)) output_features["action"] = PolicyFeature( type=FeatureType.ACTION, shape=(5,), ) config = ACTConfig(input_features=input_features, output_features=output_features) state_condition = config.robot_state_feature == input_features[OBS_STATE] action_condition = config.action_feature == output_features[ACTION] assert state_condition, ( f"Discrepancy detected. Robot state feature is {config.robot_state_feature} but policy expects {input_features[OBS_STATE]}" ) assert action_condition, ( f"Discrepancy detected. Action feature is {config.action_feature} but policy expects {output_features[ACTION]}" ) @pytest.mark.parametrize( "ds_repo_id, policy_name, policy_kwargs, file_name_extra", [ # TODO(alexander-soare): `policy.use_mpc=false` was previously the default in the config yaml but it # was changed to true. For some reason, tests would pass locally, but not in CI. So here we override # to test with `policy.use_mpc=false`. ("lerobot/xarm_lift_medium", "tdmpc", {"use_mpc": False}, "use_policy"), # ("lerobot/xarm_lift_medium", "tdmpc", {"use_mpc": True}, "use_mpc"), # TODO(rcadene): the diffusion model was normalizing the image in mean=0.5 std=0.5 which is a hack supposed to # to normalize the image at all. In our current codebase we dont normalize at all. But there is still a minor difference # that fails the test. However, by testing to normalize the image with 0.5 0.5 in the current codebase, the test pass. # Thus, we deactivate this test for now. ( "lerobot/pusht", "diffusion", { "n_action_steps": 8, "num_inference_steps": 10, "down_dims": [128, 256, 512], }, "", ), ("lerobot/aloha_sim_insertion_human", "act", {"n_action_steps": 10}, ""), ( "lerobot/aloha_sim_insertion_human", "act", {"n_action_steps": 1000, "chunk_size": 1000}, "1000_steps", ), ], ) # As artifacts have been generated on an x86_64 kernel, this test won't # pass if it's run on another platform due to floating point errors @require_x86_64_kernel @require_cpu def test_backward_compatibility(ds_repo_id: str, policy_name: str, policy_kwargs: dict, file_name_extra: str): """ NOTE: If this test does not pass, and you have intentionally changed something in the policy: 1. Inspect the differences in policy outputs and make sure you can account for them. Your PR should include a report on what changed and how that affected the outputs. 2. Go to the `if __name__ == "__main__"` block of `tests/scripts/save_policy_to_safetensors.py` and add the policies you want to update the test artifacts for. 3. Run `python tests/scripts/save_policy_to_safetensors.py`. The test artifact should be updated. 4. Check that this test now passes. 5. 
Remember to restore `tests/scripts/save_policy_to_safetensors.py` to its original state. 6. Remember to stage and commit the resulting changes to `tests/artifacts`. NOTE: If the test does not pass, and you don't change the policy, it is likely that the test artifact is out of date. For example, some PyTorch versions have different randomness, see this PR: https://github.com/huggingface/lerobot/pull/1127. """ # NOTE: ACT policy has different randomness, after PyTorch 2.7.0 if policy_name == "act" and version.parse(torch.__version__) < version.parse("2.7.0"): pytest.skip(f"Skipping act policy test with PyTorch {torch.__version__}. Requires PyTorch >= 2.7.0") ds_name = ds_repo_id.split("/")[-1] artifact_dir = Path("tests/artifacts/policies") / f"{ds_name}_{policy_name}_{file_name_extra}" saved_output_dict = load_file(artifact_dir / "output_dict.safetensors") saved_grad_stats = load_file(artifact_dir / "grad_stats.safetensors") saved_param_stats = load_file(artifact_dir / "param_stats.safetensors") saved_actions = load_file(artifact_dir / "actions.safetensors") output_dict, grad_stats, param_stats, actions = get_policy_stats(ds_repo_id, policy_name, policy_kwargs) for key in saved_output_dict: torch.testing.assert_close(output_dict[key], saved_output_dict[key]) for key in saved_grad_stats: torch.testing.assert_close(grad_stats[key], saved_grad_stats[key]) for key in saved_param_stats: torch.testing.assert_close(param_stats[key], saved_param_stats[key]) for key in saved_actions: rtol, atol = (2e-3, 5e-6) if policy_name == "diffusion" else (None, None) # HACK torch.testing.assert_close(actions[key], saved_actions[key], rtol=rtol, atol=atol) def test_act_temporal_ensembler(): """Check that the online method in ACTTemporalEnsembler matches a simple offline calculation.""" temporal_ensemble_coeff = 0.01 chunk_size = 100 episode_length = 101 ensembler = ACTTemporalEnsembler(temporal_ensemble_coeff, chunk_size) # An batch of arbitrary sequences of 1D actions we wish to compute the average over. We'll keep the # "action space" in [-1, 1]. Apart from that, there is no real reason for the numbers chosen. with seeded_context(0): # Dimension is (batch, episode_length, chunk_size, action_dim(=1)) # Stepping through the episode_length dim is like running inference at each rollout step and getting # a different action chunk. batch_seq = torch.stack( [ torch.rand(episode_length, chunk_size) * 0.05 - 0.6, torch.rand(episode_length, chunk_size) * 0.02 - 0.01, torch.rand(episode_length, chunk_size) * 0.2 + 0.3, ], dim=0, ).unsqueeze(-1) # unsqueeze for action dim batch_size = batch_seq.shape[0] # Exponential weighting (normalized). Unsqueeze once to match the position of the `episode_length` # dimension of `batch_seq`. weights = torch.exp(-temporal_ensemble_coeff * torch.arange(chunk_size)).unsqueeze(-1) # Simulate stepping through a rollout and computing a batch of actions with model on each step. for i in range(episode_length): # Mock a batch of actions. actions = torch.zeros(size=(batch_size, chunk_size, 1)) + batch_seq[:, i] online_avg = ensembler.update(actions) # Simple offline calculation: avg = Σ(aᵢ*wᵢ) / Σ(wᵢ). # Note: The complicated bit here is the slicing. Think about the (episode_length, chunk_size) grid. # What we want to do is take diagonal slices across it starting from the left. 
# eg: chunk_size=4, episode_length=6 # ┌───────┐ # │0 1 2 3│ # │1 2 3 4│ # │2 3 4 5│ # │3 4 5 6│ # │4 5 6 7│ # │5 6 7 8│ # └───────┘ chunk_indices = torch.arange(min(i, chunk_size - 1), -1, -1) episode_step_indices = torch.arange(i + 1)[-len(chunk_indices) :] seq_slice = batch_seq[:, episode_step_indices, chunk_indices] offline_avg = ( einops.reduce(seq_slice * weights[: i + 1], "b s 1 -> b 1", "sum") / weights[: i + 1].sum() ) # Sanity check. The average should be between the extrema. assert torch.all(einops.reduce(seq_slice, "b s 1 -> b 1", "min") <= offline_avg) assert torch.all(offline_avg <= einops.reduce(seq_slice, "b s 1 -> b 1", "max")) # Selected atol=1e-4 keeping in mind actions in [-1, 1] and excepting 0.01% error. torch.testing.assert_close(online_avg, offline_avg, rtol=1e-4, atol=1e-4)
lerobot/tests/policies/test_policies.py/0
{ "file_path": "lerobot/tests/policies/test_policies.py", "repo_id": "lerobot", "token_count": 9161 }
221
#!/usr/bin/env python # Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import pytest from lerobot.utils.encoding_utils import ( decode_sign_magnitude, decode_twos_complement, encode_sign_magnitude, encode_twos_complement, ) @pytest.mark.parametrize( "value, sign_bit_index, expected", [ (5, 4, 5), (0, 4, 0), (7, 3, 7), (-1, 4, 17), (-8, 4, 24), (-3, 3, 11), ], ) def test_encode_sign_magnitude(value, sign_bit_index, expected): assert encode_sign_magnitude(value, sign_bit_index) == expected @pytest.mark.parametrize( "encoded, sign_bit_index, expected", [ (5, 4, 5), (0, 4, 0), (7, 3, 7), (17, 4, -1), (24, 4, -8), (11, 3, -3), ], ) def test_decode_sign_magnitude(encoded, sign_bit_index, expected): assert decode_sign_magnitude(encoded, sign_bit_index) == expected @pytest.mark.parametrize( "encoded, sign_bit_index", [ (16, 4), (-9, 3), ], ) def test_encode_raises_on_overflow(encoded, sign_bit_index): with pytest.raises(ValueError): encode_sign_magnitude(encoded, sign_bit_index) def test_encode_decode_sign_magnitude(): for sign_bit_index in range(2, 6): max_val = (1 << sign_bit_index) - 1 for value in range(-max_val, max_val + 1): encoded = encode_sign_magnitude(value, sign_bit_index) decoded = decode_sign_magnitude(encoded, sign_bit_index) assert decoded == value, f"Failed at value={value}, index={sign_bit_index}" @pytest.mark.parametrize( "value, n_bytes, expected", [ (0, 1, 0), (5, 1, 5), (-1, 1, 255), (-128, 1, 128), (-2, 1, 254), (127, 1, 127), (0, 2, 0), (5, 2, 5), (-1, 2, 65_535), (-32_768, 2, 32_768), (-2, 2, 65_534), (32_767, 2, 32_767), (0, 4, 0), (5, 4, 5), (-1, 4, 4_294_967_295), (-2_147_483_648, 4, 2_147_483_648), (-2, 4, 4_294_967_294), (2_147_483_647, 4, 2_147_483_647), ], ) def test_encode_twos_complement(value, n_bytes, expected): assert encode_twos_complement(value, n_bytes) == expected @pytest.mark.parametrize( "value, n_bytes, expected", [ (0, 1, 0), (5, 1, 5), (255, 1, -1), (128, 1, -128), (254, 1, -2), (127, 1, 127), (0, 2, 0), (5, 2, 5), (65_535, 2, -1), (32_768, 2, -32_768), (65_534, 2, -2), (32_767, 2, 32_767), (0, 4, 0), (5, 4, 5), (4_294_967_295, 4, -1), (2_147_483_648, 4, -2_147_483_648), (4_294_967_294, 4, -2), (2_147_483_647, 4, 2_147_483_647), ], ) def test_decode_twos_complement(value, n_bytes, expected): assert decode_twos_complement(value, n_bytes) == expected @pytest.mark.parametrize( "value, n_bytes", [ (-129, 1), (128, 1), (-32_769, 2), (32_768, 2), (-2_147_483_649, 4), (2_147_483_648, 4), ], ) def test_encode_twos_complement_out_of_range(value, n_bytes): with pytest.raises(ValueError): encode_twos_complement(value, n_bytes) @pytest.mark.parametrize( "value, n_bytes", [ (-128, 1), (-1, 1), (0, 1), (1, 1), (127, 1), (-32_768, 2), (-1, 2), (0, 2), (1, 2), (32_767, 2), (-2_147_483_648, 4), (-1, 4), (0, 4), (1, 4), (2_147_483_647, 4), ], ) def test_encode_decode_twos_complement(value, n_bytes): encoded = encode_twos_complement(value, n_bytes) decoded = decode_twos_complement(encoded, n_bytes) assert decoded == value, 
f"Failed at value={value}, n_bytes={n_bytes}"
lerobot/tests/utils/test_encoding_utils.py/0
{ "file_path": "lerobot/tests/utils/test_encoding_utils.py", "repo_id": "lerobot", "token_count": 2243 }
222
# Model arguments model_name_or_path: Qwen/Qwen2.5-1.5B-Instruct model_revision: main torch_dtype: bfloat16 attn_implementation: flash_attention_2 # Data training arguments dataset_name: open-r1/OpenR1-Math-220k dataset_prompt_column: problem system_prompt: "You are a helpful AI Assistant that provides well-reasoned and detailed responses. You first think about the reasoning process as an internal monologue and then provide the user with the answer. Respond in the following format: <think>\n...\n</think>\n<answer>\n...\n</answer>" # GRPO trainer config bf16: true use_vllm: true do_eval: false gradient_accumulation_steps: 4 gradient_checkpointing: true gradient_checkpointing_kwargs: use_reentrant: false hub_model_id: Qwen2.5-1.5B-Open-R1-GRPO hub_strategy: every_save learning_rate: 2.0e-05 log_completions: true log_level: info logging_first_step: true logging_steps: 1 logging_strategy: steps lr_scheduler_type: cosine max_prompt_length: 512 max_completion_length: 1024 max_steps: -1 num_generations: 16 num_train_epochs: 1 output_dir: data/Qwen2.5-1.5B-Open-R1-GRPO overwrite_output_dir: true per_device_eval_batch_size: 16 per_device_train_batch_size: 16 push_to_hub: true report_to: - wandb reward_funcs: - accuracy - format - tag_count reward_weights: - 1.0 - 1.0 - 1.0 save_strategy: "epoch" save_total_limit: 1 seed: 42 warmup_ratio: 0.1
open-r1/recipes/Qwen2.5-1.5B-Instruct/grpo/config_demo.yaml/0
{ "file_path": "open-r1/recipes/Qwen2.5-1.5B-Instruct/grpo/config_demo.yaml", "repo_id": "open-r1", "token_count": 514 }
223
import argparse from transformers import AutoConfig from math import gcd def get_tensor_parallel_size(model_name: str, revision: str = None, default_tp: int = 8) -> int: try: config = AutoConfig.from_pretrained(model_name, revision=revision, trust_remote_code=True) num_heads = getattr(config, 'num_attention_heads', None) if num_heads is not None and num_heads % default_tp != 0: tp = gcd(num_heads, default_tp) return max(tp, 1) else: return default_tp except Exception as e: print(f"Warning: Failed to fetch config for {model_name}@{revision}: {e}") return default_tp if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--model_name", type=str, required=True, help="Hugging Face model name or path") parser.add_argument("--revision", type=str, default=None, help="Model revision if applicable") parser.add_argument("--default_tp", type=int, default=8, help="Default TP size (usually GPUs per node)") args = parser.parse_args() tp = get_tensor_parallel_size(args.model_name, args.revision, args.default_tp) print(tp)
open-r1/scripts/get_tensor_parallel_size.py/0
{ "file_path": "open-r1/scripts/get_tensor_parallel_size.py", "repo_id": "open-r1", "token_count": 448 }
224
# Piston workers (slurm) We have built a [piston](https://github.com/engineer-man/piston) package to run IOI problems. To launch a fleet of piston workers on a slurm cluster, you can adapt the paths in `launch_piston_workers.sh` and `launch_single_piston.sh` and run: ```bash slurm/piston/launch_piston_workers.sh (number of workers to launch) ``` This command will launch a slurm job for each worker, which will be called `piston-worker-<port>`, where `<port>` is the port where the worker will be listening. ## First time setup You will need to install the [IOI package](https://github.com/guipenedo/piston/tree/master/packages/cms_ioi/1.0.0) in the workers. 1. Launch a single worker: ```bash slurm/piston/launch_piston_workers.sh 1 ``` 2. Assuming it's running on `ip-10-53-86-146:1234`, send the package install request: For IOI: ```bash curl -X POST http://ip-10-53-86-146:1234/api/v2/packages -H "Content-Type: application/json" -d '{"language": "cms_ioi", "version": "1.0.0"}' ``` For CodeForces: ```bash curl -X POST http://ip-10-53-86-146:1234/api/v2/packages -H "Content-Type: application/json" -d '{"language": "codeforces", "version": "1.0.0"}' ``` 3. You can now launch more workers and due to the shared mounted packages directory, they should already have the package installed. To have the main script find the workers automatically, you can export the following environment variable: ```bash export PISTON_ENDPOINTS=slurm ``` Alternatively your can add `PISTON_ENDPOINTS=slurm` to your .env file. You can also change `PISTON_MAX_REQUESTS_PER_ENDPOINT`, which tries to limit how many simultaneous requests each worker will handle (1 by default). Keep in mind that this is a local limit and in distributed setups, as there is no global limit, workers might sometimes be overwhelmed when some processes hit the same worker. If you would like to adapt the code to run without piston, please see the [ioi repo](https://github.com/huggingface/ioi). For CodeForces, you should implement the [`run`](https://github.com/guipenedo/piston/blob/master/packages/codeforces/1.0.0/run) and [`compile`](https://github.com/guipenedo/piston/blob/master/packages/codeforces/1.0.0/compile) scripts. # Piston workers (local docker) This will launch a single worker in a docker container. Consider launching multiple workers for better scalability. Replace 2000 with the port you want to use. Make sure to change `/path/to/local/packages` to the path you want to persist for package installs. 
# Piston workers (slurm)
We have built a [piston](https://github.com/engineer-man/piston) package to run IOI problems.
To launch a fleet of piston workers on a slurm cluster, you can adapt the paths in `launch_piston_workers.sh` and `launch_single_piston.sh` and run:
```bash
slurm/piston/launch_piston_workers.sh (number of workers to launch)
```
This command will launch a slurm job for each worker, which will be called `piston-worker-<port>`, where `<port>` is the port where the worker will be listening.
## First time setup
You will need to install the [IOI package](https://github.com/guipenedo/piston/tree/master/packages/cms_ioi/1.0.0) in the workers.
1. Launch a single worker:
```bash
slurm/piston/launch_piston_workers.sh 1
```
2. Assuming it's running on `ip-10-53-86-146:1234`, send the package install request:
For IOI:
```bash
curl -X POST http://ip-10-53-86-146:1234/api/v2/packages -H "Content-Type: application/json" -d '{"language": "cms_ioi", "version": "1.0.0"}'
```
For CodeForces:
```bash
curl -X POST http://ip-10-53-86-146:1234/api/v2/packages -H "Content-Type: application/json" -d '{"language": "codeforces", "version": "1.0.0"}'
```
3. You can now launch more workers and, due to the shared mounted packages directory, they should already have the package installed.
To have the main script find the workers automatically, you can export the following environment variable:
```bash
export PISTON_ENDPOINTS=slurm
```
Alternatively, you can add `PISTON_ENDPOINTS=slurm` to your .env file. You can also change `PISTON_MAX_REQUESTS_PER_ENDPOINT`, which tries to limit how many simultaneous requests each worker will handle (1 by default). Keep in mind that this is a local limit and in distributed setups, as there is no global limit, workers might sometimes be overwhelmed when some processes hit the same worker.
If you would like to adapt the code to run without piston, please see the [ioi repo](https://github.com/huggingface/ioi).
For CodeForces, you should implement the [`run`](https://github.com/guipenedo/piston/blob/master/packages/codeforces/1.0.0/run) and [`compile`](https://github.com/guipenedo/piston/blob/master/packages/codeforces/1.0.0/compile) scripts.
# Piston workers (local docker)
This will launch a single worker in a docker container. Consider launching multiple workers for better scalability.
Replace 2000 with the port you want to use. Make sure to change `/path/to/local/packages` to the path you want to persist for package installs.
```bash docker run -d \ --name piston_worker \ -v /path/to/local/packages:/piston/packages \ -e PORT=2000 \ -e PISTON_COMPILE_TIMEOUT=60000 \ -e PISTON_RUN_TIMEOUT=60000 \ -e PISTON_OUTPUT_MAX_SIZE=1000000000 \ -e PISTON_MAX_FILE_SIZE=1000000000 \ -e PISTON_DISABLE_NETWORKING=true \ -e PISTON_REPO_URL=https://github.com/guipenedo/piston/releases/download/pkgs/index \ -p 2000:2000 \ --entrypoint /bin/bash \ ghcr.io/engineer-man/piston@sha256:63b5654156a89c5a2ad281aface21416615d62ec056d88efe8fcd307ce73575a \ -c "sed -i '/app.use(body_parser.urlencoded/c\ app.use(body_parser.urlencoded({ extended: true, limit: \"512mb\" }));' src/index.js && \ sed -i '/app.use(body_parser.json/c\ app.use(body_parser.json({ limit: \"512mb\" }));' src/index.js && \ node src" ``` Install the package: For IOI: ```bash curl -X POST http://localhost:2000/api/v2/packages -H "Content-Type: application/json" -d '{"language": "cms_ioi", "version": "1.0.0"}' ``` For CodeForces: ```bash curl -X POST http://localhost:2000/api/v2/packages -H "Content-Type: application/json" -d '{"language": "codeforces", "version": "1.0.0"}' ``` Remember to set `PISTON_ENDPOINTS`: ```bash export PISTON_ENDPOINTS=http://localhost:2000/api/v2,http://localhost:2001/api/v2,http://localhost:2002/api/v2 ```
open-r1/slurm/piston/README.md/0
{ "file_path": "open-r1/slurm/piston/README.md", "repo_id": "open-r1", "token_count": 1361 }
225
import asyncio import os from io import BytesIO from typing import Literal from async_lru import alru_cache from .piston_client import PistonClient from .utils import batched async def score_single_test_case( client: PistonClient, problem_data: dict, test_input: str, test_output: str, submission: str, submission_language: str = "cpp", ) -> tuple[str, str]: if submission_language not in ["python", "cpp"]: raise ValueError(f"Invalid submission language: {submission_language}") try: result = await client.send_execute( { "files": [ {"name": f"main.{submission_language}", "content": submission}, *( [{"name": "checker.py", "content": problem_data["generated_checker"]}] if problem_data["generated_checker"] else [] ), {"name": "input.txt", "content": test_input}, {"name": "correct_output.txt", "content": test_output}, { "name": "grader_config", "content": "\n".join( f"{key}={value}" for key, value in { "TIME_LIMIT": problem_data["time_limit"], "MEMORY_LIMIT": problem_data["memory_limit"], "INPUT_MODE": problem_data["input_mode"], }.items() ), }, ], "run_timeout": (problem_data["time_limit"] + 10) * 1000, # +10 seconds hard limit. time limits are handled by the codeforces script }, language="cf_python3" if submission_language == "python" else "c++17", ) except Exception as e: print(f"Error scoring submission: {e}") return False return result @alru_cache(maxsize=32) # TODO make this configurable async def get_generated_contest_tests(contest_id: str) -> list[dict]: import pandas as pd import aiofiles import aiofiles.os tests_folder = os.environ.get("CF_TESTS_FOLDER", None) if not tests_folder: raise ValueError( "CF_TESTS_FOLDER environment variable not set! Please download the codeforces generated tests and set CF_TESTS_FOLDER to the folder path. See https://huggingface.co/datasets/open-r1/codeforces for more information." ) if not await aiofiles.os.path.exists(tests_folder): raise ValueError( f"CF_TESTS_FOLDER path '{tests_folder}' does not exist! Please download the codeforces generated tests and set CF_TESTS_FOLDER to the folder path. See https://huggingface.co/datasets/open-r1/codeforces for more information." 
) parquet_path = os.path.join(tests_folder, f"test_cases_{int(contest_id):04d}.parquet") if not await aiofiles.os.path.exists(parquet_path): return {} # Read parquet file asynchronously async with aiofiles.open(parquet_path, "rb") as f: content = await f.read() df = pd.read_parquet(BytesIO(content)) # Group by problem_id and convert to dictionary of lists grouped_tests = df.groupby("problem_id").apply(lambda x: x[["input", "output"]].to_dict("records")).to_dict() return grouped_tests async def get_generated_tests(problem_id: str) -> list[dict]: contest_id = problem_id.split("/")[0] return (await get_generated_contest_tests(contest_id)).get(problem_id, []) async def score_submission( client: PistonClient, problem_data: dict, submission: str, test_batch_size: int = 1, scoring_mode: Literal["pass_fail", "partial", "weighted_sum"] = "weighted_sum", no_compile_reward: float = -0.1, no_submission_reward: float = -1.0, submission_language: str = "cpp", ) -> float: if submission_language not in ["python", "cpp"]: raise ValueError(f"Invalid submission language: {submission_language}") test_cases = problem_data["official_tests"] + (await get_generated_tests(problem_data["id"])) # invalid/not a coding problem if test_cases is None or len(test_cases) == 0: return None # no code extracted if not submission: return no_submission_reward passed_test_cases = 0 # run one batch, check if any of them failed (0 score): if so stop evaluating (assuming non partial score); otherwise continue with the next batch of test cases. for test_batch_to_run in batched(test_cases, test_batch_size) if test_batch_size >= 1 else [test_cases]: results = await asyncio.gather( *[ asyncio.create_task( score_single_test_case( client, problem_data, test_case["input"], test_case["output"], submission, submission_language ) ) for test_case in test_batch_to_run ] ) if any(result and result["compile"]["code"] != 0 for result in results): return no_compile_reward tests_passed_results = [ result and result["run"]["code"] == 0 and result["run"]["stdout"].strip() == "1" for result in results ] if scoring_mode == "pass_fail" and any(not test_passed for test_passed in tests_passed_results): break passed_test_cases += sum(1 for test_passed in tests_passed_results if test_passed) pass_fail_score = 1.0 if passed_test_cases == len(test_cases) else 0.0 if scoring_mode == "pass_fail": return pass_fail_score elif scoring_mode == "partial": return passed_test_cases / len(test_cases) elif scoring_mode == "weighted_sum": return pass_fail_score + 0.1 * (passed_test_cases / len(test_cases)) else: raise ValueError(f"Invalid scoring mode: {scoring_mode}")
open-r1/src/open_r1/utils/competitive_programming/cf_scoring.py/0
{ "file_path": "open-r1/src/open_r1/utils/competitive_programming/cf_scoring.py", "repo_id": "open-r1", "token_count": 2651 }
226
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from datasets import load_dataset from e2b_code_interpreter.models import Execution, ExecutionError from open_r1.rewards import code_reward, ioi_code_reward from open_r1.utils.routed_morph import RoutedMorphSandbox from open_r1.utils.routed_sandbox import RoutedSandbox class TestCodeRewards(unittest.TestCase): def test_python_code_reward(self): # requires E2B, see the README.md file code_dataset = load_dataset("open-r1/verifiable-coding-problems-python_decontaminated-tested-shuffled") NUM_SAMPLES = 20 samples = code_dataset["train"].select(range(NUM_SAMPLES)) test_completions = [[{"content": sample["gold_standard_solution"]}] for sample in samples] reward_kwargs = {"verification_info": [sample["verification_info"] for sample in samples]} rewards = code_reward(test_completions, **reward_kwargs) print(rewards) assert rewards == [1.0] * NUM_SAMPLES def test_e2b_router(self): # run router locally: python scripts/e2b_router.py code_dataset = load_dataset("open-r1/verifiable-coding-problems-python_decontaminated-tested-shuffled") NUM_SAMPLES = 128 samples = code_dataset["train"].select(range(NUM_SAMPLES)) test_completions = [[{"content": sample["gold_standard_solution"]}] for sample in samples] reward_kwargs = {"verification_info": [sample["verification_info"] for sample in samples]} rewards = code_reward(test_completions, e2b_router_url="0.0.0.0:8000", **reward_kwargs) print(rewards) assert rewards == [1.0] * NUM_SAMPLES def test_e2b_router_parallel(self): # run router locally: python scripts/e2b_router.py code_dataset = load_dataset("open-r1/verifiable-coding-problems-python_decontaminated-tested-shuffled") BATCH_SIZE = 32 NUM_SAMPLES = 256 def batch_code_reward(examples): test_completions = [[{"content": solution}] for solution in examples["gold_standard_solution"]] reward_kwargs = { "verification_info": [verification_info for verification_info in examples["verification_info"]] } rewards = code_reward(test_completions, e2b_router_url="0.0.0.0:8000", **reward_kwargs) assert rewards == [1.0] * BATCH_SIZE return examples code_dataset = code_dataset["train"].select(range(NUM_SAMPLES)) code_dataset = code_dataset.map( batch_code_reward, batched=True, batch_size=BATCH_SIZE, num_proc=4, load_from_cache_file=False, ) def test_ioi_code_reward(self): # This slow test case requires spinning up a bunch (I tested with ~64) of piston workers, see docs here # slurm/piston/README.md code_dataset = load_dataset("open-r1/ioi-reward-test-dataset") NUM_SAMPLES = 16 samples = code_dataset["train"].select(range(NUM_SAMPLES)) test_completions = [[{"content": f"```cpp\n{sample['sample_solution']}```"}] for sample in samples] keys = [key for key in samples[0] if key not in ["prompt", "completion"]] reward_kwargs = {key: [example[key] for example in samples] for key in keys} rewards = ioi_code_reward(test_completions, **reward_kwargs) print(rewards) assert rewards == [1.0] * NUM_SAMPLES def 
test_e2b_router_run_code_success(self): # run router locally: python scripts/e2b_router.py routed_sandbox = RoutedSandbox(router_url="localhost:8000") scripts = [ "print('hello from integration test')", "result = 2 + 2\nprint(result)", ] results = routed_sandbox.run_code(scripts) assert len(results) == 2 for result in results: assert isinstance(result, Execution) # assert result.exit_code == 0 assert result.error is None assert "hello" in result.logs["stdout"][0] or "4" in result.logs["stdout"][0] def test_e2b_router_run_code_with_error(self): # run router locally: python scripts/e2b_router.py routed_sandbox = RoutedSandbox(router_url="localhost:8000") scripts = ["print('this is fine')", "print('unterminated string"] results = routed_sandbox.run_code(scripts) assert len(results) == 2 # First one should be okay # assert results[0].exit_code == 0 # Execution object has no attribute 'exit_code' assert results[0].error is None assert "this is fine" in results[0].logs["stdout"][0] # Second one should have a syntax error # assert results[1].exit_code != 0 # Execution object has no attribute 'exit_code' assert results[1].error is not None assert isinstance(results[1].error, ExecutionError) assert "SyntaxError" in results[1].error.name def test_python_code_reward_morph(self): # requires MorphCloud, see the README.md file code_dataset = load_dataset("open-r1/verifiable-coding-problems-python_decontaminated-tested-shuffled") NUM_SAMPLES = 20 samples = code_dataset["train"].select(range(NUM_SAMPLES)) test_completions = [[{"content": sample["gold_standard_solution"]}] for sample in samples] reward_kwargs = { "verification_info": [sample["verification_info"] for sample in samples], "provider_type": "morph", } rewards = code_reward(test_completions, **reward_kwargs) print(rewards) assert rewards == [1.0] * NUM_SAMPLES def test_morph_router(self): # run router locally: python scripts/morph_router.py --port 8001 --max_num_sandboxes 20 code_dataset = load_dataset("open-r1/verifiable-coding-problems-python_decontaminated-tested-shuffled") NUM_SAMPLES = 32 samples = code_dataset["train"].select(range(NUM_SAMPLES)) test_completions = [[{"content": sample["gold_standard_solution"]}] for sample in samples] reward_kwargs = { "verification_info": [sample["verification_info"] for sample in samples], "provider_type": "morph", "morph_router_url": "0.0.0.0:8001", } rewards = code_reward(test_completions, **reward_kwargs) print(rewards) assert rewards == [1.0] * NUM_SAMPLES def test_morph_router_parallel(self): # run router locally: python scripts/morph_router.py --port 8001 --max_num_sandboxes 20 code_dataset = load_dataset("open-r1/verifiable-coding-problems-python_decontaminated-tested-shuffled") BATCH_SIZE = 32 NUM_SAMPLES = 256 def batch_code_reward(examples): test_completions = [[{"content": solution}] for solution in examples["gold_standard_solution"]] reward_kwargs = { "verification_info": [verification_info for verification_info in examples["verification_info"]], "provider_type": "morph", "morph_router_url": "0.0.0.0:8001", } rewards = code_reward(test_completions, **reward_kwargs) assert rewards == [1.0] * BATCH_SIZE return examples code_dataset = code_dataset["train"].select(range(NUM_SAMPLES)) code_dataset = code_dataset.map( batch_code_reward, batched=True, batch_size=BATCH_SIZE, num_proc=4, load_from_cache_file=False, ) def test_morph_router_run_code_success(self): # run router locally: python scripts/morph_router.py --port 8001 --max_num_sandboxes 20 routed_sandbox = 
RoutedMorphSandbox(router_url="localhost:8001") scripts = [ "print('hello from morph integration test')", "result = 2 + 2\nprint(result)", ] results = routed_sandbox.run_code(scripts) assert len(results) == 2 for result in results: assert result.exception_str is None assert "hello" in result.text or "4" in result.text def test_morph_router_run_code_with_error(self): # run router locally: python scripts/morph_router.py --port 8001 --max_num_sandboxes 20 routed_sandbox = RoutedMorphSandbox(router_url="localhost:8001") scripts = ["print('this is fine with morph')", "print('unterminated string"] results = routed_sandbox.run_code(scripts) assert len(results) == 2 # First one should be okay assert results[0].exception_str is None assert "this is fine with morph" in results[0].text # Second one should have a syntax error assert "SyntaxError" in results[1].text if __name__ == "__main__": unittest.main()
open-r1/tests/slow/test_code_reward.py/0
{ "file_path": "open-r1/tests/slow/test_code_reward.py", "repo_id": "open-r1", "token_count": 3946 }
227
<!--⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Fully Sharded Data Parallel [Fully sharded data parallel](https://pytorch.org/docs/stable/fsdp.html) (FSDP) is developed for distributed training of large pretrained models up to 1T parameters. FSDP achieves this by sharding the model parameters, gradients, and optimizer states across data parallel processes, and it can also offload sharded model parameters to a CPU. The memory efficiency afforded by FSDP allows you to scale training to larger batch or model sizes. Both of these features are supported in 🤗 Accelerate, and you can use them with 🤗 PEFT. # Use PEFT and FSDP This section of the guide will help you learn how to use our SFT [training script](https://github.com/huggingface/peft/blob/main/examples/sft/train.py) with FSDP. You'll configure the script to do SFT (supervised fine-tuning) of the Llama-70B model with LoRA and FSDP on 8xH100 80GB GPUs on a single machine. You can configure it to scale to multiple machines by changing the accelerate config. ## Configuration Start by running the following command to [create an FSDP configuration file](https://huggingface.co/docs/accelerate/quicktour#launching-your-distributed-script) with 🤗 Accelerate. The `--config_file` flag allows you to save the configuration file to a specific location, otherwise it is saved as a `default_config.yaml` file in the 🤗 Accelerate cache. The configuration file is used to set the default options when you launch the training script. ```bash accelerate config --config_file fsdp_config.yaml ``` You'll be asked a few questions about your setup and will configure the following arguments. In this example, you'll answer the questionnaire as shown in the image below. 
<div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/peft/fsdp-peft-config.png"/> </div> <small>Creating Accelerate's config to use FSDP</small> Once this is done, the corresponding config should look like below and you can find it in config folder at [fsdp_config.yaml](https://github.com/huggingface/peft/blob/main/examples/sft/configs/fsdp_config.yaml): ```yml compute_environment: LOCAL_MACHINE debug: false distributed_type: FSDP downcast_bf16: 'no' fsdp_config: fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP fsdp_backward_prefetch: BACKWARD_PRE fsdp_cpu_ram_efficient_loading: true fsdp_forward_prefetch: false fsdp_offload_params: false fsdp_sharding_strategy: FULL_SHARD fsdp_state_dict_type: SHARDED_STATE_DICT fsdp_sync_module_states: true fsdp_use_orig_params: false machine_rank: 0 main_training_function: main mixed_precision: bf16 num_machines: 1 num_processes: 8 rdzv_backend: static same_network: true tpu_env: [] tpu_use_cluster: false tpu_use_sudo: false use_cpu: false ``` ## Launch command The launch command is available at [run_peft_fsdp.sh](https://github.com/huggingface/peft/blob/main/examples/sft/run_peft_fsdp.sh) and it is also shown below: ```bash accelerate launch --config_file "configs/fsdp_config.yaml" train.py \ --seed 100 \ --model_name_or_path "meta-llama/Llama-2-70b-hf" \ --dataset_name "smangrul/ultrachat-10k-chatml" \ --chat_template_format "chatml" \ --add_special_tokens False \ --append_concat_token False \ --splits "train,test" \ --max_seq_len 2048 \ --num_train_epochs 1 \ --logging_steps 5 \ --log_level "info" \ --logging_strategy "steps" \ --eval_strategy "epoch" \ --save_strategy "epoch" \ --push_to_hub \ --hub_private_repo True \ --hub_strategy "every_save" \ --bf16 True \ --packing True \ --learning_rate 1e-4 \ --lr_scheduler_type "cosine" \ --weight_decay 1e-4 \ --warmup_ratio 0.0 \ --max_grad_norm 1.0 \ --output_dir "llama-sft-lora-fsdp" \ --per_device_train_batch_size 8 \ --per_device_eval_batch_size 8 \ --gradient_accumulation_steps 4 \ --gradient_checkpointing True \ --use_reentrant False \ --dataset_text_field "content" \ --use_flash_attn True \ --use_peft_lora True \ --lora_r 8 \ --lora_alpha 16 \ --lora_dropout 0.1 \ --lora_target_modules "all-linear" \ --use_4bit_quantization False ``` Notice that we are using LoRA with rank=8, alpha=16 and targeting all linear layers. We are passing the FSDP config file and finetuning the 70B Llama model on a subset of the [ultrachat dataset](https://huggingface.co/datasets/HuggingFaceH4/ultrachat_200k). ## The important parts Let's dive a little deeper into the script so you can see what's going on, and understand how it works. The first thing to know is that the script uses FSDP for distributed training as the FSDP config has been passed. The [`~trl.SFTTrainer`] class handles all the heavy lifting of creating PEFT model using the peft config that is passed. After that when you call `trainer.train()`, Trainer internally uses 🤗 Accelerate to prepare model, optimizer and trainer using the FSDP config to create FSDP wrapped model which is then trained. 
The main code snippet is below: ```python # trainer trainer = SFTTrainer( model=model, processing_class=tokenizer, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, peft_config=peft_config, ) trainer.accelerator.print(f"{trainer.model}") if model_args.use_peft_lora: # handle PEFT+FSDP case trainer.model.print_trainable_parameters() if getattr(trainer.accelerator.state, "fsdp_plugin", None): from peft.utils.other import fsdp_auto_wrap_policy fsdp_plugin = trainer.accelerator.state.fsdp_plugin fsdp_plugin.auto_wrap_policy = fsdp_auto_wrap_policy(trainer.model) # train checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint trainer.train(resume_from_checkpoint=checkpoint) # saving final model if trainer.is_fsdp_enabled: trainer.accelerator.state.fsdp_plugin.set_state_dict_type("FULL_STATE_DICT") trainer.save_model() ``` Here, one main thing to note currently when using FSDP with PEFT is that `use_orig_params` needs to be `False` to realize GPU memory savings. Due to `use_orig_params=False`, the auto wrap policy for FSDP needs to change so that trainable and non-trainable parameters are wrapped separately. This is done by the code snippet below, which uses the utility function `fsdp_auto_wrap_policy` from PEFT: ```python if getattr(trainer.accelerator.state, "fsdp_plugin", None): from peft.utils.other import fsdp_auto_wrap_policy fsdp_plugin = trainer.accelerator.state.fsdp_plugin fsdp_plugin.auto_wrap_policy = fsdp_auto_wrap_policy(trainer.model) ``` ## Memory usage In the above example, the memory consumed per GPU is 72-80 GB (90-98%) as seen in the screenshot below. The slight increase in GPU memory at the end is when saving the model using the `FULL_STATE_DICT` state dict type instead of `SHARDED_STATE_DICT` so that the model has adapter weights that can be loaded normally with the `from_pretrained` method during inference: <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/peft/peft_fsdp_mem_usage.png"/> </div> <small>GPU memory usage for the training run</small> # Use PEFT QLoRA and FSDP for finetuning large models on multiple GPUs In this section, we will look at how to use QLoRA and FSDP for finetuning a 70B Llama model on 2x24GB GPUs. [Answer.AI](https://www.answer.ai/) in collaboration with bitsandbytes and Hugging Face 🤗 open sourced code enabling the usage of FSDP+QLoRA and explained the whole process in their insightful blogpost [You can now train a 70b language model at home](https://www.answer.ai/posts/2024-03-06-fsdp-qlora.html). This is now integrated into the Hugging Face ecosystem. For this, we first need `bitsandbytes>=0.43.3`, `accelerate>=1.0.1`, `transformers>4.44.2`, `trl>0.11.4` and `peft>0.13.0`. We need to set `fsdp_cpu_ram_efficient_loading=true`, `fsdp_use_orig_params=false` and `fsdp_offload_params=true` (CPU offloading) when using the Accelerate config. When not using the accelerate launcher, you can alternatively set the environment variable `export FSDP_CPU_RAM_EFFICIENT_LOADING=true`. 
Here, we will be using accelerate config and below is the config which can be found at [fsdp_config_qlora.yaml](https://github.com/huggingface/peft/blob/main/examples/sft/configs/fsdp_config_qlora.yaml): ```yml compute_environment: LOCAL_MACHINE debug: false distributed_type: FSDP downcast_bf16: 'no' fsdp_config: fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP fsdp_backward_prefetch: BACKWARD_PRE fsdp_cpu_ram_efficient_loading: true fsdp_forward_prefetch: false fsdp_offload_params: true fsdp_sharding_strategy: FULL_SHARD fsdp_state_dict_type: SHARDED_STATE_DICT fsdp_sync_module_states: true fsdp_use_orig_params: false machine_rank: 0 main_training_function: main mixed_precision: 'no' num_machines: 1 num_processes: 2 rdzv_backend: static same_network: true tpu_env: [] tpu_use_cluster: false tpu_use_sudo: false use_cpu: false ``` Launch command is given below which is available at [run_peft_qlora_fsdp.sh](https://github.com/huggingface/peft/blob/main/examples/sft/run_peft_qlora_fsdp.sh): ``` accelerate launch --config_file "configs/fsdp_config_qlora.yaml" train.py \ --seed 100 \ --model_name_or_path "meta-llama/Llama-2-70b-hf" \ --dataset_name "smangrul/ultrachat-10k-chatml" \ --chat_template_format "chatml" \ --add_special_tokens False \ --append_concat_token False \ --splits "train,test" \ --max_seq_len 2048 \ --num_train_epochs 1 \ --logging_steps 5 \ --log_level "info" \ --logging_strategy "steps" \ --eval_strategy "epoch" \ --save_strategy "epoch" \ --push_to_hub \ --hub_private_repo True \ --hub_strategy "every_save" \ --bf16 True \ --packing True \ --learning_rate 1e-4 \ --lr_scheduler_type "cosine" \ --weight_decay 1e-4 \ --warmup_ratio 0.0 \ --max_grad_norm 1.0 \ --output_dir "llama-sft-qlora-fsdp" \ --per_device_train_batch_size 2 \ --per_device_eval_batch_size 2 \ --gradient_accumulation_steps 2 \ --gradient_checkpointing True \ --use_reentrant True \ --dataset_text_field "content" \ --use_flash_attn True \ --use_peft_lora True \ --lora_r 8 \ --lora_alpha 16 \ --lora_dropout 0.1 \ --lora_target_modules "all-linear" \ --use_4bit_quantization True \ --use_nested_quant True \ --bnb_4bit_compute_dtype "bfloat16" \ --bnb_4bit_quant_storage_dtype "bfloat16" ``` Notice the new argument being passed, `bnb_4bit_quant_storage_dtype`, which denotes the data type for packing the 4-bit parameters. For example, when it is set to `bfloat16`, **16/4 = 4** 4-bit params are packed together post quantization. When using mixed precision training with `bfloat16`, `bnb_4bit_quant_storage_dtype` can be either `bfloat16` for pure `bfloat16` finetuning, or `float32` for automatic mixed precision (this consumes more GPU memory). When using mixed precision training with `float16`, `bnb_4bit_quant_storage_dtype` should be set to `float32` for stable automatic mixed precision training. In terms of training code, the important code changes are: ```diff ... bnb_config = BitsAndBytesConfig( load_in_4bit=args.use_4bit_quantization, bnb_4bit_quant_type=args.bnb_4bit_quant_type, bnb_4bit_compute_dtype=compute_dtype, bnb_4bit_use_double_quant=args.use_nested_quant, + bnb_4bit_quant_storage=quant_storage_dtype, ) ... model = AutoModelForCausalLM.from_pretrained( args.model_name_or_path, quantization_config=bnb_config, trust_remote_code=True, attn_implementation="flash_attention_2" if args.use_flash_attn else "eager", + torch_dtype=quant_storage_dtype or torch.float32, ) ``` Notice that `torch_dtype` for `AutoModelForCausalLM` is same as the `bnb_4bit_quant_storage` data type. That's it. 
Everything else is handled by Trainer and TRL. ## Memory usage In the above example, the memory consumed per GPU is **19.6 GB** while CPU RAM usage is around **107 GB**. When disabling CPU offloading, the GPU memory usage is **35.6 GB/GPU**. Therefore, what took 16x80GB GPUs for full finetuning, 8x80GB GPUs with FSDP+LoRA, and a couple of 80GB GPUs with DDP+QLoRA, now requires 2x24GB GPUs. This makes finetuning of large models more accessible. ## More resources You can also refer to the [llama-recipes](https://github.com/facebookresearch/llama-recipes/?tab=readme-ov-file#fine-tuning) repo and the [Getting started with Llama](https://llama.meta.com/get-started/#fine-tuning) guide on how to finetune using FSDP and PEFT. ## Caveats 1. Merging when using PEFT and FSDP is currently unsupported and will raise an error. 2. Passing the `modules_to_save` config parameter is untested at present. 3. GPU memory saving when using CPU offloading is untested at present. 4. When using FSDP+QLoRA, `paged_adamw_8bit` currently results in an error when saving a checkpoint. 5. DoRA training with FSDP should work (albeit at lower speed than LoRA). If combined with bitsandbytes (QDoRA), 4-bit quantization should also work, but 8-bit quantization has known issues and is not recommended.
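Finally, because the script above saves the adapter with the `FULL_STATE_DICT` state dict type, it can be loaded back like any other PEFT adapter for inference. The sketch below is illustrative rather than part of the official script; it assumes the adapter was written to the `llama-sft-lora-fsdp` output directory from the launch command, and the prompt is a placeholder:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_model_id = "meta-llama/Llama-2-70b-hf"
tokenizer = AutoTokenizer.from_pretrained(base_model_id)
base_model = AutoModelForCausalLM.from_pretrained(
    base_model_id, torch_dtype=torch.bfloat16, device_map="auto"
)

# Attach the LoRA adapter produced by the FSDP training run
model = PeftModel.from_pretrained(base_model, "llama-sft-lora-fsdp")
model.eval()

inputs = tokenizer("What is fully sharded data parallel training?", return_tensors="pt").to(model.device)
with torch.no_grad():
    outputs = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```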
peft/docs/source/accelerate/fsdp.md/0
{ "file_path": "peft/docs/source/accelerate/fsdp.md", "repo_id": "peft", "token_count": 4781 }
228
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Installation Before you start, you will need to set up your environment, install the appropriate packages, and configure 🤗 PEFT. 🤗 PEFT is tested on **Python 3.9+**. 🤗 PEFT is available on PyPI, as well as GitHub: ## PyPI To install 🤗 PEFT from PyPI: ```bash pip install peft ``` ## Source New features that haven't been released yet are added every day, which also means there may be some bugs. To try them out, install from the GitHub repository: ```bash pip install git+https://github.com/huggingface/peft ``` If you're working on contributing to the library or wish to play with the source code and see live results as you run the code, an editable version can be installed from a locally-cloned version of the repository: ```bash git clone https://github.com/huggingface/peft cd peft pip install -e .[test] ```
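Once installed, you can sanity-check the environment by importing the library and printing its version, for example:

```bash
python -c "import peft; print(peft.__version__)"
```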
peft/docs/source/install.md/0
{ "file_path": "peft/docs/source/install.md", "repo_id": "peft", "token_count": 439 }
229
<!--Copyright 2025 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Trainable Tokens The Trainable Tokens method provides a way to target specific token embeddings for fine-tuning without resorting to training the full embedding matrix or using an adapter on the embedding matrix. It is based on the initial implementation from [here](https://github.com/huggingface/peft/pull/1541). The method only targets specific tokens and selectively trains the token indices you specify. Consequently, the required RAM will be lower, and the required disk space is also significantly smaller than for storing the full fine-tuned embedding matrix. Some preliminary benchmarks acquired with [this script](https://github.com/huggingface/peft/blob/main/scripts/train_memory.py) suggest that for `gemma-2-2b` (which has a rather large embedding matrix) you can save ~4 GiB VRAM with Trainable Tokens over fully fine-tuning the embedding matrix. While LoRA will use comparable amounts of VRAM, it might also target tokens you don't want to be changed. Note that these are just indications, and varying embedding matrix sizes might skew these numbers a bit. Note that this method does not add tokens for you; you have to add tokens to the tokenizer yourself and resize the embedding matrix of the model accordingly. This method will only re-train the embeddings for the tokens you specify. This method can also be used in conjunction with LoRA layers! See [the LoRA developer guide](../developer_guides/lora#efficiently-train-tokens-alongside-lora). > [!TIP] > Saving the model with [`~PeftModel.save_pretrained`] or retrieving the state dict using > [`get_peft_model_state_dict`] when adding new tokens may save the full embedding matrix instead of only the difference > as a precaution because the embedding matrix was resized. To save space you can disable this behavior by setting > `save_embedding_layers=False` when calling `save_pretrained`. This is safe to do as long as you don't modify the > embedding matrix through other means as well, as such changes will not be tracked by trainable tokens. ## TrainableTokensConfig [[autodoc]] tuners.trainable_tokens.config.TrainableTokensConfig ## TrainableTokensModel [[autodoc]] tuners.trainable_tokens.model.TrainableTokensModel
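To complement the reference above, here is a minimal, hypothetical sketch of the intended workflow: adding new tokens, resizing the embedding matrix yourself, and then training only the embeddings of those tokens. The model name and tokens are placeholders, and the argument names should be checked against [`TrainableTokensConfig`] in your installed PEFT version:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import TrainableTokensConfig, get_peft_model

base_model_id = "your-base-model"  # placeholder for any causal LM checkpoint
tokenizer = AutoTokenizer.from_pretrained(base_model_id)
model = AutoModelForCausalLM.from_pretrained(base_model_id)

# Trainable Tokens does not add tokens for you: add them and resize the embedding matrix first
new_tokens = ["<|sep|>", "<|meta|>"]  # hypothetical new tokens
tokenizer.add_tokens(new_tokens)
model.resize_token_embeddings(len(tokenizer))

# Only the embeddings at these token indices will be trained
peft_config = TrainableTokensConfig(token_indices=tokenizer.convert_tokens_to_ids(new_tokens))
peft_model = get_peft_model(model, peft_config)
peft_model.print_trainable_parameters()

# Optionally keep the checkpoint small (see the tip above)
# peft_model.save_pretrained("trainable-tokens-adapter", save_embedding_layers=False)
```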
peft/docs/source/package_reference/trainable_tokens.md/0
{ "file_path": "peft/docs/source/package_reference/trainable_tokens.md", "repo_id": "peft", "token_count": 745 }
230
# Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # The implementation is based on "Parameter-Efficient Orthogonal Finetuning # via Butterfly Factorization" (https://huggingface.co/papers/2311.06243) in ICLR 2024. import os import sys import time from pathlib import Path import numpy as np import torch import torch.utils.checkpoint from accelerate import Accelerator from diffusers import DDIMScheduler from diffusers.utils import check_min_version from safetensors.torch import load_file from tqdm import tqdm from transformers import AutoTokenizer from utils.args_loader import parse_args from utils.dataset import make_dataset from utils.light_controlnet import ControlNetModel from utils.pipeline_controlnet import LightControlNetPipeline from utils.unet_2d_condition import UNet2DConditionNewModel sys.path.append("../../src") from peft import PeftModel # noqa: E402 # Will error if the minimal version of diffusers is not installed. Remove at your own risks. check_min_version("0.10.0.dev0") if torch.xpu.is_available(): device = "xpu:0" elif torch.cuda.is_available(): device = "cuda:0" else: device = "cpu" def main(args): logging_dir = Path(args.output_dir, args.logging_dir) accelerator = Accelerator( gradient_accumulation_steps=args.gradient_accumulation_steps, mixed_precision=args.mixed_precision, log_with=args.report_to, project_dir=logging_dir, ) # Load the tokenizer if args.tokenizer_name: tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, revision=args.revision, use_fast=False) elif args.pretrained_model_name_or_path: tokenizer = AutoTokenizer.from_pretrained( args.pretrained_model_name_or_path, subfolder="tokenizer", revision=args.revision, use_fast=False, ) val_dataset = make_dataset(args, tokenizer, accelerator, "test") controlnet_path = args.controlnet_path unet_path = args.unet_path controlnet = ControlNetModel() controlnet.load_state_dict(load_file(controlnet_path)) unet = UNet2DConditionNewModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet") unet = PeftModel.from_pretrained(unet, unet_path, adapter_name=args.adapter_name) pipe = LightControlNetPipeline.from_pretrained( args.pretrained_model_name_or_path, controlnet=controlnet, unet=unet.model, torch_dtype=torch.float32, requires_safety_checker=False, ).to(device) pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config) if not os.path.exists(args.output_dir): os.makedirs(args.output_dir, exist_ok=True) exist_lst = [int(img.split("_")[-1][:-4]) for img in os.listdir(args.output_dir)] all_lst = np.arange(len(val_dataset)) idx_lst = [item for item in all_lst if item not in exist_lst] print("Number of images to be processed: ", len(idx_lst)) np.random.seed(seed=int(time.time())) np.random.shuffle(idx_lst) for idx in tqdm(idx_lst): output_path = os.path.join(args.output_dir, f"pred_img_{idx:04d}.png") if not os.path.exists(output_path): data = val_dataset[idx.item()] negative_prompt = "low quality, blurry, unfinished" with torch.no_grad(): pred_img = pipe( 
data["text"], [data["conditioning_pixel_values"]], num_inference_steps=50, guidance_scale=7, negative_prompt=negative_prompt, ).images[0] pred_img.save(output_path) # control_img = Image.fromarray( # (data["conditioning_pixel_value"] * 255).numpy().transpose(1, 2, 0).astype(np.uint8) # ) # gt_img = Image.fromarray( # ((data["pixel_value"] + 1.0) * 0.5 * 255).numpy().transpose(1, 2, 0).astype(np.uint8) # ) if __name__ == "__main__": args = parse_args() main(args)
peft/examples/boft_controlnet/test_controlnet.py/0
{ "file_path": "peft/examples/boft_controlnet/test_controlnet.py", "repo_id": "peft", "token_count": 1827 }
231
#!/usr/bin/env python # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # The implementation is based on "Parameter-Efficient Orthogonal Finetuning # via Butterfly Factorization" (https://huggingface.co/papers/2311.06243) in ICLR 2024. import hashlib import itertools import logging import math import os from contextlib import nullcontext from pathlib import Path import datasets import diffusers import numpy as np import torch import torch.nn.functional as F import torch.utils.checkpoint import transformers from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import ProjectConfiguration, set_seed from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, DPMSolverMultistepScheduler, UNet2DConditionModel, ) from diffusers.optimization import get_scheduler from diffusers.utils import check_min_version from diffusers.utils.import_utils import is_xformers_available from huggingface_hub import Repository from tqdm.auto import tqdm from transformers import AutoTokenizer from utils.args_loader import ( get_full_repo_name, import_model_class_from_model_name_or_path, parse_args, ) from utils.dataset import DreamBoothDataset, PromptDataset, collate_fn from utils.tracemalloc import TorchTracemalloc, b2mb from peft import BOFTConfig, get_peft_model # Will error if the minimal version of diffusers is not installed. Remove at your own risks. check_min_version("0.16.0.dev0") logger = get_logger(__name__) UNET_TARGET_MODULES = ["to_q", "to_v", "to_k", "query", "value", "key", "to_out.0", "add_k_proj", "add_v_proj"] TEXT_ENCODER_TARGET_MODULES = ["q_proj", "v_proj"] def save_adaptor(accelerator, step, unet, text_encoder, args): unwarpped_unet = accelerator.unwrap_model(unet) unwarpped_unet.save_pretrained( os.path.join(args.output_dir, f"unet/{step}"), state_dict=accelerator.get_state_dict(unet) ) if args.train_text_encoder: unwarpped_text_encoder = accelerator.unwrap_model(text_encoder) unwarpped_text_encoder.save_pretrained( os.path.join(args.output_dir, f"text_encoder/{step}"), state_dict=accelerator.get_state_dict(text_encoder), ) def main(args): validation_prompts = list(filter(None, args.validation_prompt[0].split("."))) logging_dir = Path(args.output_dir, args.logging_dir) accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir) accelerator = Accelerator( gradient_accumulation_steps=args.gradient_accumulation_steps, mixed_precision=args.mixed_precision, log_with=args.report_to, project_dir=accelerator_project_config, ) if args.report_to == "wandb": import wandb wandb_init = { "wandb": { "name": args.wandb_run_name, "mode": "online", } } # Currently, it's not possible to do gradient accumulation when training two models with accelerate.accumulate # This will be enabled soon in accelerate. For now, we don't allow gradient accumulation when training two models. 
# TODO (patil-suraj): Remove this check when gradient accumulation with two models is enabled in accelerate. if args.train_text_encoder and args.gradient_accumulation_steps > 1 and accelerator.num_processes > 1: raise ValueError( "Gradient accumulation is not supported when training the text encoder in distributed training. " "Please set gradient_accumulation_steps to 1. This feature will be supported in the future." ) # Make one log on every process with the configuration for debugging. logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger.info(accelerator.state, main_process_only=False) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_warning() diffusers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() diffusers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. global_seed = hash(args.wandb_run_name) % (2**32) set_seed(global_seed) # Generate class images if prior preservation is enabled. if args.with_prior_preservation: class_images_dir = Path(args.class_data_dir) if not class_images_dir.exists(): class_images_dir.mkdir(parents=True) cur_class_images = len(list(class_images_dir.iterdir())) if cur_class_images < args.num_class_images: torch_dtype = torch.float16 if accelerator.device.type in ["cuda", "xpu"] else torch.float32 if args.prior_generation_precision == "fp32": torch_dtype = torch.float32 elif args.prior_generation_precision == "fp16": torch_dtype = torch.float16 elif args.prior_generation_precision == "bf16": torch_dtype = torch.bfloat16 pipeline = DiffusionPipeline.from_pretrained( args.pretrained_model_name_or_path, torch_dtype=torch_dtype, safety_checker=None, revision=args.revision, ) pipeline.set_progress_bar_config(disable=True) num_new_images = args.num_class_images - cur_class_images logger.info(f"Number of class images to sample: {num_new_images}.") sample_dataset = PromptDataset(args.class_prompt, num_new_images) sample_dataloader = torch.utils.data.DataLoader(sample_dataset, batch_size=args.sample_batch_size) sample_dataloader = accelerator.prepare(sample_dataloader) pipeline.to(accelerator.device) for example in tqdm( sample_dataloader, desc="Generating class images", disable=not accelerator.is_local_main_process ): images = pipeline(example["prompt"]).images for i, image in enumerate(images): hash_image = hashlib.sha1(image.tobytes()).hexdigest() image_filename = class_images_dir / f"{example['index'][i] + cur_class_images}-{hash_image}.jpg" image.save(image_filename) del pipeline if torch.cuda.is_available(): torch.cuda.empty_cache() elif torch.xpu.is_available(): torch.xpu.empty_cache() # Handle the repository creation if accelerator.is_main_process: if args.push_to_hub: if args.hub_model_id is None: repo_name = get_full_repo_name(Path(args.output_dir).name, token=args.hub_token) else: repo_name = args.hub_model_id repo = Repository(args.output_dir, clone_from=repo_name) # noqa: F841 with open(os.path.join(args.output_dir, ".gitignore"), "w+") as gitignore: if "step_*" not in gitignore: gitignore.write("step_*\n") if "epoch_*" not in gitignore: gitignore.write("epoch_*\n") elif args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) # Load the tokenizer if args.tokenizer_name: tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, revision=args.revision, 
use_fast=False) elif args.pretrained_model_name_or_path: tokenizer = AutoTokenizer.from_pretrained( args.pretrained_model_name_or_path, subfolder="tokenizer", revision=args.revision, use_fast=False, ) # import correct text encoder class text_encoder_cls = import_model_class_from_model_name_or_path(args.pretrained_model_name_or_path, args.revision) # Load scheduler and models noise_scheduler = DDIMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler") text_encoder = text_encoder_cls.from_pretrained( args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision ) vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision) unet = UNet2DConditionModel.from_pretrained( args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision ) if args.use_boft: config = BOFTConfig( boft_block_size=args.boft_block_size, boft_block_num=args.boft_block_num, boft_n_butterfly_factor=args.boft_n_butterfly_factor, target_modules=UNET_TARGET_MODULES, boft_dropout=args.boft_dropout, bias=args.boft_bias, ) unet = get_peft_model(unet, config, adapter_name=args.wandb_run_name) unet.print_trainable_parameters() vae.requires_grad_(False) unet.train() if args.train_text_encoder and args.use_boft: config = BOFTConfig( boft_block_size=args.boft_block_size, boft_block_num=args.boft_block_num, boft_n_butterfly_factor=args.boft_n_butterfly_factor, target_modules=TEXT_ENCODER_TARGET_MODULES, boft_dropout=args.boft_dropout, bias=args.boft_bias, ) text_encoder = get_peft_model(text_encoder, config, adapter_name=args.wandb_run_name) text_encoder.print_trainable_parameters() text_encoder.train() else: text_encoder.requires_grad_(False) # For mixed precision training we cast the text_encoder and vae weights to half-precision # as these models are only used for inference, keeping weights in full precision is not required. weight_dtype = torch.float32 if accelerator.mixed_precision == "fp16": weight_dtype = torch.float16 elif accelerator.mixed_precision == "bf16": weight_dtype = torch.bfloat16 # Move unet, vae and text_encoder to device and cast to weight_dtype unet.to(accelerator.device, dtype=weight_dtype) vae.to(accelerator.device, dtype=weight_dtype) text_encoder.to(accelerator.device, dtype=weight_dtype) if args.enable_xformers_memory_efficient_attention: if accelerator.device.type == "xpu": logger.warn("XPU hasn't support xformers yet, ignore it.") elif is_xformers_available(): unet.enable_xformers_memory_efficient_attention() else: raise ValueError("xformers is not available. Make sure it is installed correctly") if args.gradient_checkpointing: unet.enable_gradient_checkpointing() # below fails when using boft so commenting it out if args.train_text_encoder and not args.use_boft: text_encoder.gradient_checkpointing_enable() # Enable TF32 for faster training on Ampere GPUs, # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices if args.allow_tf32 and torch.cuda.is_available(): torch.backends.cuda.matmul.allow_tf32 = True if args.scale_lr: args.learning_rate = ( args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes ) # Use 8-bit Adam for lower memory usage or to fine-tune the model in 16GB GPUs if args.use_8bit_adam: try: import bitsandbytes as bnb except ImportError: raise ImportError( "To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`." 
) optimizer_class = bnb.optim.AdamW8bit else: optimizer_class = torch.optim.AdamW # Optimizer creation params_to_optimize = [param for param in unet.parameters() if param.requires_grad] if args.train_text_encoder: params_to_optimize += [param for param in text_encoder.parameters() if param.requires_grad] optimizer = optimizer_class( params_to_optimize, lr=args.learning_rate, betas=(args.adam_beta1, args.adam_beta2), weight_decay=args.adam_weight_decay, eps=args.adam_epsilon, ) # Download the official dreambooth dataset from the official repository: https://github.com/google/dreambooth.git data_path = os.path.join(os.getcwd(), "data", "dreambooth") if not os.path.exists(data_path): os.makedirs(os.path.join(os.getcwd(), "data"), exist_ok=True) os.system(f"git clone https://github.com/google/dreambooth.git '{data_path}'") # Dataset and DataLoaders creation: train_dataset = DreamBoothDataset( instance_data_root=args.instance_data_dir, instance_prompt=args.instance_prompt, class_data_root=args.class_data_dir if args.with_prior_preservation else None, class_prompt=args.class_prompt, tokenizer=tokenizer, size=args.resolution, center_crop=args.center_crop, ) train_dataloader = torch.utils.data.DataLoader( train_dataset, batch_size=args.train_batch_size, shuffle=True, collate_fn=lambda examples: collate_fn(examples, args.with_prior_preservation), num_workers=args.num_dataloader_workers, ) # Scheduler and math around the number of training steps. overrode_max_train_steps = False num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if args.max_train_steps is None: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch overrode_max_train_steps = True lr_scheduler = get_scheduler( args.lr_scheduler, optimizer=optimizer, num_warmup_steps=args.lr_warmup_steps * args.gradient_accumulation_steps, num_training_steps=args.max_train_steps * args.gradient_accumulation_steps, num_cycles=args.lr_num_cycles, power=args.lr_power, ) # Prepare everything with our `accelerator`. if args.train_text_encoder: unet, text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( unet, text_encoder, optimizer, train_dataloader, lr_scheduler ) else: unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( unet, optimizer, train_dataloader, lr_scheduler ) # For mixed precision training we cast the text_encoder and vae weights to half-precision # as these models are only used for inference, keeping weights in full precision is not required. weight_dtype = torch.float32 if accelerator.mixed_precision == "fp16": weight_dtype = torch.float16 elif accelerator.mixed_precision == "bf16": weight_dtype = torch.bfloat16 # Move vae and text_encoder to device and cast to weight_dtype vae.to(accelerator.device, dtype=weight_dtype) if not args.train_text_encoder: text_encoder.to(accelerator.device, dtype=weight_dtype) # We need to recalculate our total training steps as the size of the training dataloader may have changed. num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if overrode_max_train_steps: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch # Afterwards we recalculate our number of training epochs args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) # We need to initialize the trackers we use, and also store our configuration. # The trackers initializes automatically on the main process. 
if accelerator.is_main_process: accelerator.init_trackers(args.wandb_project_name, config=vars(args), init_kwargs=wandb_init) # Train! total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps logger.info("***** Running training *****") logger.info(f" Num examples = {len(train_dataset)}") logger.info(f" Num batches each epoch = {len(train_dataloader)}") logger.info(f" Num Epochs = {args.num_train_epochs}") logger.info(f" Instantaneous batch size per device = {args.train_batch_size}") logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}") logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") logger.info(f" Total optimization steps = {args.max_train_steps}") global_step = 0 first_epoch = 0 # Potentially load in the weights and states from a previous save if args.resume_from_checkpoint: if args.resume_from_checkpoint != "latest": path = os.path.basename(args.resume_from_checkpoint) else: # Get the most recent checkpoint dirs = os.listdir(args.output_dir) dirs = [d for d in dirs if d.startswith("checkpoint")] dirs = sorted(dirs, key=lambda x: int(x.split("-")[1])) path = dirs[-1] if len(dirs) > 0 else None accelerator.print(f"Resuming from checkpoint {path}") accelerator.load_state(os.path.join(args.output_dir, path)) global_step = int(path.split("-")[1]) resume_global_step = global_step * args.gradient_accumulation_steps first_epoch = resume_global_step // num_update_steps_per_epoch resume_step = resume_global_step % num_update_steps_per_epoch # Only show the progress bar once on each machine. progress_bar = tqdm(range(global_step, args.max_train_steps), disable=not accelerator.is_local_main_process) progress_bar.set_description("Steps") if args.train_text_encoder: text_encoder.train() for epoch in range(first_epoch, args.num_train_epochs): unet.train() with TorchTracemalloc() if not args.no_tracemalloc else nullcontext() as tracemalloc: for step, batch in enumerate(train_dataloader): # Skip steps until we reach the resumed step if args.resume_from_checkpoint and epoch == first_epoch and step < resume_step: if step % args.gradient_accumulation_steps == 0: progress_bar.update(1) if args.report_to == "wandb": accelerator.print(progress_bar) continue with accelerator.accumulate(unet): # Convert images to latent space latents = vae.encode(batch["pixel_values"].to(dtype=weight_dtype)).latent_dist.sample() latents = latents * vae.config.scaling_factor # Sample noise that we'll add to the latents noise = torch.randn_like(latents) bsz = latents.shape[0] # Sample a random timestep for each image timesteps = torch.randint( 0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device ) timesteps = timesteps.long() # Add noise to the latents according to the noise magnitude at each timestep # (this is the forward diffusion process) noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps) # Get the text embedding for conditioning encoder_hidden_states = text_encoder(batch["input_ids"])[0] # Predict the noise residual model_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample # Get the target for loss depending on the prediction type if noise_scheduler.config.prediction_type == "epsilon": target = noise elif noise_scheduler.config.prediction_type == "v_prediction": target = noise_scheduler.get_velocity(latents, noise, timesteps) else: raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}") if 
args.with_prior_preservation: # Chunk the noise and model_pred into two parts and compute the loss on each part separately. model_pred, model_pred_prior = torch.chunk(model_pred, 2, dim=0) target, target_prior = torch.chunk(target, 2, dim=0) # Compute instance loss loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean") # Compute prior loss prior_loss = F.mse_loss(model_pred_prior.float(), target_prior.float(), reduction="mean") # Add the prior loss to the instance loss. loss = loss + args.prior_loss_weight * prior_loss else: loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean") accelerator.backward(loss) if accelerator.sync_gradients: params_to_clip = ( itertools.chain(unet.parameters(), text_encoder.parameters()) if args.train_text_encoder else unet.parameters() ) accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm) optimizer.step() lr_scheduler.step() optimizer.zero_grad() # Checks if the accelerator has performed an optimization step behind the scenes if accelerator.sync_gradients: progress_bar.update(1) if args.report_to == "wandb": accelerator.print(progress_bar) global_step += 1 if global_step % args.checkpointing_steps == 0 and global_step != 0: if accelerator.is_main_process: save_adaptor(accelerator, global_step, unet, text_encoder, args) logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]} progress_bar.set_postfix(**logs) accelerator.log(logs, step=global_step) if ( args.validation_prompt is not None and (step + num_update_steps_per_epoch * epoch) % args.validation_steps == 0 and global_step > 10 ): unet.eval() logger.info( f"Running validation... \n Generating {len(validation_prompts)} images with prompt:" f" {validation_prompts[0]}, ......" ) # create pipeline pipeline = DiffusionPipeline.from_pretrained( args.pretrained_model_name_or_path, safety_checker=None, revision=args.revision, ) # set `keep_fp32_wrapper` to True because we do not want to remove # mixed precision hooks while we are still training pipeline.unet = accelerator.unwrap_model(unet, keep_fp32_wrapper=True) pipeline.text_encoder = accelerator.unwrap_model(text_encoder, keep_fp32_wrapper=True) pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config) pipeline = pipeline.to(accelerator.device) pipeline.set_progress_bar_config(disable=True) # run inference if args.seed is not None: generator = torch.Generator(device=accelerator.device).manual_seed(args.seed) else: generator = None # images = [] # for _ in range(args.num_validation_images): # image = pipeline(args.validation_prompt, num_inference_steps=25, generator=generator).images[0] # images.append(image) images = [] val_img_dir = os.path.join( args.output_dir, f"validation/{global_step}", args.wandb_run_name, ) os.makedirs(val_img_dir, exist_ok=True) for val_promot in validation_prompts: image = pipeline(val_promot, num_inference_steps=50, generator=generator).images[0] image.save(os.path.join(val_img_dir, f"{'_'.join(val_promot.split(' '))}.png"[1:])) images.append(image) for tracker in accelerator.trackers: if tracker.name == "tensorboard": np_images = np.stack([np.asarray(img) for img in images]) tracker.writer.add_images("validation", np_images, epoch, dataformats="NHWC") if tracker.name == "wandb": import wandb tracker.log( { "validation": [ wandb.Image(image, caption=f"{i}: {validation_prompts[i]}") for i, image in enumerate(images) ] } ) del pipeline if torch.cuda.is_available(): torch.cuda.empty_cache() elif torch.xpu.is_available(): torch.xpu.empty_cache() 
if global_step >= args.max_train_steps: break # Printing the accelerator memory usage details such as allocated memory, peak memory, and total memory usage if not args.no_tracemalloc: accelerator.print( f"{accelerator.device.type.upper()} Memory before entering the train : {b2mb(tracemalloc.begin)}" ) accelerator.print( f"{accelerator.device.type.upper()} Memory consumed at the end of the train (end-begin): {tracemalloc.used}" ) accelerator.print( f"{accelerator.device.type.upper()} Peak Memory consumed during the train (max-begin): {tracemalloc.peaked}" ) accelerator.print( f"{accelerator.device.type.upper()} Total Peak Memory consumed during the train (max): {tracemalloc.peaked + b2mb(tracemalloc.begin)}" ) accelerator.print(f"CPU Memory before entering the train : {b2mb(tracemalloc.cpu_begin)}") accelerator.print(f"CPU Memory consumed at the end of the train (end-begin): {tracemalloc.cpu_used}") accelerator.print(f"CPU Peak Memory consumed during the train (max-begin): {tracemalloc.cpu_peaked}") accelerator.print( f"CPU Total Peak Memory consumed during the train (max): {tracemalloc.cpu_peaked + b2mb(tracemalloc.cpu_begin)}" ) if args.push_to_hub: repo.push_to_hub(commit_message="End of training", blocking=False, auto_lfs_prune=True) accelerator.end_training() if __name__ == "__main__": args = parse_args() main(args)
peft/examples/boft_dreambooth/train_dreambooth.py/0
{ "file_path": "peft/examples/boft_dreambooth/train_dreambooth.py", "repo_id": "peft", "token_count": 12892 }
232
<jupyter_start><jupyter_code>import torch from datasets import load_dataset from transformers import set_seed, AutoModelForSeq2SeqLM, AutoTokenizer from peft import get_peft_model, MultitaskPromptTuningConfig, TaskType, MultitaskPromptTuningInit set_seed(42) device = torch.accelerator.current_accelerator().type if hasattr(torch, "accelerator") else "cuda" model_name = "google/flan-t5-base" peft_config = MultitaskPromptTuningConfig( tokenizer_name_or_path=model_name, num_tasks=2, task_type=TaskType.SEQ_2_SEQ_LM, prompt_tuning_init=MultitaskPromptTuningInit.TEXT, num_virtual_tokens=50, num_transformer_submodules=1, prompt_tuning_init_text="classify the following into either positive or negative, or entailment, neutral or contradiction:", ) tokenizer = AutoTokenizer.from_pretrained(model_name) model = AutoModelForSeq2SeqLM.from_pretrained(model_name) model = get_peft_model(model, peft_config) model = model.to(device) def send_to_device(batch): for i in batch: batch[i] = batch[i].to(device) return batch def get_sst2(split: str): examples = load_dataset("sst2")[split] result_examples = [] for example in examples: result_examples.append({}) result_examples[-1]["input"] = example["sentence"].strip() + "</s>" result_examples[-1]["output"] = ( f"positive{tokenizer.eos_token}" if example["label"] == 1 else f"negative{tokenizer.eos_token}" ) result_examples[-1]["task_id"] = 0 return result_examples def get_mnli(split: str): examples = load_dataset("multi_nli")[split] result_examples = [] for example in examples: result_examples.append({}) result_examples[-1]["input"] = example["premise"].strip() + " " + example["hypothesis"].strip() + "</s>" if example["label"] == 0: result_examples[-1]["output"] = f"entailment{tokenizer.eos_token}" elif example["label"] == 1: result_examples[-1]["output"] = f"neutral{tokenizer.eos_token}" else: result_examples[-1]["output"] = f"contradiction{tokenizer.eos_token}" result_examples[-1]["task_id"] = 1 return result_examples from typing import Tuple from torch.utils.data import Dataset, DataLoader import torch class MyDataset(Dataset): def __init__(self, split: str, mode: str = "source") -> None: super().__init__() if split == "train": if mode == "source": self.examples = get_sst2(split) + get_mnli(split) elif mode == "target": self.examples = get_sst2(split) if split == "val": self.examples = get_sst2("validation") if split == "test": self.examples = get_sst2("validation") def __getitem__(self, index) -> dict: return self.examples[index] def __len__(self) -> int: return len(self.examples) def __getitem__(self, index) -> dict: return self.examples[index] def __len__(self) -> int: return len(self.examples) def collate_fn(batch: dict) -> Tuple[torch.Tensor, torch.Tensor]: input = [i["input"] for i in batch] input = tokenizer(input, add_special_tokens=False, return_tensors="pt", padding=True) output = [i["output"] for i in batch] output = tokenizer(output, add_special_tokens=False, return_tensors="pt", padding=True).input_ids output[output == tokenizer.pad_token_id] = -100 task_ids = [i["task_id"] for i in batch] task_ids = torch.tensor(task_ids) return { "input_ids": input.input_ids, "attention_mask": input.attention_mask, "labels": output, "task_ids": task_ids, } train = DataLoader(MyDataset("train"), shuffle=True, batch_size=8, collate_fn=collate_fn) val = DataLoader(MyDataset("val"), shuffle=False, batch_size=8, collate_fn=collate_fn) test = DataLoader(MyDataset("test"), shuffle=False, batch_size=8, 
collate_fn=collate_fn)<jupyter_output><empty_output><jupyter_text>source training<jupyter_code>from torch.optim.adamw import AdamW from transformers import get_cosine_schedule_with_warmup from tqdm import tqdm from sklearn.metrics import f1_score POSITIVE_TOKEN_ID = tokenizer(" positive", add_special_tokens=False)["input_ids"][0] NEGATIVE_TOKEN_ID = tokenizer(" negative", add_special_tokens=False)["input_ids"][0] def classify(batch): batch = send_to_device(batch) # we pass labels here since we need to generate and peft doesn't support generation yet. # No clue how to get around this scores = model(**batch).logits preds = [] for i in range(scores.shape[0]): if scores[i, 0, POSITIVE_TOKEN_ID] > scores[i, 0, NEGATIVE_TOKEN_ID]: preds.append(POSITIVE_TOKEN_ID) else: preds.append(NEGATIVE_TOKEN_ID) return preds @torch.inference_mode() def evaluate(model, data): loss = 0 preds = [] golds = [] for batch in tqdm(data): batch = send_to_device(batch) loss += model(**batch).loss golds.extend(batch["labels"][:, 0].tolist()) preds.extend(classify(batch)) return loss / len(val), f1_score(golds, preds, pos_label=POSITIVE_TOKEN_ID) optimizer = AdamW(model.parameters(), lr=1e-4) scheduler = get_cosine_schedule_with_warmup(optimizer, 200, len(train)) n = 1000 step = 0 train_ = tqdm(train) val_loss, f1 = evaluate(model, val) print( f""" before source training val loss = {val_loss} f1 = {f1}""" ) for batch in train_: if step % n == 0: val_loss, f1 = evaluate(model, val) print( f""" step = {step} val loss = {val_loss} f1 = {f1}""" ) model.save_pretrained(f"checkpoints_source/{step}") step += 1 batch = send_to_device(batch) loss = model(**batch).loss loss.backward() optimizer.step() scheduler.step() train_.set_postfix(train_loss=loss)<jupyter_output><empty_output><jupyter_text>target training<jupyter_code>train = DataLoader(MyDataset("train", "target"), shuffle=True, batch_size=8, collate_fn=collate_fn) val = DataLoader(MyDataset("val", "target"), shuffle=False, batch_size=8, collate_fn=collate_fn) test = DataLoader(MyDataset("test", "target"), shuffle=False, batch_size=8, collate_fn=collate_fn)<jupyter_output><empty_output><jupyter_text>create a fresh model<jupyter_code>peft_config = MultitaskPromptTuningConfig( tokenizer_name_or_path=model_name, num_tasks=1, task_type=TaskType.SEQ_2_SEQ_LM, prompt_tuning_init=MultitaskPromptTuningInit.EXACT_SOURCE_TASK, prompt_tuning_init_state_dict_path="checkpoints_source/50000/adapter_model.safetensors", num_virtual_tokens=50, num_transformer_submodules=1, ) tokenizer = AutoTokenizer.from_pretrained(model_name) model = AutoModelForSeq2SeqLM.from_pretrained(model_name) model = get_peft_model(model, peft_config) model = model.to(device) optimizer = AdamW(model.parameters(), lr=1e-4) scheduler = get_cosine_schedule_with_warmup(optimizer, 200, len(train)) n = 1000 step = 0 train_ = tqdm(train) val_loss, f1 = evaluate(model, val) print( f""" before target training val loss = {val_loss} f1 = {f1}""" ) for batch in train_: if step % n == 0: val_loss, f1 = evaluate(model, val) print( f""" step = {step} val loss = {val_loss} f1 = {f1}""" ) model.save_pretrained(f"checkpoints_target/{step}") step += 1 batch = send_to_device(batch) loss = model(**batch).loss loss.backward() optimizer.step() scheduler.step() train_.set_postfix(train_loss=loss) # load last checkpoint for now from peft import set_peft_model_state_dict from safetensors.torch import load_file sd_6000 = load_file("checkpoints_target/6000/adapter_model.safetensors") set_peft_model_state_dict(model, sd_6000) # evaluate val 
val_loss, f1 = evaluate(model, val) print( f""" final val loss = {val_loss} f1 = {f1}""" ) # evaluate test test_loss, f1 = evaluate(model, test) print( f""" final test loss = {test_loss} f1 = {f1}""" )<jupyter_output><empty_output>
peft/examples/conditional_generation/multitask_prompt_tuning.ipynb/0
{ "file_path": "peft/examples/conditional_generation/multitask_prompt_tuning.ipynb", "repo_id": "peft", "token_count": 3398 }
233
<jupyter_start><jupyter_text>PEFT with DNA Language Models This notebook demonstrates how to utilize parameter-efficient fine-tuning techniques (PEFT) from the PEFT library to fine-tune a DNA Language Model (DNA-LM). The fine-tuned DNA-LM will be applied to solve a task from the nucleotide benchmark dataset. Parameter-efficient fine-tuning (PEFT) techniques are crucial for adapting large pre-trained models to specific tasks with limited computational resources. 1. Import relevant libraries We'll start by importing the required libraries, including the PEFT library and other dependencies.<jupyter_code>import torch import transformers import peft import tqdm import numpy as np<jupyter_output>/opt/homebrew/anaconda3/envs/peft/lib/python3.12/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html from .autonotebook import tqdm as notebook_tqdm<jupyter_text>2. Load models We'll load a pre-trained DNA Language Model, "SpeciesLM", that serves as the base for fine-tuning. This is done using the transformers library from HuggingFace.The tokenizer and the model comes from the paper, "Species-aware DNA language models capture regulatory elements and their evolution". [Paper Link](https://www.biorxiv.org/content/10.1101/2023.01.26.525670v2), [Code Link](https://github.com/gagneurlab/SpeciesLM). They introduce a species-aware DNA language model, which is trained on more than 800 species spanning over 500 million years of evolution.<jupyter_code>from transformers import AutoTokenizer, AutoModelForMaskedLM tokenizer = AutoTokenizer.from_pretrained("gagneurlab/SpeciesLM", revision = "downstream_species_lm") lm = AutoModelForMaskedLM.from_pretrained("gagneurlab/SpeciesLM", revision = "downstream_species_lm") lm.eval() device = torch.accelerator.current_accelerator().type if hasattr(torch, "accelerator") else "cuda" lm.to(device);<jupyter_output><empty_output><jupyter_text>2. Prepare datasets We'll load the `nucleotide_transformer_downstream_tasks` dataset, which contains 18 downstream tasks from the Nucleotide Transformer paper. This dataset provides a consistent genomics benchmark with binary classification tasks.<jupyter_code>from datasets import load_dataset raw_data_full = load_dataset("InstaDeepAI/nucleotide_transformer_downstream_tasks") raw_data = raw_data_full.filter(lambda example: example['task'] == 'H3')<jupyter_output><empty_output><jupyter_text>We'll use the "H3" subset of this dataset, which contains a total of 13,468 rows in the training data, and 1497 rows in the test data.<jupyter_code>raw_data<jupyter_output><empty_output><jupyter_text>The dataset consists of three columns, ```sequence```, ```name``` and ```label```. An row in this dataset looks like:<jupyter_code>raw_data['train'][0]<jupyter_output><empty_output><jupyter_text>We split out dataset into training, test, and validation sets.<jupyter_code>from datasets import Dataset, DatasetDict train_valid_split = raw_data['train'].train_test_split(test_size=0.15, seed=42) train_valid_split = DatasetDict({ 'train': train_valid_split['train'], 'validation': train_valid_split['test'] }) ds = DatasetDict({ 'train': train_valid_split['train'], 'validation': train_valid_split['validation'], 'test': raw_data['test'] })<jupyter_output><empty_output><jupyter_text>Then, we use the tokenizer and a utility function we created, ```get_kmers``` to generate the final data and labels. 
The ```get_kmers``` function is essential for generating overlapping 6-mers needed by the language model (LM). By using k=6 and stride=1, we ensure that the model receives continuous and overlapping subsequences, capturing the local context within the biological sequence for more effective analysis and prediction.<jupyter_code>def get_kmers(seq, k=6, stride=1): return [seq[i:i + k] for i in range(0, len(seq), stride) if i + k <= len(seq)] test_sequences = [] train_sequences = [] val_sequences = [] dataset_limit = 200 # NOTE: This dataset limit is set to 200, so that the training runs faster. It can be set to None to use the # entire dataset for i in range(0, len(ds['train'])): if dataset_limit and i == dataset_limit: break sequence = ds['train'][i]['sequence'] sequence = "candida_glabrata " + " ".join(get_kmers(sequence)) sequence = tokenizer(sequence)["input_ids"] train_sequences.append(sequence) for i in range(0, len(ds['validation'])): if dataset_limit and i == dataset_limit: break sequence = ds['validation'][i]['sequence'] sequence = "candida_glabrata " + " ".join(get_kmers(sequence)) sequence = tokenizer(sequence)["input_ids"] val_sequences.append(sequence) for i in range(0, len(ds['test'])): if dataset_limit and i == dataset_limit: break sequence = ds['test'][i]['sequence'] sequence = "candida_glabrata " + " ".join(get_kmers(sequence)) sequence = tokenizer(sequence)["input_ids"] test_sequences.append(sequence) train_labels = ds['train']['label'] test_labels = ds['test']['label'] val_labels = ds['validation']['label'] if dataset_limit: train_labels = train_labels[0:dataset_limit] test_labels = test_labels[0:dataset_limit] val_labels = val_labels[0:dataset_limit]<jupyter_output><empty_output><jupyter_text>Finally, we create a Dataset object for each our sets.<jupyter_code>from datasets import Dataset train_dataset = Dataset.from_dict({"input_ids": train_sequences, "labels": train_labels}) val_dataset = Dataset.from_dict({"input_ids": val_sequences, "labels": val_labels}) test_dataset = Dataset.from_dict({"input_ids": test_sequences, "labels": test_labels})<jupyter_output><empty_output><jupyter_text>4. Train model Now, we'll train our DNA Language Model with the training dataset. 
We'll add a linear classification layer on top of the final layer of our language model, and then train all the parameters of our model with the training dataset.<jupyter_code>from transformers import DataCollatorWithPadding data_collator = DataCollatorWithPadding(tokenizer=tokenizer) import torch from torch import nn class DNA_LM(nn.Module): def __init__(self, model, num_labels): super(DNA_LM, self).__init__() self.model = model.bert self.in_features = model.config.hidden_size self.out_features = num_labels self.classifier = nn.Linear(self.in_features, self.out_features) def forward(self, input_ids, attention_mask=None, labels=None): outputs = self.model(input_ids=input_ids, attention_mask=attention_mask, output_hidden_states=True) sequence_output = outputs.hidden_states[-1] # Use the [CLS] token for classification cls_output = sequence_output[:, 0, :] logits = self.classifier(cls_output) loss = None if labels is not None: loss_fct = nn.CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.out_features), labels.view(-1)) return (loss, logits) if loss is not None else logits # Number of classes for your classification task num_labels = 2 classification_model = DNA_LM(lm, num_labels) classification_model.to(device); from transformers import DataCollatorWithPadding data_collator = DataCollatorWithPadding(tokenizer=tokenizer) from transformers import Trainer, TrainingArguments # Define training arguments training_args = TrainingArguments( output_dir='./results', eval_strategy="epoch", learning_rate=2e-5, per_device_train_batch_size=16, per_device_eval_batch_size=16, num_train_epochs=5, weight_decay=0.01, eval_steps=1, logging_steps=1, ) # Initialize Trainer trainer = Trainer( model=classification_model, args=training_args, train_dataset=train_dataset, eval_dataset=val_dataset, tokenizer=tokenizer, data_collator=data_collator, ) # Train the model trainer.train()<jupyter_output><empty_output><jupyter_text>5. Evaluation<jupyter_code># Generate predictions predictions = trainer.predict(test_dataset) logits = predictions.predictions predicted_labels = logits.argmax(axis=-1) print(predicted_labels)<jupyter_output><empty_output><jupyter_text>Then, we create a function to calculate the accuracy from the test and predicted labels.<jupyter_code>def calculate_accuracy(true_labels, predicted_labels): assert len(true_labels) == len(predicted_labels), "Arrays must have the same length" correct_predictions = np.sum(true_labels == predicted_labels) accuracy = correct_predictions / len(true_labels) return accuracy accuracy = calculate_accuracy(test_labels, predicted_labels) print(f"Accuracy: {accuracy:.2f}")<jupyter_output>Accuracy: 0.53<jupyter_text>The results aren't that good, which we can attribute to the small dataset size. 7. Parameter Efficient Fine-Tuning Techniques In this section, we demonstrate how to employ parameter-efficient fine-tuning (PEFT) techniques to adapt a pre-trained model for specific genomics tasks using the PEFT library.
The LoraConfig object is instantiated to configure the PEFT parameters:- task_type: Specifies the type of task, in this case, sequence classification (SEQ_CLS).- r: The rank of the LoRA matrices.- lora_alpha: Scaling factor for adaptive re-parameterization.- target_modules: Modules within the model to apply PEFT re-parameterization (query, key, value in this example).- lora_dropout: Dropout rate used during PEFT fine-tuning.<jupyter_code># Number of classes for your classification task num_labels = 2 classification_model = DNA_LM(lm, num_labels) classification_model.to(device); from peft import LoraConfig, TaskType peft_config = LoraConfig( r=8, lora_alpha=32, target_modules=["query", "key", "value"], lora_dropout=0.01, ) from peft import get_peft_model peft_model = get_peft_model(classification_model, peft_config) peft_model.print_trainable_parameters() peft_model # Define training arguments training_args = TrainingArguments( output_dir='./results', eval_strategy="epoch", learning_rate=2e-5, per_device_train_batch_size=16, per_device_eval_batch_size=16, num_train_epochs=5, weight_decay=0.01, eval_steps=1, logging_steps=1, ) # Initialize Trainer trainer = Trainer( model=peft_model.model, args=training_args, train_dataset=train_dataset, eval_dataset=val_dataset, tokenizer=tokenizer, data_collator=data_collator, ) # Train the model trainer.train()<jupyter_output><empty_output><jupyter_text>8. Evaluate PEFT Model<jupyter_code># Generate predictions predictions = trainer.predict(test_dataset) logits = predictions.predictions predicted_labels = logits.argmax(axis=-1) print(predicted_labels) def calculate_accuracy(true_labels, predicted_labels): assert len(true_labels) == len(predicted_labels), "Arrays must have the same length" correct_predictions = np.sum(true_labels == predicted_labels) accuracy = correct_predictions / len(true_labels) return accuracy accuracy = calculate_accuracy(test_labels, predicted_labels) print(f"Accuracy: {accuracy:.2f}")<jupyter_output>Accuracy: 0.52
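<jupyter_text>Finally, we can save the trained adapter. This is a minimal illustrative step (the output directory name here is arbitrary): calling `save_pretrained` on the PEFT-wrapped model writes only the small LoRA adapter weights and their config, which can later be loaded back on top of the base classification model.<jupyter_code>peft_model.save_pretrained("dna_lm_lora_adapter")<jupyter_output><empty_output>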
peft/examples/dna_language_models/dna_lm.ipynb/0
{ "file_path": "peft/examples/dna_language_models/dna_lm.ipynb", "repo_id": "peft", "token_count": 3835 }
234
<jupyter_start><jupyter_code>import os from PIL import Image import torch from accelerate.logging import get_logger from diffusers import StableDiffusionPipeline from diffusers.utils import check_min_version from peft import PeftModel # Will error if the minimal version of diffusers is not installed. Remove at your own risks. check_min_version("0.10.0.dev0") logger = get_logger(__name__) MODEL_NAME = "stabilityai/stable-diffusion-2-1" PEFT_TYPE="hra" HRA_R=8 SELECTED_SUBJECT="backpack" EPOCH_IDX = 1000 PROJECT_NAME=f"dreambooth_{PEFT_TYPE}" RUN_NAME=f"{SELECTED_SUBJECT}_{PEFT_TYPE}_{HRA_R}" OUTPUT_DIR=f"./data/output/{PEFT_TYPE}" def get_hra_sd_pipeline( ckpt_dir, base_model_name_or_path=None, epoch=int, dtype=torch.float32, device="cuda", adapter_name="default" ): if base_model_name_or_path is None: raise ValueError("Please specify the base model name or path") pipe = StableDiffusionPipeline.from_pretrained( base_model_name_or_path, torch_dtype=dtype, requires_safety_checker=False ).to(device) load_adapter(pipe, ckpt_dir, epoch, adapter_name) if dtype in (torch.float16, torch.bfloat16): pipe.unet.half() pipe.text_encoder.half() pipe.to(device) return pipe def load_adapter(pipe, ckpt_dir, epoch, adapter_name="default"): unet_sub_dir = os.path.join(ckpt_dir, f"unet/{epoch}", adapter_name) text_encoder_sub_dir = os.path.join(ckpt_dir, f"text_encoder/{epoch}", adapter_name) if isinstance(pipe.unet, PeftModel): pipe.unet.load_adapter(unet_sub_dir, adapter_name=adapter_name) else: pipe.unet = PeftModel.from_pretrained(pipe.unet, unet_sub_dir, adapter_name=adapter_name) if os.path.exists(text_encoder_sub_dir): if isinstance(pipe.text_encoder, PeftModel): pipe.text_encoder.load_adapter(text_encoder_sub_dir, adapter_name=adapter_name) else: pipe.text_encoder = PeftModel.from_pretrained(pipe.text_encoder, text_encoder_sub_dir, adapter_name=adapter_name) def set_adapter(pipe, adapter_name): pipe.unet.set_adapter(adapter_name) if isinstance(pipe.text_encoder, PeftModel): pipe.text_encoder.set_adapter(adapter_name) prompt = "a purple qwe backpack." negative_prompt = "low quality, blurry, unfinished" device = torch.accelerator.current_accelerator().type if hasattr(torch, "accelerator") else "cuda" %%time pipe = get_hra_sd_pipeline(OUTPUT_DIR, MODEL_NAME, EPOCH_IDX, adapter_name=RUN_NAME, device=device) %%time image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5, negative_prompt=negative_prompt).images[0] image # This is an example. example_image = Image.open("./a_purple_qwe_backpack.png") example_image<jupyter_output><empty_output>
peft/examples/hra_dreambooth/dreambooth_inference.ipynb/0
{ "file_path": "peft/examples/hra_dreambooth/dreambooth_inference.ipynb", "repo_id": "peft", "token_count": 1150 }
235
<jupyter_start><jupyter_text>Finetuning Whisper-large-V2 on Colab using PEFT-Lora + BNB INT8 training In this Colab, we present a step-by-step guide on how to fine-tune Whisper for any multilingual ASR dataset using Hugging Face 🤗 Transformers and 🤗 PEFT. Using 🤗 PEFT and `bitsandbytes`, you can train the `whisper-large-v2` seamlessly on a colab with T4 GPU (16 GB VRAM). In this notebook, with most parts from [fine_tune_whisper.ipynb](https://colab.research.google.com/github/sanchit-gandhi/notebooks/blob/main/fine_tune_whisper.ipynbscrollTo=BRdrdFIeU78w) is adapted to train using PEFT LoRA+BNB INT8.For more details on model, datasets and metrics, refer blog [Fine-Tune Whisper For Multilingual ASR with 🤗 Transformers](https://huggingface.co/blog/fine-tune-whisper) initial Setup<jupyter_code>!add-apt-repository -y ppa:jonathonf/ffmpeg-4 !apt update !apt install -y ffmpeg !pip install datasets==3.6.0 !pip install git+https://github.com/huggingface/transformers !pip install librosa !pip install evaluate>=0.30 !pip install jiwer !pip install gradio !pip install -q datasets accelerate !pip install -q git+https://github.com/bitsandbytes-foundation/bitsandbytes.git !pip install -q git+https://github.com/huggingface/transformers.git@main git+https://github.com/huggingface/peft.git@main<jupyter_output><empty_output><jupyter_text>Linking the notebook to the Hub is straightforward - it simply requires entering your Hub authentication token when prompted. Find your Hub authentication token [here](https://huggingface.co/settings/tokens):<jupyter_code>from huggingface_hub import notebook_login notebook_login() # Select CUDA device index import os os.environ["CUDA_VISIBLE_DEVICES"] = "0" model_name_or_path = "openai/whisper-large-v2" language = "Marathi" language_abbr = "mr" task = "transcribe" dataset_name = "mozilla-foundation/common_voice_11_0"<jupyter_output><empty_output><jupyter_text>Load Dataset<jupyter_code>from datasets import load_dataset, DatasetDict common_voice = DatasetDict() common_voice["train"] = load_dataset(dataset_name, language_abbr, split="train+validation") common_voice["test"] = load_dataset(dataset_name, language_abbr, split="test") print(common_voice) common_voice = common_voice.remove_columns( ["accent", "age", "client_id", "down_votes", "gender", "locale", "path", "segment", "up_votes"] ) print(common_voice)<jupyter_output>DatasetDict({ train: Dataset({ features: ['audio', 'sentence'], num_rows: 3927 }) test: Dataset({ features: ['audio', 'sentence'], num_rows: 1816 }) })<jupyter_text>Prepare Feature Extractor, Tokenizer and Data<jupyter_code>from transformers import WhisperFeatureExtractor feature_extractor = WhisperFeatureExtractor.from_pretrained(model_name_or_path) from transformers import WhisperTokenizer tokenizer = WhisperTokenizer.from_pretrained(model_name_or_path, language=language, task=task) from transformers import WhisperProcessor processor = WhisperProcessor.from_pretrained(model_name_or_path, language=language, task=task)<jupyter_output><empty_output><jupyter_text>Prepare Data<jupyter_code>print(common_voice["train"][0])<jupyter_output>{'audio': {'path': '/root/.cache/huggingface/datasets/downloads/extracted/f7e1ef6a2d14f20194999aad5040c5d4bb3ead1377de3e1bbc6e9dba34d18a8a/common_voice_mr_30585613.mp3', 'array': array([-1.3727526e-15, -1.2400461e-13, -1.5159097e-13, ..., 4.7928120e-06, 3.5631349e-06, 1.6352631e-06], dtype=float32), 'sampling_rate': 48000}, 'sentence': 'आईचे आजारपण वाढत चालले, तसतशी मथीही नीट खातपीतनाशी झाली.'}<jupyter_text>Since our input audio 
is sampled at 48kHz, we need to _downsample_ it to 16kHz prior to passing it to the Whisper feature extractor, 16kHz being the sampling rate expected by the Whisper model. We'll set the audio inputs to the correct sampling rate using dataset's [`cast_column`](https://huggingface.co/docs/datasets/package_reference/main_classes.html?highlight=cast_columndatasets.DatasetDict.cast_column)method. This operation does not change the audio in-place, but rather signals to `datasets` to resample audio samples _on the fly_ the first time that they are loaded:<jupyter_code>from datasets import Audio common_voice = common_voice.cast_column("audio", Audio(sampling_rate=16000))<jupyter_output><empty_output><jupyter_text>Re-loading the first audio sample in the Common Voice dataset will resample it to the desired sampling rate:<jupyter_code>print(common_voice["train"][0])<jupyter_output>{'audio': {'path': '/root/.cache/huggingface/datasets/downloads/extracted/f7e1ef6a2d14f20194999aad5040c5d4bb3ead1377de3e1bbc6e9dba34d18a8a/common_voice_mr_30585613.mp3', 'array': array([-4.4097186e-14, -9.4153831e-14, 3.4645775e-13, ..., -7.6018655e-06, -1.8617659e-06, 4.4520480e-06], dtype=float32), 'sampling_rate': 16000}, 'sentence': 'आईचे आजारपण वाढत चालले, तसतशी मथीही नीट खातपीतनाशी झाली.'}<jupyter_text>Now we can write a function to prepare our data ready for the model:1. We load and resample the audio data by calling `batch["audio"]`. As explained above, 🤗 Datasets performs any necessary resampling operations on the fly.2. We use the feature extractor to compute the log-Mel spectrogram input features from our 1-dimensional audio array.3. We encode the transcriptions to label ids through the use of the tokenizer.<jupyter_code>def prepare_dataset(batch): # load and resample audio data from 48 to 16kHz audio = batch["audio"] # compute log-Mel input features from input audio array batch["input_features"] = feature_extractor(audio["array"], sampling_rate=audio["sampling_rate"]).input_features[0] # encode target text to label ids batch["labels"] = tokenizer(batch["sentence"]).input_ids return batch<jupyter_output><empty_output><jupyter_text>We can apply the data preparation function to all of our training examples using dataset's `.map` method. The argument `num_proc` specifies how many CPU cores to use. Setting `num_proc` > 1 will enable multiprocessing. 
If the `.map` method hangs with multiprocessing, set `num_proc=1` and process the dataset sequentially.<jupyter_code>common_voice = common_voice.map(prepare_dataset, remove_columns=common_voice.column_names["train"], num_proc=2) common_voice["train"]<jupyter_output><empty_output><jupyter_text>Training and Evaluation Define a Data Collator<jupyter_code>import torch from dataclasses import dataclass from typing import Any, Dict, List, Union @dataclass class DataCollatorSpeechSeq2SeqWithPadding: processor: Any def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]: # split inputs and labels since they have to be of different lengths and need different padding methods # first treat the audio inputs by simply returning torch tensors input_features = [{"input_features": feature["input_features"]} for feature in features] batch = self.processor.feature_extractor.pad(input_features, return_tensors="pt") # get the tokenized label sequences label_features = [{"input_ids": feature["labels"]} for feature in features] # pad the labels to max length labels_batch = self.processor.tokenizer.pad(label_features, return_tensors="pt") # replace padding with -100 to ignore loss correctly labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100) # if bos token is appended in previous tokenization step, # cut bos token here as it's append later anyways if (labels[:, 0] == self.processor.tokenizer.bos_token_id).all().cpu().item(): labels = labels[:, 1:] batch["labels"] = labels return batch<jupyter_output><empty_output><jupyter_text>Let's initialise the data collator we've just defined:<jupyter_code>data_collator = DataCollatorSpeechSeq2SeqWithPadding(processor=processor)<jupyter_output><empty_output><jupyter_text>Evaluation Metrics We'll use the word error rate (WER) metric, the 'de-facto' metric for assessing ASR systems. For more information, refer to the WER [docs](https://huggingface.co/metrics/wer). We'll load the WER metric from 🤗 Evaluate:<jupyter_code>import evaluate metric = evaluate.load("wer")<jupyter_output><empty_output><jupyter_text>We then simply have to define a function that takes our model predictions and returns the WER metric. This function, called`compute_metrics`, first replaces `-100` with the `pad_token_id`in the `label_ids` (undoing the step we applied in the data collator to ignore padded tokens correctly in the loss).It then decodes the predicted and label ids to strings. Finally,it computes the WER between the predictions and reference labels:<jupyter_code>def compute_metrics(pred): pred_ids = pred.predictions label_ids = pred.label_ids # replace -100 with the pad_token_id label_ids[label_ids == -100] = tokenizer.pad_token_id # we do not want to group tokens when computing the metrics pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True) label_str = tokenizer.batch_decode(label_ids, skip_special_tokens=True) wer = 100 * metric.compute(predictions=pred_str, references=label_str) return {"wer": wer}<jupyter_output><empty_output><jupyter_text>Load a Pre-Trained Checkpoint Now let's load the pre-trained Whisper `small` checkpoint. 
Again, this is trivial through use of 🤗 Transformers!<jupyter_code>from transformers import WhisperForConditionalGeneration, BitsAndBytesConfig model = WhisperForConditionalGeneration.from_pretrained(model_name_or_path, quantization_config=BitsAndBytesConfig(load_in_8bit=True)) # model.hf_device_map - this should be {" ": 0}<jupyter_output><empty_output><jupyter_text>Override generation arguments - no tokens are forced as decoder outputs (see [`forced_decoder_ids`](https://huggingface.co/docs/transformers/main_classes/text_generationtransformers.generation_utils.GenerationMixin.generate.forced_decoder_ids)), no tokens are suppressed during generation (see [`suppress_tokens`](https://huggingface.co/docs/transformers/main_classes/text_generationtransformers.generation_utils.GenerationMixin.generate.suppress_tokens)):<jupyter_code>model.config.forced_decoder_ids = None model.config.suppress_tokens = []<jupyter_output><empty_output><jupyter_text>Post-processing on the modelFinally, we need to apply some post-processing on the 8-bit model to enable training, let's freeze all our layers, and cast all non `int8` layers in `float32` for stability.<jupyter_code>from peft import prepare_model_for_kbit_training model = prepare_model_for_kbit_training(model)<jupyter_output><empty_output><jupyter_text>Apply LoRAHere comes the magic with `peft`! Let's load a `PeftModel` and specify that we are going to use low-rank adapters (LoRA) using `get_peft_model` utility function from `peft`.<jupyter_code>from peft import LoraConfig, PeftModel, LoraModel, LoraConfig, get_peft_model config = LoraConfig(r=32, lora_alpha=64, target_modules=["q_proj", "v_proj"], lora_dropout=0.05, bias="none") model = get_peft_model(model, config) model.print_trainable_parameters()<jupyter_output>trainable params: 15728640 || all params: 1559033600 || trainable%: 1.0088711365810203<jupyter_text>We are ONLY using **1%** of the total trainable parameters, thereby performing **Parameter-Efficient Fine-Tuning** Define the Training Configuration In the final step, we define all the parameters related to training. For more detail on the training arguments, refer to the Seq2SeqTrainingArguments [docs](https://huggingface.co/docs/transformers/main_classes/trainertransformers.Seq2SeqTrainingArguments).<jupyter_code>from transformers import Seq2SeqTrainingArguments training_args = Seq2SeqTrainingArguments( output_dir="temp", # change to a repo name of your choice per_device_train_batch_size=8, gradient_accumulation_steps=1, # increase by 2x for every 2x decrease in batch size learning_rate=1e-3, warmup_steps=50, num_train_epochs=3, eval_strategy="epoch", fp16=True, per_device_eval_batch_size=8, generation_max_length=128, logging_steps=25, remove_unused_columns=False, # required as the PeftModel forward doesn't have the signature of the wrapped model's forward label_names=["labels"], # same reason as above )<jupyter_output><empty_output><jupyter_text>**Few Important Notes:**1. `remove_unused_columns=False` and `label_names=["labels"]` are required as the PeftModel's forward doesn't have the signature of the base model's forward.2. INT8 training required autocasting. `predict_with_generate` can't be passed to Trainer because it internally calls transformer's `generate` without autocasting leading to errors. 3. Because of point 2, `compute_metrics` shouldn't be passed to `Seq2SeqTrainer` as seen below. 
(commented out)<jupyter_code>from transformers import Seq2SeqTrainer, TrainerCallback, TrainingArguments, TrainerState, TrainerControl from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR class SavePeftModelCallback(TrainerCallback): def on_save( self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs, ): checkpoint_folder = os.path.join(args.output_dir, f"{PREFIX_CHECKPOINT_DIR}-{state.global_step}") peft_model_path = os.path.join(checkpoint_folder, "adapter_model") kwargs["model"].save_pretrained(peft_model_path) pytorch_model_path = os.path.join(checkpoint_folder, "pytorch_model.bin") if os.path.exists(pytorch_model_path): os.remove(pytorch_model_path) return control trainer = Seq2SeqTrainer( args=training_args, model=model, train_dataset=common_voice["train"], eval_dataset=common_voice["test"], data_collator=data_collator, # compute_metrics=compute_metrics, processing_class=processor.feature_extractor, callbacks=[SavePeftModelCallback], ) model.config.use_cache = False # silence the warnings. Please re-enable for inference! trainer.train() model_name_or_path = "openai/whisper-large-v2" peft_model_id = "smangrul/" + f"{model_name_or_path}-{model.peft_config['default'].peft_type.value}-colab".replace("/", "-") model.push_to_hub(peft_model_id) print(peft_model_id)<jupyter_output>Uploading the following files to smangrul/openai-whisper-large-v2-LORA-colab: adapter_model.bin,adapter_config.json<jupyter_text>Evaluation and Inference **Important points to note while inferencing**:1. As `predict_with_generate` can't be used, we will write the eval loop with `torch.cuda.amp.autocast()` as shown below. 2. As the base model is frozen, PEFT model sometimes fails ot recognise the language while decoding.Hence, we force the starting tokens to mention the language we are transcribing. This is done via `forced_decoder_ids = processor.get_decoder_prompt_ids(language="Marathi", task="transcribe")` and passing that to the `model.generate` call.3. Please note that [AutoEvaluate Leaderboard](https://huggingface.co/spaces/autoevaluate/leaderboards?dataset=mozilla-foundation%2Fcommon_voice_11_0&only_verified=0&task=automatic-speech-recognition&config=mr&split=test&metric=wer) for `mr` language on `common_voice_11_0` has a bug wherein openai's `BasicTextNormalizer` normalizer is used while evaluation leading to degerated output text, an example is shown below:```without normalizer: 'स्विच्चान नरुवित्तीची पद्दत मोठ्या प्रमाणात आमलात आणल्या बसोन या दुपन्याने अनेक राथ प्रवेश केला आहे.'with normalizer: 'स व च च न नर व त त च पद दत म ठ य प रम ण त आमल त आणल य बस न य द पन य न अन क र थ प रव श क ल आह'```Post fixing this bug, we report the 2 metrics for the top model of the leaderboard and the PEFT model:1. `wer`: `wer` without using the `BasicTextNormalizer` as it doesn't cater to most indic languages. This is want we consider as true performance metric.2. `normalized_wer`: `wer` using the `BasicTextNormalizer` to be comparable to the leaderboard metrics.Below are the results:| Model | DrishtiSharma/whisper-large-v2-marathi | smangrul/openai-whisper-large-v2-LORA-colab ||----------------|----------------------------------------|---------------------------------------------|| wer | 35.6457 | 36.1356 || normalized_wer | 13.6440 | 14.0165 |We see that PEFT model's performance is comparable to the fully fine-tuned model on the top of the leaderboard. 
At the same time, we are able to train the large model in Colab notebook with limited GPU memory and the added advantage of resulting checkpoint being jsut `63` MB.<jupyter_code>from peft import PeftModel, PeftConfig from transformers import WhisperForConditionalGeneration, Seq2SeqTrainer peft_model_id = "smangrul/openai-whisper-large-v2-LORA-colab" peft_config = PeftConfig.from_pretrained(peft_model_id) model = WhisperForConditionalGeneration.from_pretrained( peft_config.base_model_name_or_path, quantization_config=BitsAndBytesConfig(load_in_8bit=True), device_map="auto" ) model = PeftModel.from_pretrained(model, peft_model_id) from torch.utils.data import DataLoader from tqdm import tqdm import numpy as np import gc device_type = torch.accelerator.current_accelerator().type if hasattr(torch, "accelerator") else "cuda" eval_dataloader = DataLoader(common_voice["test"], batch_size=8, collate_fn=data_collator) model.eval() for step, batch in enumerate(tqdm(eval_dataloader)): with torch.amp.autocast(device_type=device_type): with torch.no_grad(): generated_tokens = ( model.generate( input_features=batch["input_features"].to(model.device), decoder_input_ids=batch["labels"][:, :4].to(model.device), max_new_tokens=255, ) .cpu() .numpy() ) labels = batch["labels"].cpu().numpy() labels = np.where(labels != -100, labels, tokenizer.pad_token_id) decoded_preds = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True) decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True) metric.add_batch( predictions=decoded_preds, references=decoded_labels, ) del generated_tokens, labels, batch gc.collect() wer = 100 * metric.compute() print(f"{wer=}")<jupyter_output><empty_output><jupyter_text>Using AutomaticSpeechRecognitionPipeline **Few important notes:**1. `pipe()` should be in the autocast context manager `with torch.cuda.amp.autocast():`2. `forced_decoder_ids` specifying the `language` being transcribed should be provided in `generate_kwargs` dict.3. You will get warning along the below lines which is **safe to ignore**.```The model 'PeftModel' is not supported for . 
Supported models are ['SpeechEncoderDecoderModel', 'Speech2TextForConditionalGeneration', 'SpeechT5ForSpeechToText', 'WhisperForConditionalGeneration', 'Data2VecAudioForCTC', 'HubertForCTC', 'MCTCTForCTC', 'SEWForCTC', 'SEWDForCTC', 'UniSpeechForCTC', 'UniSpeechSatForCTC', 'Wav2Vec2ForCTC', 'Wav2Vec2ConformerForCTC', 'WavLMForCTC'].```<jupyter_code>import torch import gradio as gr from transformers import ( AutomaticSpeechRecognitionPipeline, WhisperForConditionalGeneration, WhisperTokenizer, WhisperProcessor, ) from peft import PeftModel, PeftConfig peft_model_id = "smangrul/openai-whisper-large-v2-LORA-colab" language = "Marathi" task = "transcribe" peft_config = PeftConfig.from_pretrained(peft_model_id) model = WhisperForConditionalGeneration.from_pretrained( peft_config.base_model_name_or_path, quantization_config=BitsAndBytesConfig(load_in_8bit=True), device_map="auto" ) model = PeftModel.from_pretrained(model, peft_model_id) tokenizer = WhisperTokenizer.from_pretrained(peft_config.base_model_name_or_path, language=language, task=task) processor = WhisperProcessor.from_pretrained(peft_config.base_model_name_or_path, language=language, task=task) feature_extractor = processor.feature_extractor forced_decoder_ids = processor.get_decoder_prompt_ids(language=language, task=task) pipe = AutomaticSpeechRecognitionPipeline(model=model, tokenizer=tokenizer, feature_extractor=feature_extractor) def transcribe(audio): with torch.cuda.amp.autocast(): text = pipe(audio, generate_kwargs={"forced_decoder_ids": forced_decoder_ids}, max_new_tokens=255)["text"] return text iface = gr.Interface( fn=transcribe, inputs=gr.Audio(source="microphone", type="filepath"), outputs="text", title="PEFT LoRA + INT8 Whisper Large V2 Marathi", description="Realtime demo for Marathi speech recognition using `PEFT-LoRA+INT8` fine-tuned Whisper Large V2 model.", ) iface.launch(share=True)<jupyter_output><empty_output>
peft/examples/int8_training/peft_bnb_whisper_large_v2_training.ipynb/0
{ "file_path": "peft/examples/int8_training/peft_bnb_whisper_large_v2_training.ipynb", "repo_id": "peft", "token_count": 7766 }
236
# RandLora: Full-rank parameter-efficient fine-tuning of large models

## Introduction

[RandLora](https://huggingface.co/papers/2502.00987) is a parameter-efficient fine-tuning technique that is similar to LoRA and VeRA but performs full rank updates to improve performance. RandLora can be particularly useful when adapting large models to hard tasks that require complex updates while preserving the parameter efficiency of LoRA. The full rank update of RandLora is achieved by linearly scaling random bases. The random bases are a collection of multiple low rank matrices such that the summation of their ranks is greater than or equal to the full rank of the parameter matrices. The trainable parameters of RandLora are two diagonal matrices (vectors) that get multiplied with the right hand low rank random bases, in a similar way to VeRA's update. To maintain low memory usage, RandLora uses a custom function that prevents storing unnecessary bases in memory for backpropagation.

## Quick start

```python
import torch
from peft import RandLoraConfig, get_peft_model
from transformers import AutoTokenizer, AutoModelForCausalLM, Trainer
from datasets import load_dataset

model = AutoModelForCausalLM.from_pretrained("huggyllama/llama-7b", device_map="auto")
tokenizer = AutoTokenizer.from_pretrained("huggyllama/llama-7b")
dataset = load_dataset("timdettmers/openassistant-guanaco", split="train")
randlora_config = RandLoraConfig()
peft_model = get_peft_model(model, randlora_config)
trainer = Trainer(
    model=peft_model,
    train_dataset=dataset,
    dataset_text_field="text",
    max_seq_length=2048,
    processing_class=tokenizer,
)
trainer.train()
peft_model.save_pretrained("randlora-llama-7b")
```

There is no additional change needed to your standard PEFT training procedure; simply swap your `LoraConfig` for a `RandLoraConfig`. Note however that RandLora's trainable parameter count is **inversely proportional** to the rank parameter `r`: lower `r` to increase the number of trainable parameters, and raise it to reduce them.

Run the finetuning script simply by running:

```bash
python examples/randlora_finetuning/randlora_finetuning.py --base_model meta-llama/Meta-Llama-3-8B --data_path timdettmers/openassistant-guanaco
```

This 👆🏻 will by default load the model in PEFT, set up with the RandLora config. Now if you want to quickly compare it with LoRA, all you need to do is add `--use_lora` on the command line and reduce `--randlora_alpha` to 2x the rank. So the same example as above would be 👇🏻:

```bash
python examples/randlora_finetuning/randlora_finetuning.py --base_model meta-llama/Meta-Llama-3-8B --data_path timdettmers/openassistant-guanaco --use_lora --rank 32 --randlora_alpha 64
```

RandLora can be made to use sparse or very sparse random bases. These sparse matrices can help reduce overfitting. Add `--very_sparse` to run with very sparse matrices or `--sparse` for sparse matrices:

```bash
python examples/randlora_finetuning/randlora_finetuning.py --base_model meta-llama/Meta-Llama-3-8B --sparse
```

RandLora also supports quantization. To use 4-bit quantization try:

```bash
python examples/randlora_finetuning/randlora_finetuning.py --base_model meta-llama/Meta-Llama-3-8B --quantize
```

By default, the RandLora layers are the key and value layers of the Llama model. Adding adapters on more layers will increase memory usage.
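When using the library directly rather than the example script, the adapted layers are chosen through the `target_modules` argument of `RandLoraConfig`. The snippet below is only a sketch: the module names assume a Llama-style architecture, and the hyperparameter values simply mirror the full script example further down.

```python
from transformers import AutoModelForCausalLM
from peft import RandLoraConfig, get_peft_model

# Illustrative sketch: module names assume a Llama-style attention block.
model = AutoModelForCausalLM.from_pretrained("meta-llama/Meta-Llama-3-8B")
config = RandLoraConfig(
    r=32,                      # basis rank; note that a *lower* r means more trainable parameters
    randlora_alpha=640,        # scaling factor, typically around 20x the basis rank
    randlora_dropout=0.05,
    target_modules=["q_proj", "k_proj", "v_proj"],
)
peft_model = get_peft_model(model, config)
peft_model.print_trainable_parameters()
```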
If you wish to choose a different set of layers for RandLora to be applied on from the finetuning script, you can define them using:

```bash
python examples/randlora_finetuning/randlora_finetuning.py --randlora_target_modules "q_proj,k_proj,v_proj"
```

### Full example of the script

```bash
python randlora_finetuning.py \
    --base_model "PATH_TO_MODEL" \
    --data_path "PATH_TO_DATASET" \
    --output_dir "PATH_TO_OUTPUT_DIR" \
    --batch_size 1 \
    --num_epochs 3 \
    --learning_rate 3e-4 \
    --cutoff_len 512 \
    --val_set_size 500 \
    --quantize \
    --eval_step 10 \
    --save_step 100 \
    --device "auto" \
    --rank 32 \
    --randlora_alpha 640 \
    --randlora_dropout 0.05 \
    --randlora_target_modules "k_proj,v_proj" \
    --hub_model_id "YOUR_HF_REPO" \
    --push_to_hub
```

## RandLora vs. LoRA

RandLora differs from LoRA and other related low rank approximation algorithms by challenging the low rank paradigm. RandLora adapters learn **full-rank** updates, as the [paper](https://huggingface.co/papers/2502.00987) shows that the low rank constraint of LoRA can limit performance gains as trainable parameters increase (with higher ranks). As a result, using RandLora is specifically recommended for difficult tasks that are underfit by LoRA, although RandLora also often improves performance on common tasks. If increasing LoRA's rank improves performance for your task, RandLora will most likely outperform it.

RandLora is expected to increase performance over LoRA for equivalent amounts of trainable parameters, mostly for larger equivalent amounts (> LoRA rank 4).

RandLora's performance increase comes with two limitations:
1. Performance depends on using a large `randlora_alpha` scaling parameter (usually 20x the basis rank). This large parameter can sometimes make training the update unstable; reduce the learning rate or the scaling parameter if this is the case.
2. Increased training time over LoRA when using very low RandLora basis ranks.

## RandLora vs. VeRA

RandLora shares similarities with VeRA in that both algorithms use random basis combinations to address some of LoRA's limitations. The limitations addressed by each algorithm are however different. VeRA aims to reduce trainable parameters beyond rank 1 LoRAs, while RandLora reduces the performance limitation due to the low rank of the update as the trainable parameter count increases.

RandLora is expected to:
1. Improve performance over VeRA when more trainable parameters are required (hard tasks)
2. Reduce memory usage over VeRA thanks to RandLora's random base sharing strategy

## Citation

```
@inproceedings{2025_ICLR_RandLoRA,
title="{RandLoRA: Full rank parameter-efficient fine-tuning of large models}",
author="Albert, Paul and Zhang, Frederic Z. and Saratchandran, Hemanth and Rodriguez-Opazo, Cristian and van den Hengel, Anton and Abbasnejad, Ehsan",
booktitle="{International Conference on Learning Representations (ICLR)}",
year="2025"
}
```
peft/examples/randlora_finetuning/README.md/0
{ "file_path": "peft/examples/randlora_finetuning/README.md", "repo_id": "peft", "token_count": 1887 }
237
<jupyter_start><jupyter_text>Using VeRA for sequence classification In this example, we fine-tune Roberta on a sequence classification task using VeRA. Imports<jupyter_code>import torch from torch.optim import AdamW from torch.utils.data import DataLoader from peft import ( get_peft_model, VeraConfig, PeftType, ) import evaluate from datasets import load_dataset from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed, AutoConfig from tqdm import tqdm<jupyter_output><empty_output><jupyter_text>Parameters<jupyter_code>batch_size = 128 model_name_or_path = "roberta-base" task = "mrpc" peft_type = PeftType.VERA device = torch.accelerator.current_accelerator().type if hasattr(torch, "accelerator") else "cuda" num_epochs = 5 # for best results, increase this number rank = 8 # for best results, increase this number max_length = 128 torch.manual_seed(0) peft_config = VeraConfig( task_type="SEQ_CLS", r=rank, d_initial=0.1, target_modules=["query", "value", "intermediate.dense"], save_projection=True, ) head_lr = 1e-2 vera_lr = 2e-2<jupyter_output><empty_output><jupyter_text>Loading data<jupyter_code>if any(k in model_name_or_path for k in ("gpt", "opt", "bloom")): padding_side = "left" else: padding_side = "right" tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, padding_side=padding_side) if getattr(tokenizer, "pad_token_id") is None: tokenizer.pad_token_id = tokenizer.eos_token_id datasets = load_dataset("glue", task) metric = evaluate.load("glue", task) def tokenize_function(examples): # max_length=None => use the model max length (it's actually the default) outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=max_length) return outputs tokenized_datasets = datasets.map( tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library tokenized_datasets = tokenized_datasets.rename_column("label", "labels") def collate_fn(examples): return tokenizer.pad(examples, padding="longest", return_tensors="pt") # Instantiate dataloaders. 
train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size) eval_dataloader = DataLoader( tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size )<jupyter_output><empty_output><jupyter_text>Preparing the VeRA model<jupyter_code>model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path, return_dict=True, max_length=None) model = get_peft_model(model, peft_config) model.print_trainable_parameters() optimizer = AdamW( [ {"params": [p for n, p in model.named_parameters() if "vera_lambda_" in n], "lr": vera_lr}, {"params": [p for n, p in model.named_parameters() if "classifier" in n], "lr": head_lr}, ] ) # Instantiate scheduler lr_scheduler = get_linear_schedule_with_warmup( optimizer=optimizer, num_warmup_steps=0.06 * (len(train_dataloader) * num_epochs), num_training_steps=(len(train_dataloader) * num_epochs), )<jupyter_output><empty_output><jupyter_text>Training<jupyter_code>model.to(device) for epoch in range(num_epochs): model.train() for step, batch in enumerate(tqdm(train_dataloader)): batch.to(device) outputs = model(**batch) loss = outputs.loss loss.backward() optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(tqdm(eval_dataloader)): batch.to(device) with torch.no_grad(): outputs = model(**batch) predictions = outputs.logits.argmax(dim=-1) predictions, references = predictions, batch["labels"] metric.add_batch( predictions=predictions, references=references, ) eval_metric = metric.compute() print(f"epoch {epoch}:", eval_metric)<jupyter_output>0%| | 0/29 [00:00<?, ?it/s]You're using a RobertaTokenizerFast tokenizer. Please note that with a fast tokenizer, using the `__call__` method is faster than using a method to encode the text followed by a call to the `pad` method to get a padded encoding. 100%|██████████| 29/29 [00:18<00:00, 1.58it/s] 100%|██████████| 4/4 [00:01<00:00, 3.52it/s]<jupyter_text>Share adapters on the 🤗 Hub<jupyter_code>account_id = ... # your Hugging Face Hub account ID model.push_to_hub(f"{account_id}/roberta-large-peft-vera")<jupyter_output><empty_output><jupyter_text>Load adapters from the HubYou can also directly load adapters from the Hub using the commands below:<jupyter_code>import torch from peft import PeftModel, PeftConfig from transformers import AutoTokenizer peft_model_id = f"{account_id}/roberta-large-peft-vera" config = PeftConfig.from_pretrained(peft_model_id) inference_model = AutoModelForSequenceClassification.from_pretrained(config.base_model_name_or_path) tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path) # Load the Vera model inference_model = PeftModel.from_pretrained(inference_model, peft_model_id) inference_model.to(device) inference_model.eval() for step, batch in enumerate(tqdm(eval_dataloader)): batch.to(device) with torch.no_grad(): outputs = inference_model(**batch) predictions = outputs.logits.argmax(dim=-1) predictions, references = predictions, batch["labels"] metric.add_batch( predictions=predictions, references=references, ) eval_metric = metric.compute() print(eval_metric)<jupyter_output>0%| | 0/4 [00:00<?, ?it/s]You're using a RobertaTokenizerFast tokenizer. Please note that with a fast tokenizer, using the `__call__` method is faster than using a method to encode the text followed by a call to the `pad` method to get a padded encoding. 
100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 4/4 [00:01<00:00, 3.14it/s]
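<jupyter_text>As a final sanity check, we can score a single sentence pair with the loaded adapter. This is only an illustrative snippet: the two example sentences are made up, and the predicted class id will depend on your own training run.<jupyter_code>inference_model.eval()
inputs = tokenizer("The company reported strong quarterly results.", "Quarterly results at the company were strong.", truncation=True, max_length=max_length, return_tensors="pt").to(device)
with torch.no_grad(): logits = inference_model(**inputs).logits
print("predicted class id:", logits.argmax(dim=-1).item())<jupyter_output><empty_output>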
peft/examples/sequence_classification/VeRA.ipynb/0
{ "file_path": "peft/examples/sequence_classification/VeRA.ipynb", "repo_id": "peft", "token_count": 2571 }
238
# Makefile for running MetaMathQA experiments. # --- Configuration --- PYTHON := python RUN_SCRIPT := run.py EXPERIMENTS_DIR := experiments RESULTS_DIR := results # --- Automatic Experiment and Result Discovery --- # 1. Find all experiment directories by looking for adapter_config.json files. # This gives us a list like: experiments/lora/llama-3.2-3B-rank32 ... EXPERIMENT_PATHS := $(shell find $(EXPERIMENTS_DIR) \ -name "adapter_config.json" -or \ -name "training_params.json" | xargs dirname | sort -u) # 2. Define a function to replace all occurrences of a character in a string. # This is needed to replicate the result naming logic from run.py (e.g., "lora/foo" -> "lora-foo"). # Usage: $(call replace-all, string, char_to_replace, replacement_char) replace-all = $(if $(findstring $(2),$(1)),$(call replace-all,$(subst $(2),$(3),$(1)),$(2),$(3)),$(1)) # 3. Define a function to convert an experiment path to its flat result file path. # e.g., "experiments/lora/llama-3.2-3B-rank32" -> "results/lora-llama-3.2-3B-rank32.json" exp_to_res = $(RESULTS_DIR)/$(call replace-all,$(patsubst $(EXPERIMENTS_DIR)/%,%,$(1)),/,--).json # 4. Generate the list of all target result files we want to build. RESULT_FILES := $(foreach exp,$(EXPERIMENT_PATHS),$(call exp_to_res,$(exp))) # --- Main Rules --- # The default 'all' target depends on all possible result files. # Running `make` or `make all` will check and run any outdated or missing experiments. all: $(RESULT_FILES) # --- Dynamic Rule Generation --- # This is the core logic. We dynamically generate a specific Makefile rule for each experiment found. # This avoids a complex pattern rule and makes the logic clearer. define EXPERIMENT_template # Input $1: The full experiment path (e.g., experiments/lora/llama-3.2-3B-rank32) # Define the rule: # The target is the result file (e.g., results/lora-llama-3.2-3B-rank32.json). # The dependencies are its config files, code changes need to be audited manually since they can # vary in degree of importance. Note that we explicitly ignore when the script fails to run # so that the other experiments still have a chance to run. $(call exp_to_res,$(1)): $(wildcard $(1)/adapter_config.json) $(wildcard $(1)/training_params.json) @echo "---" @echo "Running experiment: $(1)" -$(PYTHON) $(RUN_SCRIPT) -v $(1) @echo "Finished: $$@" @echo "---" endef # This command iterates through every found experiment path and evaluates the template, # effectively stamping out a unique, explicit rule for each one. $(foreach exp_path,$(EXPERIMENT_PATHS),$(eval $(call EXPERIMENT_template,$(exp_path)))) # --- Utility Rules --- .PHONY: all clean list dump_rules # The 'clean' rule removes all generated results. clean: @echo "Cleaning results directory..." @([ -n "$(wildcard $(RESULTS_DIR)/*.json)" ] && rm $(RESULTS_DIR)/*.json) || exit 0 # The 'list' rule is for debugging. It shows the discovered experiments # and the result files the Makefile expects to create for them. list: @echo "Discovered experiment configurations:" @$(foreach exp,$(EXPERIMENT_PATHS),echo " - $(exp)/adapter_config.json";) @echo "\nTarget result files:" @$(foreach res,$(RESULT_FILES),echo " - $(res)";) # The 'dump_rules' rule is for debugging. It dumps all dynamically defined rules. define newline endef define DUMPED_RULES $(foreach exp_path,$(EXPERIMENT_PATHS),$(call EXPERIMENT_template,$(exp_path))) endef dump_rules: @echo -e "$(subst $(newline),\n,${DUMPED_RULES})"
peft/method_comparison/MetaMathQA/Makefile/0
{ "file_path": "peft/method_comparison/MetaMathQA/Makefile", "repo_id": "peft", "token_count": 1187 }
239
{ "auto_mapping": null, "base_model_name_or_path": null, "exclude_modules": null, "fan_in_fan_out": false, "feedforward_modules": null, "inference_mode": false, "init_ia3_weights": true, "modules_to_save": null, "peft_type": "IA3", "revision": null, "target_modules": null, "task_type": null }
peft/method_comparison/MetaMathQA/experiments/ia3/llama-3.2-3B-default/adapter_config.json/0
{ "file_path": "peft/method_comparison/MetaMathQA/experiments/ia3/llama-3.2-3B-default/adapter_config.json", "repo_id": "peft", "token_count": 131 }
240
# Copyright 2025-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Utilities for PEFT benchmarking. """ import datetime import json import os import platform import subprocess from dataclasses import asdict, dataclass, field from enum import Enum from typing import Any, Callable, Optional from peft.utils import infer_device import psutil import torch FILE_NAME_BENCHMARK_PARAMS = "benchmark_params.json" FILE_NAME_DEFAULT_CONFIG = "default_benchmark_params.json" RESULT_PATH = os.path.join(os.path.dirname(__file__), "results") RESULT_PATH_TEMP = os.path.join(os.path.dirname(__file__), "temporary_results") RESULT_PATH_CANCELLED = os.path.join(os.path.dirname(__file__), "cancelled_results") class BenchmarkStatus(Enum): """Status of a benchmark run.""" SUCCESS = "success" FAILED = "failed" CANCELLED = "cancelled" RUNNING = "running" @dataclass class BenchmarkResult: """Container for benchmark results.""" experiment_name: str status: BenchmarkStatus model_id: str run_info: dict = field(default_factory=dict) generation_info: dict = field(default_factory=dict) meta_info: dict = field(default_factory=dict) def __post_init__(self): """Initialize structured data format.""" device = infer_device() torch_accelerator_module = getattr(torch, device, torch.cuda) self.run_info = { "timestamp": datetime.datetime.now(tz=datetime.timezone.utc).isoformat(), "duration": 0.0, "status": self.status.value, "hardware": { "num_accelerators": torch_accelerator_module.device_count() if torch_accelerator_module.is_available() else 0, "accelerator_type": torch_accelerator_module.get_device_name(0) if torch_accelerator_module.is_available() else "N/A", "cuda_version": torch.version.cuda if torch.cuda.is_available() else "N/A", "pytorch_version": torch.__version__, }, } self.meta_info = { "model_id": self.model_id, "parameters": { "base_params": 0, "trainable_params": 0, "total_params": 0, "param_ratio": 0.0, }, "model_size": { "base_model_size_mb": 0.0, "adapter_size_mb": 0.0, }, "package_info": { "transformers-version": None, "transformers-commit-hash": None, "peft-version": None, "peft-commit-hash": None, "datasets-version": None, "datasets-commit-hash": None, "bitsandbytes-version": None, "bitsandbytes-commit-hash": None, "torch-version": torch.__version__, "torch-commit-hash": None, }, "system_info": { "system": platform.system(), "release": platform.release(), "version": platform.version(), "machine": platform.machine(), "processor": platform.processor(), "accelerator": torch_accelerator_module.get_device_name(0) if torch_accelerator_module.is_available() else "N/A", }, } self.generation_info = { "memory": { "peak_accelerator_memory_mb": 0.0, "peak_ram_memory_mb": 0.0, "memory_logs": [], }, "by_category": {}, "overall": {}, } def update_meta_info(self, param_counts: dict, size_info: dict, package_info: Optional[dict] = None): """Update model metadata information.""" self.meta_info["parameters"].update(param_counts) self.meta_info["model_size"].update(size_info) if package_info: 
self.meta_info["package_info"].update(package_info) def update_generation_info(self, memory_data: Optional[dict] = None, performance_metrics: Optional[dict] = None): """Update generation performance information, primarily for memory and high-level performance.""" if memory_data: self.generation_info["memory"].update(memory_data) if performance_metrics: # For things like overall tokens/sec if calculated self.generation_info.update(performance_metrics) def add_memory_log(self, stage: str, ram_mb: float, accelerator_allocated_mb: float, accelerator_reserved_mb: float): """Add a memory usage log entry to generation_info.""" self.generation_info["memory"]["memory_logs"].append( { "stage": stage, "ram_mb": ram_mb, "accelerator_allocated_mb": accelerator_allocated_mb, "accelerator_reserved_mb": accelerator_reserved_mb, } ) def add_metrics_for_category(self, category: str, metrics: dict, individual_samples: list = None): """Add metrics for a specific prompt category under generation_info.""" category_data = {"metrics": metrics, "samples": individual_samples if individual_samples is not None else []} self.generation_info["by_category"][category] = category_data def update_run_info( self, duration: float, status: BenchmarkStatus, error: Optional[str] = None, peft_config: Optional[dict] = None, benchmark_config: Optional[dict] = None, ): """Update run information.""" self.run_info["duration"] = duration self.run_info["status"] = status.value if error: self.run_info["error"] = error if peft_config: self.run_info["peft_config"] = peft_config if benchmark_config: self.run_info["benchmark_config"] = benchmark_config def compute_overall_metrics(self): """Compute overall metrics across all categories within generation_info.""" if not self.generation_info["by_category"]: return categories = self.generation_info["by_category"] key_metrics = [ "inference_time", "base_inference_time", "inference_overhead_pct", "time_per_token", "generated_tokens", ] for metric in key_metrics: values = [] for category_data in categories.values(): if "metrics" in category_data and metric in category_data["metrics"]: values.append(category_data["metrics"][metric]) if values: self.generation_info["overall"][metric] = sum(values) / len(values) def to_dict(self) -> dict[str, Any]: """Convert result to dictionary.""" self.compute_overall_metrics() return { "run_info": self.run_info, "generation_info": self.generation_info, "meta_info": self.meta_info, } def save(self, path: Optional[str] = None): """Save result to JSON file.""" if path is None: peft_branch = get_peft_branch() if self.status == BenchmarkStatus.CANCELLED: base_path = RESULT_PATH_CANCELLED elif peft_branch != "main": base_path = RESULT_PATH_TEMP elif self.status == BenchmarkStatus.SUCCESS: base_path = RESULT_PATH elif self.status == BenchmarkStatus.FAILED: base_path = RESULT_PATH_CANCELLED else: base_path = RESULT_PATH_TEMP filename = f"{self.experiment_name}.json" path = os.path.join(base_path, filename) os.makedirs(os.path.dirname(path), exist_ok=True) with open(path, "w") as f: json.dump(self.to_dict(), f, indent=2) return path @dataclass class BenchmarkConfig: """Configuration for benchmarking PEFT methods.""" model_id: str seed: int num_inference_runs: int max_new_tokens: int dtype: str = "float16" use_4bit: bool = False use_8bit: bool = False category_generation_params: Optional[dict] = None def __post_init__(self) -> None: """Validate configuration.""" if not isinstance(self.model_id, str): raise ValueError(f"Invalid model_id: {self.model_id}") if self.seed < 0: 
raise ValueError(f"Invalid seed: {self.seed}") if self.num_inference_runs <= 0: raise ValueError(f"Invalid num_inference_runs: {self.num_inference_runs}") if self.max_new_tokens <= 0: raise ValueError(f"Invalid max_new_tokens: {self.max_new_tokens}") @classmethod def from_dict(cls, config_dict: dict) -> "BenchmarkConfig": """Create config from dictionary.""" valid_keys = set(cls.__dataclass_fields__.keys()) filtered_dict = {k: v for k, v in config_dict.items() if k in valid_keys} return cls(**filtered_dict) @classmethod def from_json(cls, json_path: str) -> "BenchmarkConfig": """Load config from JSON file.""" with open(json_path) as f: config_dict = json.load(f) return cls.from_dict(config_dict) def to_dict(self) -> dict[str, Any]: """Convert config to dictionary.""" result = asdict(self) return result def save(self, path: str) -> None: """Save config to JSON file.""" with open(path, "w") as f: json.dump(self.to_dict(), f, indent=2) def merge_from_dict(self, config_dict: dict) -> None: """Merge settings from a dictionary into this config object. Keys in config_dict will override existing attributes. """ for key, value in config_dict.items(): if hasattr(self, key): setattr(self, key, value) def validate_experiment_path(path: str) -> tuple[str, "BenchmarkConfig"]: """Validate experiment path, load and merge configs, and return them.""" if not os.path.exists(path): raise FileNotFoundError(f"Experiment path not found: {path}") path_parts = os.path.normpath(path).split(os.sep) try: experiments_idx = path_parts.index("experiments") except ValueError: experiment_name = os.path.basename(path.rstrip(os.sep)) else: if experiments_idx + 1 < len(path_parts): method_name = path_parts[experiments_idx + 1] remaining_parts = path_parts[experiments_idx + 2 :] if remaining_parts: remaining_name = "-".join(remaining_parts) experiment_name = f"{method_name}--{remaining_name}" else: experiment_name = method_name else: experiment_name = os.path.basename(path.rstrip(os.sep)) default_config_path = os.path.join(os.path.dirname(__file__), FILE_NAME_DEFAULT_CONFIG) experiment_benchmark_params_path = os.path.join(path, FILE_NAME_BENCHMARK_PARAMS) if not os.path.exists(default_config_path): raise FileNotFoundError(f"Default configuration file not found: {default_config_path}. This is required.") benchmark_config = BenchmarkConfig.from_json(default_config_path) print(f"Loaded default configuration from {default_config_path}") if os.path.exists(experiment_benchmark_params_path): with open(experiment_benchmark_params_path) as f: experiment_specific_params = json.load(f) benchmark_config.merge_from_dict(experiment_specific_params) print(f"Loaded and merged experiment-specific parameters from {experiment_benchmark_params_path}") else: print(f"No {FILE_NAME_BENCHMARK_PARAMS} found in {path}. 
Using only default configuration.") return experiment_name, benchmark_config def get_memory_usage() -> tuple[float, float, float]: """Get current memory usage (RAM and accelerator).""" process = psutil.Process(os.getpid()) ram_usage_bytes = process.memory_info().rss ram_usage_mb = ram_usage_bytes / (1024 * 1024) if torch.cuda.is_available(): accelerator_allocated = torch.cuda.memory_allocated() accelerator_reserved = torch.cuda.memory_reserved() accelerator_allocated_mb = accelerator_allocated / (1024 * 1024) accelerator_reserved_mb = accelerator_reserved / (1024 * 1024) elif torch.xpu.is_available(): accelerator_allocated = torch.xpu.memory_allocated() accelerator_reserved = torch.xpu.memory_reserved() accelerator_allocated_mb = accelerator_allocated / (1024 * 1024) accelerator_reserved_mb = accelerator_reserved / (1024 * 1024) else: accelerator_allocated_mb = 0.0 accelerator_reserved_mb = 0.0 return ram_usage_mb, accelerator_allocated_mb, accelerator_reserved_mb def init_accelerator() -> tuple[float, float]: """Initialize accelerator and return initial memory usage.""" if torch.cuda.is_available(): torch.cuda.init() torch.cuda.empty_cache() _, accelerator_allocated, accelerator_reserved = get_memory_usage() elif torch.xpu.is_available(): torch.xpu.init() torch.xpu.empty_cache() _, accelerator_allocated, accelerator_reserved = get_memory_usage() else: accelerator_allocated = 0.0 accelerator_reserved = 0.0 return accelerator_allocated, accelerator_reserved def get_model_size_mb(model: torch.nn.Module, dtype_bytes: int = 4) -> float: """Calculate model size in MB.""" return sum(p.numel() * dtype_bytes for p in model.parameters()) / (1024 * 1024) def get_peft_branch() -> str: repo_root = os.path.dirname(__file__) return subprocess.check_output("git rev-parse --abbrev-ref HEAD".split(), cwd=repo_root).decode().strip() def log_results( experiment_name: str, benchmark_result: BenchmarkResult, print_fn: Callable = print, ) -> None: """Log benchmark results to console.""" print_fn("\n" + "=" * 50) print_fn(f"Benchmark Results: {experiment_name}") print_fn("=" * 50) print_fn(f"Status: {benchmark_result.run_info.get('status', 'N/A')}") print_fn(f"Duration: {benchmark_result.run_info.get('duration', 0):.2f} seconds") if benchmark_result.run_info.get("status") != BenchmarkStatus.SUCCESS.value: print_fn(f"Error: {benchmark_result.run_info.get('error', 'Unknown error')}") print_fn("=" * 50) return print_fn("\nModel Information:") print_fn(f" Base Model: {benchmark_result.meta_info.get('model_id', 'N/A')}") print_fn("\nParameter Counts:") params = benchmark_result.meta_info.get("parameters", {}) print_fn(f" Base Parameters: {params.get('base_params', 0):,}") print_fn(f" Trainable Parameters: {params.get('trainable_params', 0):,}") print_fn(f" Parameter Ratio: {params.get('param_ratio', 0):.5%}") print_fn("\nModel Size:") size_info = benchmark_result.meta_info.get("model_size", {}) print_fn(f" Base Model: {size_info.get('base_model_size_mb', 0):.2f} MB") print_fn(f" Adapter: {size_info.get('adapter_size_mb', 0):.2f} MB") print_fn("\nMemory Usage (from generation_info):") memory_data = benchmark_result.generation_info.get("memory", {}) print_fn(f" Peak Accelerator Memory: {memory_data.get('peak_accelerator_memory_mb', 0):.2f} MB") print_fn(f" Peak RAM Memory: {memory_data.get('peak_ram_memory_mb', 0):.2f} MB") print_fn("\nDetailed Metrics (from generation_info.by_category):") if benchmark_result.generation_info.get("by_category"): for category, cat_data in 
benchmark_result.generation_info["by_category"].items(): print_fn(f" Category: {category}") metrics = cat_data.get("metrics", {}) print_fn(f" Inference Time: {metrics.get('inference_time', 0):.4f} seconds") print_fn(f" Base Inference Time: {metrics.get('base_inference_time', 0):.4f} seconds") print_fn(f" Inference Overhead: {metrics.get('inference_overhead_pct', 0):.2f}%") print_fn(f" Time Per Token: {metrics.get('time_per_token', 0):.6f} seconds/token") print_fn(f" Generated Tokens: {metrics.get('generated_tokens', 0):.1f}") samples = cat_data.get("samples", []) if samples: print_fn(f" Number of Samples: {len(samples)}") print_fn( f" Average Generated Tokens: {sum(s.get('generated_tokens', 0) for s in samples) / len(samples):.1f}" ) else: print_fn(" No per-category metrics available.") benchmark_result.compute_overall_metrics() print_fn("\nOverall Metrics (from generation_info.overall):") overall = benchmark_result.generation_info.get("overall") if overall: print_fn(f" Inference Time: {overall.get('inference_time', 0):.4f} seconds") print_fn(f" Base Inference Time: {overall.get('base_inference_time', 0):.4f} seconds") print_fn(f" Inference Overhead: {overall.get('inference_overhead_pct', 0):.2f}%") print_fn(f" Time Per Token: {overall.get('time_per_token', 0):.6f} seconds/token") print_fn(f" Generated Tokens: {overall.get('generated_tokens', 0):.1f}") else: print_fn(" No overall metrics computed.") print_fn("\nSaved results to:", benchmark_result.save()) print_fn("=" * 50)
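# A minimal usage sketch for the helpers above (illustrative only, not part of the original
# benchmark script): bracketing a single generation run with get_memory_usage() to report
# RAM/accelerator deltas. The `run_generation` callable is a placeholder assumption.
def measure_single_run(run_generation):
    ram_before, alloc_before, _ = get_memory_usage()
    run_generation()
    ram_after, alloc_after, _ = get_memory_usage()
    print(f"RAM delta: {ram_after - ram_before:.2f} MB")
    print(f"Accelerator delta: {alloc_after - alloc_before:.2f} MB")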
peft/method_comparison/text_generation_benchmark/utils.py/0
{ "file_path": "peft/method_comparison/text_generation_benchmark/utils.py", "repo_id": "peft", "token_count": 7673 }
241
# Copyright 2024-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import warnings from typing import Optional from transformers import PreTrainedModel from .auto import MODEL_TYPE_TO_PEFT_MODEL_MAPPING from .config import PeftConfig from .mapping import PEFT_TYPE_TO_CONFIG_MAPPING, PEFT_TYPE_TO_PREFIX_MAPPING from .mixed_model import PeftMixedModel from .peft_model import PeftModel from .tuners.tuners_utils import BaseTuner, BaseTunerLayer from .utils import _prepare_prompt_learning_config def get_peft_model( model: PreTrainedModel, peft_config: PeftConfig, adapter_name: str = "default", mixed: bool = False, autocast_adapter_dtype: bool = True, revision: Optional[str] = None, low_cpu_mem_usage: bool = False, ) -> PeftModel | PeftMixedModel: """ Returns a Peft model object from a model and a config, where the model will be modified in-place. Args: model ([`transformers.PreTrainedModel`]): Model to be wrapped. peft_config ([`PeftConfig`]): Configuration object containing the parameters of the Peft model. adapter_name (`str`, `optional`, defaults to `"default"`): The name of the adapter to be injected, if not provided, the default adapter name is used ("default"). mixed (`bool`, `optional`, defaults to `False`): Whether to allow mixing different (compatible) adapter types. autocast_adapter_dtype (`bool`, *optional*): Whether to autocast the adapter dtype. Defaults to `True`. Right now, this will only cast adapter weights using float16 or bfloat16 to float32, as this is typically required for stable training, and only affect select PEFT tuners. revision (`str`, `optional`, defaults to `main`): The revision of the base model. If this isn't set, the saved peft model will load the `main` revision for the base model low_cpu_mem_usage (`bool`, `optional`, defaults to `False`): Create empty adapter weights on meta device. Useful to speed up the loading process. Leave this setting as False if you intend on training the model, unless the adapter weights will be replaced by different weights before training starts. """ model_config = BaseTuner.get_model_config(model) old_name = peft_config.base_model_name_or_path new_name = model.__dict__.get("name_or_path", None) peft_config.base_model_name_or_path = new_name # Especially in notebook environments there could be a case that a user wants to experiment with different # configuration values. However, it is likely that there won't be any changes for new configs on an already # initialized PEFT model. The best we can do is warn the user about it. if any(isinstance(module, BaseTunerLayer) for module in model.modules()): warnings.warn( "You are trying to modify a model with PEFT for a second time. If you want to reload the model with a " "different config, make sure to call `.unload()` before." ) if (old_name is not None) and (old_name != new_name): warnings.warn( f"The PEFT config's `base_model_name_or_path` was renamed from '{old_name}' to '{new_name}'. 
" "Please ensure that the correct base model is loaded when loading this checkpoint." ) if revision is not None: if peft_config.revision is not None and peft_config.revision != revision: warnings.warn( f"peft config has already set base model revision to {peft_config.revision}, overwriting with revision {revision}" ) peft_config.revision = revision if ( (isinstance(peft_config, PEFT_TYPE_TO_CONFIG_MAPPING["LORA"])) and (peft_config.init_lora_weights == "eva") and not low_cpu_mem_usage ): warnings.warn( "lora with eva initialization used with low_cpu_mem_usage=False. " "Setting low_cpu_mem_usage=True can improve the maximum batch size possible for eva initialization." ) prefix = PEFT_TYPE_TO_PREFIX_MAPPING.get(peft_config.peft_type) if prefix and adapter_name in prefix: warnings.warn( f"Adapter name {adapter_name} should not be contained in the prefix {prefix}." "This may lead to reinitialization of the adapter weights during loading." ) if mixed: # note: PeftMixedModel does not support autocast_adapter_dtype, so don't pass it return PeftMixedModel(model, peft_config, adapter_name=adapter_name) # We explicitly exclude prompt learning here since prompt learning is specific to the task and needs special # handling in the PEFT model's forward method. if peft_config.task_type not in MODEL_TYPE_TO_PEFT_MODEL_MAPPING.keys() and not peft_config.is_prompt_learning: return PeftModel( model, peft_config, adapter_name=adapter_name, autocast_adapter_dtype=autocast_adapter_dtype, low_cpu_mem_usage=low_cpu_mem_usage, ) if peft_config.is_prompt_learning: peft_config = _prepare_prompt_learning_config(peft_config, model_config) return MODEL_TYPE_TO_PEFT_MODEL_MAPPING[peft_config.task_type]( model, peft_config, adapter_name=adapter_name, autocast_adapter_dtype=autocast_adapter_dtype, low_cpu_mem_usage=low_cpu_mem_usage, )
peft/src/peft/mapping_func.py/0
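# A minimal usage sketch (illustrative, not part of mapping_func.py): wrapping a small causal LM
# with get_peft_model and a LoRA config. The model id and target modules are assumptions chosen
# for the example, not values required by this function.
from transformers import AutoModelForCausalLM

from peft import LoraConfig, get_peft_model

base_model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m")
lora_config = LoraConfig(task_type="CAUSAL_LM", r=8, lora_alpha=16, target_modules=["q_proj", "v_proj"])
peft_model = get_peft_model(base_model, lora_config, adapter_name="default")
peft_model.print_trainable_parameters()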
{ "file_path": "peft/src/peft/mapping_func.py", "repo_id": "peft", "token_count": 2245 }
242
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from collections import namedtuple
from dataclasses import dataclass, field

from peft.config import PeftConfig
from peft.utils import PeftType

from .utils import gpt2_compute_query_states, llama_compute_query_states


@dataclass
class AdaptionPromptConfig(PeftConfig):
    """Stores the configuration of an [`AdaptionPromptModel`]."""

    target_modules: str = field(
        default=None, metadata={"help": "Name of the attention submodules to insert adaption prompts into."}
    )
    adapter_len: int = field(default=None, metadata={"help": "Number of adapter tokens to insert"})
    adapter_layers: int = field(default=None, metadata={"help": "Number of adapter layers (from the top)"})

    def __post_init__(self):
        super().__post_init__()
        self.peft_type = PeftType.ADAPTION_PROMPT

    @property
    def is_adaption_prompt(self) -> bool:
        """Return True if this is an adaption prompt config."""
        return True


# Contains the config that is specific to a transformers model type.
ModelTypeConfig = namedtuple(
    "ModelTypeConfig", ["compute_query_states", "target_modules", "k_proj_layer", "v_proj_layer", "o_proj_layer"]
)

# Mapping of transformers model types to their specific configuration.
TRANSFORMERS_MODEL_CONFIG = {
    "llama": ModelTypeConfig(
        compute_query_states=llama_compute_query_states,
        target_modules="self_attn",
        k_proj_layer="k_proj",
        v_proj_layer="v_proj",
        o_proj_layer="o_proj",
    ),
    "mistral": ModelTypeConfig(  # same as llama
        compute_query_states=llama_compute_query_states,
        target_modules="self_attn",
        k_proj_layer="k_proj",
        v_proj_layer="v_proj",
        o_proj_layer="o_proj",
    ),
    "gpt2": ModelTypeConfig(
        # piggybacking off of the prior definitions; GPT-2's attention calculation is different
        compute_query_states=gpt2_compute_query_states,
        target_modules="attn",
        k_proj_layer="c_attn",
        v_proj_layer=None,
        o_proj_layer=None,
    ),
}


def prepare_config(
    peft_config: AdaptionPromptConfig,
    model,
) -> AdaptionPromptConfig:
    """Prepare the config based on the model type of the base model."""
    if model.config.model_type not in TRANSFORMERS_MODEL_CONFIG:
        raise ValueError(f"Unsupported model type for adaption prompt: '{model.config.model_type}'.")

    model_config = TRANSFORMERS_MODEL_CONFIG[model.config.model_type]

    if peft_config.target_modules is None:
        peft_config.target_modules = model_config.target_modules

    return peft_config
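# A minimal sketch (illustrative, not part of this module): building an AdaptionPromptConfig and
# letting prepare_config fill in the model-specific target module. The adapter_len and
# adapter_layers values are assumptions for the example, and `model` stands for any loaded
# llama-type transformers model.
config = AdaptionPromptConfig(adapter_len=10, adapter_layers=30, task_type="CAUSAL_LM")
# config = prepare_config(config, model)  # for a llama model, target_modules becomes "self_attn"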
peft/src/peft/tuners/adaption_prompt/config.py/0
{ "file_path": "peft/src/peft/tuners/adaption_prompt/config.py", "repo_id": "peft", "token_count": 1154 }
243
# Copyright 2025-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations from dataclasses import dataclass, field from typing import Literal, Optional, Union from peft.config import PeftConfig from peft.utils import PeftType @dataclass class C3AConfig(PeftConfig): """This is the configuration class to store the configuration of a [`C3AModel`]. Args: block_size (`int`): block size for C3A, must be divisible by both the input size and the output size of the target layer. If you have no idea what block_size you should use, set it to the greatest common divisor of all input & output sizes of your target layers. Increasing this would result in less parameters. target_modules (`Union[list[str],str]`): The names of the modules to apply C3A to. bias (`str`): Bias type for C3A. Can be 'none', 'all' or 'c3a_only'. If 'all' or 'c3a_only', the corresponding biases will be updated during training. Be aware that this means that, even when disabling the adapters, the model will not produce the same output as the base model would have without adaptation. modules_to_save (`list[str]`):list of modules apart from C3A layers to be set as trainable and saved in the final checkpoint. layers_to_transform (`Union[list[int],int]`): The layer indexes to transform, if this argument is specified, it will apply C3A on the layer indexes that are specified in this list. If a single integer is passed, it will apply C3A on the layer at this index. layers_pattern (`str`): The layer pattern name, used only if `layers_to_transform` is different from `None` and if the layer pattern is not in the common layers pattern. block_size_pattern (`dict`): The mapping from layer names or regexp expression to block_size which are different from the default specified. For example, `{"model.decoder.layers.0.encoder_attn.k_proj": 1280`} init_weights (`Union[bool, Literal["gaussian", "kaiming_uniform", "xavier_uniform"]]`): Defaults to 'xavier_uniform'. Setting this to `False` also uses 'xavier_uniform'. To set the weights to zeros (thus making C3A a no-op), set the value to `True`. """ block_size: int = field( default=256, metadata={ "help": ( "block size for C3A, must be divisible by both the input size and the output size of the target layer." " If you have no idea what block_size you should use, set it to the greatest common divisor of all" " input & output sizes of your target layers. Increasing this would result in less parameters." ) }, ) target_modules: Optional[Union[list[str], str]] = field( default=None, metadata={ "help": ( "list of module names or regex expression of the module names to replace with C3A." " For example, ['q', 'v'] or '.*decoder.*(SelfAttention|EncDecAttention).*(q|v)$' " ) }, ) bias: str = field(default="none", metadata={"help": "Bias type for C3A. Can be 'none', 'all' or 'c3a_only'"}) modules_to_save: Optional[list[str]] = field( default=None, metadata={ "help": ( "list of modules apart from C3A layers to be set as trainable and saved in the final checkpoint." 
" For example, in Sequence Classification or Token Classification tasks," " the final layer `classifier/score` are randomly initialized" " and as such need to be trainable and saved." ) }, ) layers_to_transform: Optional[Union[list[int], int]] = field( default=None, metadata={ "help": ( "The layer indexes to transform, is this argument is specified," " PEFT will transform only the layers indexes that are specified inside this list." " If a single integer is passed, PEFT will transform only the layer at this index." " This only works when target_modules is a list of str." ) }, ) layers_pattern: Optional[Union[list[str], str]] = field( default=None, metadata={ "help": ( "The layer pattern name, used only if `layers_to_transform` is different to None" " and if the layer pattern is not in the common layers pattern." " This only works when target_modules is a list of str." ) }, ) block_size_pattern: Optional[dict] = field( default_factory=dict, metadata={ "help": ( "The mapping from layer names or regexp expression to block_size" " which are different from the default specified." " For example, `{model.decoder.layers.0.encoder_attn.k_proj: 1280`}" ) }, ) init_weights: Optional[Union[bool, Literal["gaussian", "kaiming_uniform", "xavier_uniform"]]] = field( default="xavier_uniform", metadata={ "help": ( "Defaults to 'xavier_uniform'. Setting this to `False` also uses 'xavier_uniform'. To set the weights " "to zeros (thus making C3A a no-op), set the value to `True`." ) }, ) def __post_init__(self): self.peft_type = PeftType.C3A self.target_modules = ( set(self.target_modules) if isinstance(self.target_modules, list) else self.target_modules ) # if target_modules is a regex expression, then layers_to_transform should be None if isinstance(self.target_modules, str) and self.layers_to_transform is not None: raise ValueError("`layers_to_transform` cannot be used when `target_modules` is a str.") # if target_modules is a regex expression, then layers_pattern should be None if isinstance(self.target_modules, str) and self.layers_pattern is not None: raise ValueError("`layers_pattern` cannot be used when `target_modules` is a str.")
peft/src/peft/tuners/c3a/config.py/0
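# A minimal sketch (illustrative, not part of config.py): choosing block_size as the greatest
# common divisor of all target-layer input/output sizes, as the help text above suggests.
# The layer dimensions listed here are assumptions for the example.
import math
from functools import reduce

layer_dims = [4096, 4096, 11008]  # in/out features of the layers C3A will target
block_size = reduce(math.gcd, layer_dims)  # -> 256 for these dimensions
config = C3AConfig(block_size=block_size, target_modules=["q_proj", "v_proj"])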
{ "file_path": "peft/src/peft/tuners/c3a/config.py", "repo_id": "peft", "token_count": 2649 }
244
# Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Any import torch from peft.import_utils import is_bnb_4bit_available, is_bnb_available from .layer import IA3Layer if is_bnb_available(): class Linear8bitLt(torch.nn.Module, IA3Layer): # (IA)^3 implemented in a dense layer def __init__( self, base_layer: torch.nn.Module, adapter_name: str, is_feedforward: bool, init_ia3_weights: bool = True, **kwargs, ) -> None: super().__init__() IA3Layer.__init__(self, base_layer, is_feedforward=is_feedforward) # Freezing the pre-trained weight matrix self.get_base_layer().weight.requires_grad = False self._active_adapter = adapter_name self.update_layer(adapter_name, init_ia3_weights) def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor: # note: no check for self.merged because merging is not supported (yet) if self.disable_adapters: return self.base_layer(x) ia3_scaling = 1 for active_adapter in self.active_adapters: if active_adapter not in self.ia3_l.keys(): continue ia3_scaling *= self.ia3_l[active_adapter].flatten() requires_conversion = (not torch.is_autocast_enabled()) and (x.dtype != torch.float32) if requires_conversion: x = x.float() if self.is_feedforward: result = self.base_layer(x * ia3_scaling) expected_dtype = result.dtype else: result = self.base_layer(x) expected_dtype = result.dtype result = result * ia3_scaling if requires_conversion: result = result.to(expected_dtype) return result def __repr__(self) -> str: rep = super().__repr__() return "ia3." + rep if is_bnb_4bit_available(): class Linear4bit(torch.nn.Module, IA3Layer): # IA3 implemented in a dense layer def __init__( self, base_layer: torch.nn.Module, adapter_name: str, is_feedforward: bool, init_ia3_weights: bool = True, **kwargs, ) -> None: super().__init__() IA3Layer.__init__(self, base_layer, is_feedforward=is_feedforward) # Freezing the pre-trained weight matrix self.get_base_layer().weight.requires_grad = False self._active_adapter = adapter_name self.update_layer(adapter_name, init_ia3_weights) def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor: # note: no check for self.merged because merging is not supported (yet) if self.disable_adapters: return self.base_layer(x) ia3_scaling = 1 for active_adapter in self.active_adapters: if active_adapter not in self.ia3_l.keys(): continue ia3_scaling *= self.ia3_l[active_adapter].flatten() requires_conversion = (not torch.is_autocast_enabled()) and (x.dtype != torch.float32) if requires_conversion: x = x.float() if self.is_feedforward: result = self.base_layer(x * ia3_scaling) expected_dtype = result.dtype else: result = self.base_layer(x) expected_dtype = result.dtype result = result * ia3_scaling result = result.clone() # adalora.py and lora.py both suggest that this is necessary for 4-bit training on older versions of Pytorch. # This has been duplicated here. 
if requires_conversion: result = result.to(expected_dtype) return result def __repr__(self) -> str: rep = super().__repr__() return "ia3." + rep
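# A minimal sketch (illustrative, assumptions only) of the (IA)^3 scaling applied in the forward
# passes above, written without bitsandbytes: a learned per-feature vector rescales the input of
# feedforward modules or the output of attention modules. Shapes are chosen for the example.
import torch

x = torch.randn(2, 16, 64)      # (batch, seq_len, features)
ia3_l = torch.ones(64)          # learned scaling vector, initialized to ones
feedforward_in = x * ia3_l      # feedforward case: scale the input before the base layer
attention_out = x * ia3_l       # attention case: scale the base layer's output instead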
peft/src/peft/tuners/ia3/bnb.py/0
{ "file_path": "peft/src/peft/tuners/ia3/bnb.py", "repo_id": "peft", "token_count": 2193 }
245
# Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations from typing import Any import torch from accelerate.utils.imports import is_xpu_available from torch import nn from peft.utils.other import transpose from .dora import DoraConv1dLayer, DoraConv2dLayer, DoraConv3dLayer, DoraEmbeddingLayer, DoraLinearLayer from .layer import Conv1d, Conv2d, Conv3d, Embedding, Linear, LoraVariant, _ConvNd class DoraLinearVariant(LoraVariant): @staticmethod def init(module: Linear, adapter_name: str, **kwargs: Any) -> None: if not module.lora_magnitude_vector: # first dora layer being added, add lora_magnitude_vector to the list of learnable parameters module.adapter_layer_names = module.adapter_layer_names[:] + ("lora_magnitude_vector",) dora_layer = DoraLinearLayer(fan_in_fan_out=getattr(module, "fan_in_fan_out", False)) lora_A = module.lora_A[adapter_name].weight lora_B = module.lora_B[adapter_name].weight place_on_cpu = module.ephemeral_gpu_offload and (lora_A.device.type == "cpu" or lora_B.device.type == "cpu") if module.ephemeral_gpu_offload: if lora_A.device.type in ["cuda", "xpu"]: lora_B = lora_B.to(lora_A.device) else: if lora_B.device.type not in ["cuda", "xpu"]: if is_xpu_available(): lora_B = lora_B.to("xpu") else: lora_B = lora_B.to("cuda") lora_A = lora_A.to(lora_B.device) scaling = module.scaling[adapter_name] dora_layer.update_layer( base_layer=module.get_base_layer(), lora_A=lora_A, lora_B=lora_B, scaling=scaling, place_on_cpu=place_on_cpu, ) module.lora_magnitude_vector[adapter_name] = dora_layer @staticmethod def merge_safe(module: Linear, active_adapter: str, orig_weight: torch.Tensor) -> torch.Tensor: orig_dtype = orig_weight.dtype delta_weight = module.get_delta_weight(active_adapter) # since delta_weight already includes scaling, set it to 1 here weight_norm = ( module.lora_magnitude_vector[active_adapter] .get_weight_norm(orig_weight, transpose(delta_weight, module.fan_in_fan_out), scaling=1) .detach() ) # We need to cache weight_norm because it has to be based on the original weights. We # cannot calculate it on the fly based on the merged weights when unmerging because its a # different value module._cache_store(f"{active_adapter}-weight_norm", weight_norm) dora_factor = module.lora_magnitude_vector[active_adapter].weight / weight_norm dora_factor = transpose(dora_factor.view(-1, 1), module.fan_in_fan_out) new_weight = dora_factor * (orig_weight + delta_weight) new_weight = new_weight.to(orig_dtype) return new_weight @staticmethod def merge_unsafe(module: Linear, active_adapter: str, orig_weight: torch.Tensor) -> None: orig_dtype = orig_weight.dtype delta_weight = module.get_delta_weight(active_adapter) weight_norm = ( module.lora_magnitude_vector[active_adapter] .get_weight_norm(orig_weight, transpose(delta_weight, module.fan_in_fan_out), scaling=1) .detach() ) # We need to cache weight_norm because it has to be based on the original weights. 
We # cannot calculate it on the fly based on the merged weights when unmerging because its a # different value module._cache_store(f"{active_adapter}-weight_norm", weight_norm) dora_factor = module.lora_magnitude_vector[active_adapter].weight / weight_norm dora_factor = transpose(dora_factor.view(-1, 1), module.fan_in_fan_out) new_weight = dora_factor * (orig_weight.data + delta_weight) new_weight = new_weight.to(orig_dtype) orig_weight.data = new_weight @staticmethod def unmerge(module: Linear, active_adapter: str, orig_weight: torch.Tensor) -> torch.Tensor: orig_dtype = orig_weight.dtype delta_weight = module.get_delta_weight(active_adapter) weight_norm = module._cache_pop(f"{active_adapter}-weight_norm") dora_factor = module.lora_magnitude_vector[active_adapter].weight / weight_norm new_weight = orig_weight.data / dora_factor.view(-1, 1) - delta_weight new_weight = new_weight.to(orig_dtype) return new_weight @staticmethod def forward(module: Linear, active_adapter: str, x: torch.Tensor, result: torch.Tensor) -> torch.Tensor: lora_A = module.lora_A[active_adapter] lora_B = module.lora_B[active_adapter] dropout = module.lora_dropout[active_adapter] scaling = module.scaling[active_adapter] if isinstance(dropout, nn.Identity) or not module.training: base_result = result else: x = dropout(x) base_result = None result = result + module.lora_magnitude_vector[active_adapter]( x, lora_A=lora_A, lora_B=lora_B, scaling=scaling, base_layer=module.get_base_layer(), base_result=base_result, ) return result class DoraEmbeddingVariant(DoraLinearVariant): @staticmethod def init(module: Embedding, adapter_name: str, **kwargs: Any) -> None: if module.lora_magnitude_vector is None: # first dora layer being added, add lora_magnitude_vector to the list of learnable parameters module.adapter_layer_names = module.adapter_layer_names[:] + ("lora_magnitude_vector",) dora_layer = DoraEmbeddingLayer(fan_in_fan_out=True) lora_embedding_A = module.lora_embedding_A[adapter_name] lora_embedding_B = module.lora_embedding_B[adapter_name] scaling = module.scaling[adapter_name] dora_layer.update_layer( base_layer=module.get_base_layer(), lora_A=lora_embedding_A, lora_B=lora_embedding_B, scaling=scaling ) module.lora_magnitude_vector[adapter_name] = dora_layer @staticmethod def merge_safe(module: Embedding, active_adapter: str, orig_weight: torch.Tensor) -> torch.Tensor: orig_dtype = orig_weight.dtype delta_weight = module.get_delta_weight(active_adapter) # since delta_weight already includes scaling, set it to 1 here weight_norm = ( module.lora_magnitude_vector[active_adapter] .get_weight_norm(orig_weight, delta_weight.T, scaling=1) .detach() ) # We need to cache weight_norm because it has to be based on the original weights. 
We # cannot calculate it on the fly based on the merged weights when unmerging because its a # different value module._cache_store(f"{active_adapter}-weight_norm", weight_norm) dora_factor = module.lora_magnitude_vector[active_adapter].weight / weight_norm dora_factor = dora_factor.view(1, -1) new_weight = dora_factor * (orig_weight + delta_weight) new_weight = new_weight.to(orig_dtype) return new_weight @staticmethod def merge_unsafe(module: Embedding, active_adapter: str, orig_weight: torch.Tensor) -> None: orig_dtype = orig_weight.dtype delta_weight = module.get_delta_weight(active_adapter) weight_norm = ( module.lora_magnitude_vector[active_adapter] .get_weight_norm(orig_weight, delta_weight.T, scaling=1) .detach() ) # We need to cache weight_norm because it has to be based on the original weights. We # cannot calculate it on the fly based on the merged weights when unmerging because its a # different value module._cache_store(f"{active_adapter}-weight_norm", weight_norm) dora_factor = module.lora_magnitude_vector[active_adapter].weight / weight_norm dora_factor = dora_factor.view(1, -1) new_weight = dora_factor * (orig_weight.data + delta_weight) new_weight = new_weight.to(orig_dtype) orig_weight.data = new_weight @staticmethod def unmerge(module: Embedding, active_adapter: str, orig_weight: torch.Tensor) -> torch.Tensor: orig_dtype = orig_weight.dtype delta_weight = module.get_delta_weight(active_adapter) weight_norm = module._cache_pop(f"{active_adapter}-weight_norm") dora_factor = module.lora_magnitude_vector[active_adapter].weight / weight_norm new_weight = orig_weight.data / dora_factor.view(1, -1) - delta_weight new_weight = new_weight.to(orig_dtype) return new_weight @staticmethod def forward(module: Embedding, active_adapter: str, x: torch.Tensor, result: torch.Tensor) -> torch.Tensor: embedding_A = module.lora_embedding_A[active_adapter].T embedding_B = module.lora_embedding_B[active_adapter].T scaling = module.scaling[active_adapter] mag_norm_scale, dora_result = module.lora_magnitude_vector[active_adapter]( x, lora_A=embedding_A, lora_B=embedding_B, scaling=scaling, base_layer=module.get_base_layer(), embed_fn=module._embed, ) result = mag_norm_scale * result + dora_result return result class _DoraConvNdVariant(LoraVariant): @staticmethod def init_convd_variant(module: _ConvNd, adapter_name: str, dora_layer: nn.Module) -> None: if module.lora_magnitude_vector is None: # first dora layer being added, add lora_magnitude_vector to the list of learnable parameters module.adapter_layer_names = module.adapter_layer_names[:] + ("lora_magnitude_vector",) lora_A = module.lora_A[adapter_name].weight lora_B = module.lora_B[adapter_name].weight scaling = module.scaling[adapter_name] dora_layer.update_layer(base_layer=module.get_base_layer(), lora_A=lora_A, lora_B=lora_B, scaling=scaling) module.lora_magnitude_vector[adapter_name] = dora_layer @staticmethod def merge_safe(module: _ConvNd, active_adapter: str, orig_weight: torch.Tensor) -> torch.Tensor: orig_dtype = orig_weight.dtype delta_weight = module.get_delta_weight(active_adapter) # since delta_weight already includes scaling, set it to 1 here weight_norm = ( module.lora_magnitude_vector[active_adapter].get_weight_norm(orig_weight, delta_weight, scaling=1).detach() ) # We need to cache weight_norm because it has to be based on the original weights. 
We # cannot calculate it on the fly based on the merged weights when unmerging because its a # different value module._cache_store(f"{active_adapter}-weight_norm", weight_norm) dora_factor = module.lora_magnitude_vector[active_adapter].weight / weight_norm new_weight = dora_factor.view(*module._get_dora_factor_view()) * (orig_weight + delta_weight) new_weight = new_weight.to(orig_dtype) return new_weight @staticmethod def merge_unsafe(module: _ConvNd, active_adapter: str, orig_weight: torch.Tensor) -> None: orig_dtype = orig_weight.dtype delta_weight = module.get_delta_weight(active_adapter) # since delta_weight already includes scaling, set it to 1 here weight_norm = ( module.lora_magnitude_vector[active_adapter].get_weight_norm(orig_weight, delta_weight, scaling=1).detach() ) # We need to cache weight_norm because it has to be based on the original weights. We # cannot calculate it on the fly based on the merged weights when unmerging because its a # different value module._cache_store(f"{active_adapter}-weight_norm", weight_norm) dora_factor = module.lora_magnitude_vector[active_adapter].weight / weight_norm new_weight = dora_factor.view(*module._get_dora_factor_view()) * (orig_weight.data + delta_weight) new_weight = new_weight.to(orig_dtype) orig_weight.data = new_weight @staticmethod def unmerge(module: _ConvNd, active_adapter: str, orig_weight: torch.Tensor) -> torch.Tensor: orig_dtype = orig_weight.dtype delta_weight = module.get_delta_weight(active_adapter) weight_norm = module._cache_pop(f"{active_adapter}-weight_norm") dora_factor = module.lora_magnitude_vector[active_adapter].weight / weight_norm new_weight = orig_weight.data / dora_factor.view(*module._get_dora_factor_view()) - delta_weight new_weight = new_weight.to(orig_dtype) return new_weight @staticmethod def forward(module: _ConvNd, active_adapter: str, x: torch.Tensor, result: torch.Tensor) -> torch.Tensor: lora_A = module.lora_A[active_adapter] lora_B = module.lora_B[active_adapter] dropout = module.lora_dropout[active_adapter] scaling = module.scaling[active_adapter] if isinstance(dropout, nn.Identity) or not module.training: base_result = result else: x = dropout(x) base_result = None result = result + module.lora_magnitude_vector[active_adapter]( x, lora_A=lora_A, lora_B=lora_B, scaling=scaling, base_layer=module.get_base_layer(), base_result=base_result, ) return result class DoraConv1dVariant(_DoraConvNdVariant): @staticmethod def init(module: Conv1d, adapter_name: str, **kwargs: Any) -> None: dora_layer = DoraConv1dLayer(fan_in_fan_out=False) _DoraConvNdVariant.init_convd_variant(module, adapter_name, dora_layer=dora_layer) class DoraConv2dVariant(_DoraConvNdVariant): @staticmethod def init(module: Conv2d, adapter_name: str, **kwargs: Any) -> None: dora_layer = DoraConv2dLayer(fan_in_fan_out=False) _DoraConvNdVariant.init_convd_variant(module, adapter_name, dora_layer=dora_layer) class DoraConv3dVariant(_DoraConvNdVariant): @staticmethod def init(module: Conv3d, adapter_name: str, **kwargs: Any) -> None: dora_layer = DoraConv3dLayer(fan_in_fan_out=False) _DoraConvNdVariant.init_convd_variant(module, adapter_name, dora_layer=dora_layer) class QALoraLinearVariant(LoraVariant): @staticmethod def init(module: Linear, adapter_name: str, **kwargs: Any) -> None: """ Initializes QALoRA specific parameters for a given adapter. Args: module (Linear): The linear module to be adapted. adapter_name (str): The name of the adapter. **kwargs: Additional keyword arguments. qalora_group_size (int): The size of groups for pooling. 
This is expected to be passed. """ if "qalora_group_size" not in kwargs: raise ValueError( "`use_qalora=True` requires 'qalora_group_size' to be provided in kwargs." " Please ensure it is passed from the LoraConfig." ) if module.in_features is not None and module.in_features % kwargs["qalora_group_size"] != 0: raise ValueError( f"`use_qalora=True` requires `module.in_features` ({module.in_features}) to be" f"divisible by 'qalora_group_size' ({kwargs['qalora_group_size']})" ) qalora_group_size = kwargs["qalora_group_size"] if "qalora_group_size" not in module.other_param_names: module.other_param_names = module.other_param_names + ("qalora_group_size",) if not hasattr(module, "qalora_group_size"): module.qalora_group_size = {} module.qalora_group_size[adapter_name] = qalora_group_size old_lora_A_layer = module.lora_A[adapter_name] r = old_lora_A_layer.out_features device = old_lora_A_layer.weight.device dtype = old_lora_A_layer.weight.dtype new_lora_A_layer = nn.Linear( old_lora_A_layer.in_features // module.qalora_group_size[adapter_name], r, bias=False, device=device, dtype=dtype, ) module.lora_A[adapter_name] = new_lora_A_layer @staticmethod def get_delta_weight(module: Linear, active_adapter: str) -> torch.Tensor: raise NotImplementedError("QALoRA for GPTQ layers does not support 'get_delta_weight'.") @staticmethod def merge_safe(module: Linear, active_adapter: str, orig_weight: torch.Tensor) -> torch.Tensor: raise NotImplementedError("QALoRA for GPTQ layers does not support 'safe_merge'.") @staticmethod def merge_unsafe(module: Linear, active_adapter: str, orig_weight: torch.Tensor) -> None: raise NotImplementedError("QALoRA for GPTQ layers does not support 'merge_unsafe'.") @staticmethod def unmerge(module: Linear, active_adapter: str, orig_weight: torch.Tensor) -> torch.Tensor: raise NotImplementedError("QALoRA for GPTQ layers does not support 'unmerge'.") @staticmethod def forward(module: Linear, active_adapter: str, x: torch.Tensor, result: torch.Tensor) -> torch.Tensor: lora_A_weight = module.lora_A[active_adapter].weight lora_B_weight = module.lora_B[active_adapter].weight dropout = module.lora_dropout[active_adapter] scaling = module.scaling[active_adapter] group_size = module.qalora_group_size[active_adapter] x_dropped = dropout(x) if module.training and not isinstance(dropout, nn.Identity) else x orig_shape = x_dropped.shape # Reshape to 2D if len(orig_shape) > 2: x_flat = x_dropped.view(-1, module.in_features) else: x_flat = x_dropped batch_size, in_features = x_flat.shape pooled_features = in_features // group_size x_pooled = x_flat.view(batch_size, pooled_features, group_size).mean(dim=2) x_pooled_scaled = x_pooled * pooled_features # LoRA computation delta = x_pooled_scaled @ lora_A_weight.t() @ lora_B_weight.t() * scaling # Reshape back if len(orig_shape) > 2: delta = delta.view(orig_shape[:-1] + (delta.size(-1),)) return result + delta
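# A minimal sketch (illustrative, not part of variants.py) of the QALoRA input pooling used in the
# forward above: inputs are grouped along the feature dimension, mean-pooled, and rescaled before
# entering the shrunken lora_A projection. Shapes are assumptions for the example.
import torch

batch, in_features, group_size, r = 4, 64, 8, 16
x = torch.randn(batch, in_features)
pooled_features = in_features // group_size              # 8 pooled features
x_pooled = x.view(batch, pooled_features, group_size).mean(dim=2) * pooled_features
lora_A = torch.randn(r, pooled_features)                 # lora_A maps pooled features -> r
delta = x_pooled @ lora_A.t()                            # shape: (batch, r)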
peft/src/peft/tuners/lora/variants.py/0
{ "file_path": "peft/src/peft/tuners/lora/variants.py", "repo_id": "peft", "token_count": 8209 }
246
# Copyright 2024-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Any, Optional import torch from peft.import_utils import is_eetq_available from peft.tuners.oft.layer import OFTLayer from peft.tuners.tuners_utils import BaseTunerLayer if is_eetq_available(): from eetq import EetqLinear class EetqOFTLinear(torch.nn.Module, OFTLayer): def __init__( self, base_layer, adapter_name, r: int = 0, oft_block_size: int = 0, module_dropout: float = 0.0, init_weights: bool = True, coft: bool = False, eps: float = 6e-5, block_share: bool = False, use_cayley_neumann: bool = False, num_cayley_neumann_terms: int = 5, fan_in_fan_out: bool = False, **kwargs, ): super().__init__() OFTLayer.__init__(self, base_layer) # self.base_layer and self.quant_linear_module are the same; we need the former for consistency and the latter # for backwards compatibility self.quant_linear_module = base_layer self._active_adapter = adapter_name self.update_layer( adapter_name, r, oft_block_size=oft_block_size, module_dropout=module_dropout, init_weights=init_weights, coft=coft, eps=eps, block_share=block_share, fan_in_fan_out=fan_in_fan_out, use_cayley_neumann=use_cayley_neumann, num_cayley_neumann_terms=num_cayley_neumann_terms, ) def forward(self, x: torch.Tensor): if self.disable_adapters: return self.quant_linear_module(x) for active_adapter in self.active_adapters: if active_adapter not in self.oft_R.keys(): continue oft_R = self.oft_R[active_adapter] requires_conversion = not torch.is_autocast_enabled() if requires_conversion: expected_dtype = x.dtype x = self._cast_input_dtype(x, oft_R.weight.dtype) x = oft_R(x) result = self.quant_linear_module(x) if requires_conversion: result = result.to(expected_dtype) return result def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None: raise AttributeError("Merging LoRA layers is not supported for Eetq layers.") def unmerge(self) -> None: raise AttributeError("Unmerging LoRA layers is not supported for Eetq layers.") def __repr__(self) -> str: rep = super().__repr__() return "oft." + rep def dispatch_eetq( target: torch.nn.Module, adapter_name: str, **kwargs: Any, ) -> Optional[torch.nn.Module]: new_module = None if isinstance(target, BaseTunerLayer): target_base_layer = target.get_base_layer() else: target_base_layer = target if is_eetq_available() and isinstance(target_base_layer, EetqLinear): new_module = EetqOFTLinear(target, adapter_name, **kwargs) target.weight = target_base_layer.weight if hasattr(target, "bias"): target.bias = target_base_layer.bias return new_module
peft/src/peft/tuners/oft/eetq.py/0
{ "file_path": "peft/src/peft/tuners/oft/eetq.py", "repo_id": "peft", "token_count": 1865 }
247
# Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Based on https://github.com/THUDM/P-tuning-v2/blob/main/model/prefix_encoder.py # with some refactor import torch class PrefixEncoder(torch.nn.Module): r""" The `torch.nn` model to encode the prefix. Args: config ([`PrefixTuningConfig`]): The configuration of the prefix encoder. Example: ```py >>> from peft import PrefixEncoder, PrefixTuningConfig >>> config = PrefixTuningConfig( ... peft_type="PREFIX_TUNING", ... task_type="SEQ_2_SEQ_LM", ... num_virtual_tokens=20, ... token_dim=768, ... num_transformer_submodules=1, ... num_attention_heads=12, ... num_layers=12, ... encoder_hidden_size=768, ... ) >>> prefix_encoder = PrefixEncoder(config) ``` **Attributes**: - **embedding** (`torch.nn.Embedding`) -- The embedding layer of the prefix encoder. - **transform** (`torch.nn.Sequential`) -- The two-layer MLP to transform the prefix embeddings if `prefix_projection` is `True`. - **prefix_projection** (`bool`) -- Whether to project the prefix embeddings. Input shape: (`batch_size`, `num_virtual_tokens`) Output shape: (`batch_size`, `num_virtual_tokens`, `2*layers*hidden`) """ def __init__(self, config): super().__init__() self.prefix_projection = config.prefix_projection token_dim = config.token_dim num_layers = config.num_layers encoder_hidden_size = config.encoder_hidden_size num_virtual_tokens = config.num_virtual_tokens if self.prefix_projection and not config.inference_mode: # Use a two-layer MLP to encode the prefix self.embedding = torch.nn.Embedding(num_virtual_tokens, token_dim) self.transform = torch.nn.Sequential( torch.nn.Linear(token_dim, encoder_hidden_size), torch.nn.Tanh(), torch.nn.Linear(encoder_hidden_size, num_layers * 2 * token_dim), ) else: self.embedding = torch.nn.Embedding(num_virtual_tokens, num_layers * 2 * token_dim) def forward(self, prefix: torch.Tensor): if self.prefix_projection: prefix_tokens = self.embedding(prefix) past_key_values = self.transform(prefix_tokens) else: past_key_values = self.embedding(prefix) return past_key_values
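# A minimal sketch (illustrative): verifying the documented output shape
# (batch_size, num_virtual_tokens, 2 * num_layers * token_dim) using the same config values as the
# docstring example above; `prefix_encoder` is assumed to be built exactly as in that example.
import torch

prefix = torch.arange(20).unsqueeze(0)        # (batch_size=1, num_virtual_tokens=20)
past_key_values = prefix_encoder(prefix)
assert past_key_values.shape == (1, 20, 2 * 12 * 768)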
peft/src/peft/tuners/prefix_tuning/model.py/0
{ "file_path": "peft/src/peft/tuners/prefix_tuning/model.py", "repo_id": "peft", "token_count": 1228 }
248
# Copyright 2025-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import warnings from typing import Optional import torch import torch.nn as nn import torch.nn.functional as F from peft.tuners.tuners_utils import BaseTunerLayer, check_adapters_to_merge class ShiraLayer(BaseTunerLayer): # List all names of layers that may contain trainable adapter weights adapter_layer_names = ("shira_weight",) # All names of other adapter-related parameters other_param_names = ("r", "scaling", "shira_indices") def __init__(self, base_layer: nn.Module, **kwargs): self.base_layer = base_layer self.r = {} self.scaling = {} self.shira_weight = nn.ParameterDict({}) self.shira_indices = {} self.weight_shape = base_layer.weight.shape # Assumes SHiRA is on some layer with "weight" parameter # Mark the weight as unmerged self._disable_adapters = False self.merged_adapters = [] base_layer = self.get_base_layer() if isinstance(base_layer, nn.Linear): in_features, out_features = base_layer.in_features, base_layer.out_features else: raise NotImplementedError("Only nn.Linear layers supported currently") self.in_features = in_features self.out_features = out_features self.kwargs = kwargs def update_layer( self, adapter_name, mask, r, init_weights: bool = True, ): if r <= 0: raise ValueError(f"`r` should be a positive integer value but the value passed is {r}") self.r[adapter_name] = r self.scaling[adapter_name] = ( 1.0 # Default scale during training. Can be set to any (non-negative) value during inference. ) # The number of shira weights in this layer is determined by r such that the total number of weights is the same as a LoRA Layer (for direct comparisons) num_shira_weight = r * (self.in_features + self.out_features) if num_shira_weight > self.in_features * self.out_features: raise ValueError( f"The set rank {r} results in more shira params than the total number of params in the base layer {self.in_features * self.out_features} and this is not allowed." ) # Actual trainable parameters # We have used a vector parameter with fixed indices that we use inside a torch.sparse_coo_tensor in get_delta_weight function. # Directly using a torch.sparse_coo_tensor as a parameter could have been possible but we ran into some issues similar to: # https://github.com/pytorch/pytorch/issues/79542. shira_init_weight = torch.zeros(num_shira_weight) if init_weights else torch.randn(num_shira_weight) self.shira_weight[adapter_name] = nn.Parameter( shira_init_weight.to(self.base_layer.weight.dtype).to(self.base_layer.weight.device), requires_grad=True, ) if mask is not None: # Compute the shira_indices from the mask. Make sure the mask is formed using r*(self.in_features + self.out_features) and not some other K. 
mask_indices = torch.where(mask == 1.0) self.shira_indices[adapter_name] = torch.cat( [mask_indices[0].unsqueeze(0), mask_indices[1].unsqueeze(0)], 0 ).to(torch.int) self.shira_indices[adapter_name] = self.shira_indices[adapter_name].to(self.base_layer.weight.device) if self.shira_indices[adapter_name].shape[1] != self.shira_weight[adapter_name].shape[0]: raise ValueError( f"The SHiRA indices and weights are not the same dimensions for adapter {adapter_name} in layer {self.base_layer}" ) self._move_adapter_to_device_of_base_layer(adapter_name) self.set_adapter(self.active_adapters) def reset_shira_parameters(self, adapter_name): nn.init.zeros_(self.shira_weight[adapter_name]) def set_scale(self, adapter, scale): if adapter not in self.scaling: # Ignore the case where the adapter is not in the layer return self.scaling[adapter] = scale class Linear(nn.Module, ShiraLayer): # SHiRA implemented in a dense layer def __init__( self, base_layer, mask, adapter_name: str, r: int = 0, fan_in_fan_out: bool = False, # Set this to True if the layer to replace stored weight like (fan_in, fan_out) init_weights: bool = True, **kwargs, ) -> None: super().__init__() ShiraLayer.__init__(self, base_layer, **kwargs) self.fan_in_fan_out = fan_in_fan_out if self.base_layer is not self.get_base_layer(): raise ValueError("SHiRA does not support nested base layers") self._active_adapter = adapter_name self.update_layer(adapter_name, mask, r, init_weights=init_weights) def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None: """ Merge the active adapter weights into the base weights Args: safe_merge (`bool`, *optional*): If True, the merge operation will be performed in a copy of the original weights and check for NaNs before merging the weights. This is useful if you want to check if the merge operation will produce NaNs. Defaults to `False`. adapter_names (`List[str]`, *optional*): The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults to `None`. """ adapter_names = check_adapters_to_merge(self, adapter_names) if not adapter_names: # no adapter to merge return for active_adapter in adapter_names: if active_adapter in self.shira_weight.keys(): base_layer = self.get_base_layer() if safe_merge: # Note that safe_merge will be slower than the normal merge # because of the copy operation. orig_weights = base_layer.weight.data.clone() orig_weights += self.get_delta_weight(active_adapter) if not torch.isfinite(orig_weights).all(): raise ValueError( f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken" ) base_layer.weight.data = orig_weights else: base_layer.weight.data += self.get_delta_weight(active_adapter) self.merged_adapters.append(active_adapter) def unmerge(self) -> None: if not self.merged: warnings.warn("Already unmerged. Nothing to do.") return while len(self.merged_adapters) > 0: active_adapter = self.merged_adapters.pop() if active_adapter in self.shira_weight.keys(): self.get_base_layer().weight.data -= self.get_delta_weight(active_adapter) def get_delta_weight(self, adapter) -> torch.Tensor: """ Compute the delta weight for the given adapter. Args: adapter (str): The name of the adapter for which the delta weight should be computed. """ # In multi-gpu environment, the indices are at the wrong gpu. This is needed to correct this. 
self.shira_indices[adapter] = self.shira_indices[adapter].to(self.shira_weight[adapter].device) return torch.sparse_coo_tensor( self.shira_indices[adapter], self.shira_weight[adapter] * self.scaling[adapter], self.weight_shape ) def forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor: if self.disable_adapters: if self.merged: self.unmerge() result = self.base_layer(x, *args, **kwargs) elif self.merged: result = self.base_layer(x, *args, **kwargs) else: new_weight = copy.deepcopy(self.base_layer.weight.data) for active_adapter in self.active_adapters: if active_adapter not in self.shira_weight.keys(): continue new_weight += self.get_delta_weight(active_adapter) result = F.linear(x, new_weight, bias=self.base_layer.bias) return result def __repr__(self) -> str: rep = super().__repr__() return "shira." + rep
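# A minimal sketch (illustrative, not part of layer.py): constructing a random mask with exactly
# r * (in_features + out_features) ones, the number of trainable SHiRA weights that update_layer
# above expects. Dimensions are assumptions for the example.
import torch

out_features, in_features, r = 32, 64, 2
num_shira_weight = r * (in_features + out_features)        # 192 trainable entries
mask = torch.zeros(out_features, in_features)              # same shape as the base layer weight
chosen = torch.randperm(out_features * in_features)[:num_shira_weight]
mask.view(-1)[chosen] = 1.0
assert int(mask.sum().item()) == num_shira_weight          # mask selects exactly the expected entries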
peft/src/peft/tuners/shira/layer.py/0
{ "file_path": "peft/src/peft/tuners/shira/layer.py", "repo_id": "peft", "token_count": 3917 }
249
# Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import pytest import torch from transformers import AutoModel from peft import ( AdaLoraConfig, BOFTConfig, BoneConfig, C3AConfig, FourierFTConfig, HRAConfig, IA3Config, LoraConfig, MissConfig, OFTConfig, PrefixTuningConfig, PromptEncoderConfig, PromptLearningConfig, PromptTuningConfig, RoadConfig, ShiraConfig, VBLoRAConfig, VeraConfig, ) from .testing_common import PeftCommonTester from .testing_utils import set_init_weights_false PEFT_FEATURE_EXTRACTION_MODELS_TO_TEST = [ "hf-internal-testing/tiny-random-BertModel", "hf-internal-testing/tiny-random-RobertaModel", "hf-internal-testing/tiny-random-DebertaModel", "hf-internal-testing/tiny-random-DebertaV2Model", ] # TODO Missing from this list are LoKr, LoHa, LN Tuning, add them ALL_CONFIGS = [ ( AdaLoraConfig, { "task_type": "FEATURE_EXTRACTION", "target_modules": None, "total_step": 1, }, ), ( BOFTConfig, { "task_type": "FEATURE_EXTRACTION", "target_modules": None, }, ), ( BoneConfig, { "task_type": "FEATURE_EXTRACTION", "target_modules": None, "r": 2, }, ), ( MissConfig, { "task_type": "FEATURE_EXTRACTION", "target_modules": None, "r": 2, }, ), ( FourierFTConfig, { "task_type": "FEATURE_EXTRACTION", "n_frequency": 10, "target_modules": None, }, ), ( HRAConfig, { "task_type": "FEATURE_EXTRACTION", "target_modules": None, }, ), ( IA3Config, { "task_type": "FEATURE_EXTRACTION", "target_modules": None, "feedforward_modules": None, }, ), ( LoraConfig, { "task_type": "FEATURE_EXTRACTION", "r": 8, "lora_alpha": 32, "target_modules": None, "lora_dropout": 0.05, "bias": "none", }, ), # LoRA + trainable tokens ( LoraConfig, { "task_type": "FEATURE_EXTRACTION", "r": 8, "lora_alpha": 32, "target_modules": None, "lora_dropout": 0.05, "bias": "none", "trainable_token_indices": [0, 1, 3], }, ), ( OFTConfig, { "task_type": "FEATURE_EXTRACTION", "target_modules": None, }, ), ( PrefixTuningConfig, { "task_type": "FEATURE_EXTRACTION", "num_virtual_tokens": 10, }, ), ( PromptEncoderConfig, { "task_type": "FEATURE_EXTRACTION", "num_virtual_tokens": 10, "encoder_hidden_size": 32, }, ), ( PromptTuningConfig, { "task_type": "FEATURE_EXTRACTION", "num_virtual_tokens": 10, }, ), ( RoadConfig, { "task_type": "FEATURE_EXTRACTION", "variant": "road_1", "group_size": 2, }, ), ( ShiraConfig, { "r": 1, "task_type": "FEATURE_EXTRACTION", "target_modules": None, "init_weights": False, }, ), ( VBLoRAConfig, { "task_type": "FEATURE_EXTRACTION", "target_modules": None, "vblora_dropout": 0.05, "vector_length": 1, "num_vectors": 2, }, ), ( VeraConfig, { "task_type": "FEATURE_EXTRACTION", "r": 8, "target_modules": None, "vera_dropout": 0.05, "projection_prng_key": 0xFF, "d_initial": 0.1, "save_projection": True, "bias": "none", }, ), ( C3AConfig, { "task_type": "FEATURE_EXTRACTION", "block_size": 1, "target_modules": None, }, ), ] def skip_non_prompt_learning(config_cls): if not issubclass(config_cls, PromptLearningConfig) or (config_cls == PrefixTuningConfig): pytest.skip("Skip tests that are 
not prompt learning or that are prefix tuning") def skip_deberta_lora_tests(config_cls, model_id): if "deberta" not in model_id.lower(): return to_skip = ["lora", "ia3", "boft", "vera", "fourierft", "hra", "bone", "randlora"] config_name = config_cls.__name__.lower() if any(k in config_name for k in to_skip): pytest.skip(f"Skip tests that use {config_name} for Deberta models") def skip_deberta_pt_tests(config_cls, model_id): if "deberta" not in model_id.lower(): return to_skip = ["prefix"] config_name = config_cls.__name__.lower() if any(k in config_name for k in to_skip): pytest.skip(f"Skip tests that use {config_name} for Deberta models") class TestPeftFeatureExtractionModel(PeftCommonTester): """ Test if the PeftModel behaves as expected. This includes: - test if the model has the expected methods """ transformers_class = AutoModel def skipTest(self, reason=""): # for backwards compatibility with unittest style test classes pytest.skip(reason) def prepare_inputs_for_testing(self): input_ids = torch.tensor([[1, 1, 1], [1, 2, 1]]).to(self.torch_device) attention_mask = torch.tensor([[1, 1, 1], [1, 0, 1]]).to(self.torch_device) input_dict = { "input_ids": input_ids, "attention_mask": attention_mask, } return input_dict @pytest.mark.parametrize("model_id", PEFT_FEATURE_EXTRACTION_MODELS_TO_TEST) @pytest.mark.parametrize("config_cls,config_kwargs", ALL_CONFIGS) def test_attributes_parametrized(self, model_id, config_cls, config_kwargs): self._test_model_attr(model_id, config_cls, config_kwargs) @pytest.mark.parametrize("model_id", PEFT_FEATURE_EXTRACTION_MODELS_TO_TEST) @pytest.mark.parametrize("config_cls,config_kwargs", ALL_CONFIGS) def test_adapter_name(self, model_id, config_cls, config_kwargs): self._test_adapter_name(model_id, config_cls, config_kwargs) @pytest.mark.parametrize("model_id", PEFT_FEATURE_EXTRACTION_MODELS_TO_TEST) @pytest.mark.parametrize("config_cls,config_kwargs", ALL_CONFIGS) def test_prepare_for_training_parametrized(self, model_id, config_cls, config_kwargs): self._test_prepare_for_training(model_id, config_cls, config_kwargs) @pytest.mark.parametrize("model_id", PEFT_FEATURE_EXTRACTION_MODELS_TO_TEST) @pytest.mark.parametrize("config_cls,config_kwargs", ALL_CONFIGS) def test_save_pretrained(self, model_id, config_cls, config_kwargs): self._test_save_pretrained(model_id, config_cls, config_kwargs) @pytest.mark.parametrize("model_id", PEFT_FEATURE_EXTRACTION_MODELS_TO_TEST) @pytest.mark.parametrize("config_cls,config_kwargs", ALL_CONFIGS) def test_save_pretrained_selected_adapters(self, model_id, config_cls, config_kwargs): self._test_save_pretrained_selected_adapters(model_id, config_cls, config_kwargs) def test_load_model_low_cpu_mem_usage(self): self._test_load_model_low_cpu_mem_usage(PEFT_FEATURE_EXTRACTION_MODELS_TO_TEST[0], LoraConfig, {}) @pytest.mark.parametrize("model_id", PEFT_FEATURE_EXTRACTION_MODELS_TO_TEST) @pytest.mark.parametrize("config_cls,config_kwargs", ALL_CONFIGS) def test_from_pretrained_config_construction(self, model_id, config_cls, config_kwargs): self._test_from_pretrained_config_construction(model_id, config_cls, config_kwargs) @pytest.mark.parametrize("model_id", PEFT_FEATURE_EXTRACTION_MODELS_TO_TEST) @pytest.mark.parametrize("config_cls,config_kwargs", ALL_CONFIGS) def test_merge_layers(self, model_id, config_cls, config_kwargs): config_kwargs = set_init_weights_false(config_cls, config_kwargs) self._test_merge_layers(model_id, config_cls, config_kwargs) @pytest.mark.parametrize("model_id", PEFT_FEATURE_EXTRACTION_MODELS_TO_TEST) 
@pytest.mark.parametrize("config_cls,config_kwargs", ALL_CONFIGS) def test_training(self, model_id, config_cls, config_kwargs): self._test_training(model_id, config_cls, config_kwargs) @pytest.mark.parametrize("model_id", PEFT_FEATURE_EXTRACTION_MODELS_TO_TEST) @pytest.mark.parametrize("config_cls,config_kwargs", ALL_CONFIGS) def test_training_prompt_learning_tasks(self, model_id, config_cls, config_kwargs): skip_deberta_pt_tests(config_cls, model_id) self._test_training_prompt_learning_tasks(model_id, config_cls, config_kwargs) @pytest.mark.parametrize("model_id", PEFT_FEATURE_EXTRACTION_MODELS_TO_TEST) @pytest.mark.parametrize("config_cls,config_kwargs", ALL_CONFIGS) def test_training_layer_indexing(self, model_id, config_cls, config_kwargs): self._test_training_layer_indexing(model_id, config_cls, config_kwargs) @pytest.mark.parametrize("model_id", PEFT_FEATURE_EXTRACTION_MODELS_TO_TEST) @pytest.mark.parametrize("config_cls,config_kwargs", ALL_CONFIGS) def test_training_gradient_checkpointing(self, model_id, config_cls, config_kwargs): skip_deberta_lora_tests(config_cls, model_id) self._test_training_gradient_checkpointing(model_id, config_cls, config_kwargs) @pytest.mark.parametrize("model_id", PEFT_FEATURE_EXTRACTION_MODELS_TO_TEST) @pytest.mark.parametrize("config_cls,config_kwargs", ALL_CONFIGS) def test_inference_safetensors(self, model_id, config_cls, config_kwargs): self._test_inference_safetensors(model_id, config_cls, config_kwargs) @pytest.mark.parametrize("model_id", PEFT_FEATURE_EXTRACTION_MODELS_TO_TEST) @pytest.mark.parametrize("config_cls,config_kwargs", ALL_CONFIGS) def test_peft_model_device_map(self, model_id, config_cls, config_kwargs): self._test_peft_model_device_map(model_id, config_cls, config_kwargs) @pytest.mark.parametrize("model_id", PEFT_FEATURE_EXTRACTION_MODELS_TO_TEST) @pytest.mark.parametrize("config_cls,config_kwargs", ALL_CONFIGS) def test_delete_adapter(self, model_id, config_cls, config_kwargs): self._test_delete_adapter(model_id, config_cls, config_kwargs) @pytest.mark.parametrize("model_id", PEFT_FEATURE_EXTRACTION_MODELS_TO_TEST) @pytest.mark.parametrize("config_cls,config_kwargs", ALL_CONFIGS) def test_delete_inactive_adapter(self, model_id, config_cls, config_kwargs): self._test_delete_inactive_adapter(model_id, config_cls, config_kwargs) @pytest.mark.parametrize("model_id", PEFT_FEATURE_EXTRACTION_MODELS_TO_TEST) @pytest.mark.parametrize("config_cls,config_kwargs", ALL_CONFIGS) def test_unload_adapter(self, model_id, config_cls, config_kwargs): config_kwargs = set_init_weights_false(config_cls, config_kwargs) self._test_unload_adapter(model_id, config_cls, config_kwargs) @pytest.mark.parametrize("model_id", PEFT_FEATURE_EXTRACTION_MODELS_TO_TEST) @pytest.mark.parametrize("config_cls,config_kwargs", ALL_CONFIGS) def test_weighted_combination_of_adapters(self, model_id, config_cls, config_kwargs): config_kwargs = set_init_weights_false(config_cls, config_kwargs) self._test_weighted_combination_of_adapters(model_id, config_cls, config_kwargs) @pytest.mark.parametrize("model_id", PEFT_FEATURE_EXTRACTION_MODELS_TO_TEST) @pytest.mark.parametrize("config_cls,config_kwargs", ALL_CONFIGS) def test_passing_input_embeds_works(self, model_id, config_cls, config_kwargs): skip_non_prompt_learning(config_cls) self._test_passing_input_embeds_works("test input embeds work", model_id, config_cls, config_kwargs)
peft/tests/test_feature_extraction_models.py/0
{ "file_path": "peft/tests/test_feature_extraction_models.py", "repo_id": "peft", "token_count": 5885 }
250
# Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import pytest import torch from torch import nn from transformers import AutoModelForCausalLM, AutoModelForSequenceClassification, LlavaForConditionalGeneration from peft import LoraConfig, PeftModel, VeraConfig, get_peft_model from peft.utils.other import ModulesToSaveWrapper, _get_no_split_modules class ModelWithModuleDict(nn.Module): def __init__(self): super().__init__() self.other_layer = nn.Linear(10, 10) self.module = nn.ModuleDict({"foo": nn.Linear(10, 10)}) def forward(self): return self.module["foo"](torch.rand(1, 10)) class ModelWithModuleList(nn.Module): def __init__(self): super().__init__() self.other_layer = nn.Linear(10, 10) self.module = nn.ModuleList([nn.Linear(10, 10)]) def forward(self): return self.module[0](torch.rand(1, 10)) class ModelWithParameterDict(nn.Module): def __init__(self): super().__init__() self.other_layer = nn.Linear(10, 10) self.module = nn.ParameterDict({"foo": nn.Parameter(torch.rand(10, 10))}) def forward(self): return self.module["foo"] class ModelWithParameterList(nn.Module): def __init__(self): super().__init__() self.other_layer = nn.Linear(10, 10) self.module = nn.ParameterList([nn.Parameter(torch.rand(10, 10))]) def forward(self): return self.module[0] @pytest.mark.parametrize( "cls", [ModelWithModuleDict, ModelWithModuleList, ModelWithParameterDict, ModelWithParameterList] ) def test_modules_to_save_targets_module_dict_raises(cls): model = cls() peft_config = LoraConfig( target_modules=["other_layer"], modules_to_save=["module"], ) model() # sanity check that the model would normally work msg = "modules_to_save cannot be applied to modules of type" with pytest.raises(TypeError, match=msg): get_peft_model(model=model, peft_config=peft_config) def test_get_peft_model_revision_warning(tmp_path): base_model_id = "peft-internal-testing/tiny-random-BertModel" base_revision = "v2.0.0" base_model = AutoModelForCausalLM.from_pretrained(base_model_id, revision=base_revision).eval() lora_config = LoraConfig(revision=base_revision) overwrite_revision = "main" overwrite_warning = f"peft config has already set base model revision to {base_revision}, overwriting with revision {overwrite_revision}" with pytest.warns(UserWarning, match=overwrite_warning): _ = get_peft_model(base_model, lora_config, revision=overwrite_revision) def test_load_multiple_adapters_different_modules_to_save(tmp_path): # This tests the error described in #2422 where loading multiple adapters with different modules_to_save # attributes fails (due to a regression from #2376). 
model = AutoModelForCausalLM.from_pretrained("trl-internal-testing/tiny-random-LlamaForCausalLM") def peft_config(**kwargs): return LoraConfig(target_modules="all-linear", **kwargs) original_model = copy.deepcopy(model) peft_config_0 = peft_config(modules_to_save=["0.post_attention_layernorm"]) peft_config_1 = peft_config(modules_to_save=["0.post_attention_layernorm"]) peft_config_2 = peft_config(modules_to_save=["1.post_attention_layernorm"]) # Save adapter 0, nothing fancy, should be equal to base model weighs peft_model = get_peft_model(copy.deepcopy(original_model), peft_config_0) peft_model.save_pretrained(tmp_path / "adapter_0") # Save adapter 1, modules to save weights are modified randomly, should be unique to adapter 1 peft_model = get_peft_model(copy.deepcopy(original_model), peft_config_1) peft_model.model.model.layers[0].post_attention_layernorm.weight.data = torch.rand_like( peft_model.model.model.layers[0].post_attention_layernorm.weight.data ) adapter_1_saved = peft_model.model.model.layers[0].post_attention_layernorm.weight.data.clone() peft_model.save_pretrained(tmp_path / "adapter_1") # Save adapter 2, modules to save weights are modified randomly, should be unique to adapter 2 peft_model = get_peft_model(copy.deepcopy(original_model), peft_config_2) peft_model.model.model.layers[1].post_attention_layernorm.weight.data = torch.rand_like( peft_model.model.model.layers[1].post_attention_layernorm.weight.data ) adapter_2_saved = peft_model.model.model.layers[1].post_attention_layernorm.weight.data.clone() peft_model.save_pretrained(tmp_path / "adapter_2") del peft_model combined_model = PeftModel.from_pretrained(original_model, tmp_path / "adapter_0", adapter_name="adapter_0") combined_model.load_adapter(tmp_path / "adapter_1", adapter_name="adapter_1") combined_model.load_adapter(tmp_path / "adapter_2", adapter_name="adapter_2") # For adapter 0 we expect every mentioned modules to save layer of this test to be equal to the original model # since we didn't modify it for adapter 0 and only adapter 0 is active. combined_model.set_adapter("adapter_0") assert torch.allclose( combined_model.model.model.layers[0].post_attention_layernorm.weight, original_model.model.layers[0].post_attention_layernorm.weight, ) assert torch.allclose( combined_model.model.model.layers[1].post_attention_layernorm.weight, original_model.model.layers[1].post_attention_layernorm.weight, ) # For adapter 1 we expect that the modified module to save 0.post_attention_layernorm is modified, the other # module to save layers mentioned above should be untouched. combined_model.set_adapter("adapter_1") assert torch.allclose( combined_model.model.model.layers[0].post_attention_layernorm.weight, adapter_1_saved, ) assert torch.allclose( combined_model.model.model.layers[1].post_attention_layernorm.weight, original_model.model.layers[1].post_attention_layernorm.weight, ) # For adapter 2 we expect its module to save layer (1.post_attention_layernorm) to be modified but the other # module to save weights should be kept original. combined_model.set_adapter("adapter_2") assert torch.allclose( combined_model.model.model.layers[0].post_attention_layernorm.weight, original_model.model.layers[0].post_attention_layernorm.weight, ) assert torch.allclose( combined_model.model.model.layers[1].post_attention_layernorm.weight, adapter_2_saved, ) class TestModulesToSaveAttributeAccess: """Test attribute access on the ModulesToSaveWrapper class. When we have modules_to_save, the original module is wrapped. 
As long as only forward was called on this wrapped module, we were good. However, if, for instance, model parameters were directly accessed by another module, this would typically fail, as the wrapper does not have this attribute. We had special properties for weight and bias, but this is not enough. Therefore, attribute access is now transiently delegated to the active adapter (or original module, if the adapter is disabled). For one example, see #2099. """ @pytest.fixture def mlp(self): class MLP(nn.Module): def __init__(self): super().__init__() self.lin0 = nn.Linear(1, 2) self.lin1 = nn.Linear(3, 4) return MLP() def test_transient_attribute_access_default_adapter(self, mlp): config = LoraConfig(target_modules=["lin0"], modules_to_save=["lin1"]) model = get_peft_model(mlp, config) assert model.lin1.weight is model.lin1.modules_to_save["default"].weight assert model.lin1.bias is model.lin1.modules_to_save["default"].bias def test_transient_attribute_access_non_default_adapter(self, mlp): config = LoraConfig(target_modules=["lin0"], modules_to_save=["lin1"]) model = get_peft_model(mlp, config) model.add_adapter("other", config) # at this point, default is still active assert model.lin1.weight is model.lin1.modules_to_save["default"].weight assert model.lin1.bias is model.lin1.modules_to_save["default"].bias assert model.lin1.weight is not model.lin1.modules_to_save["other"].weight assert model.lin1.bias is not model.lin1.modules_to_save["other"].bias model.set_adapter("other") assert model.lin1.weight is not model.lin1.modules_to_save["default"].weight assert model.lin1.bias is not model.lin1.modules_to_save["default"].bias assert model.lin1.weight is model.lin1.modules_to_save["other"].weight assert model.lin1.bias is model.lin1.modules_to_save["other"].bias def test_transient_attribute_access_disabled_adapter(self, mlp): config = LoraConfig(target_modules=["lin0"], modules_to_save=["lin1"]) model = get_peft_model(mlp, config) # at this point, default is still active assert model.lin1.weight is model.lin1.modules_to_save["default"].weight assert model.lin1.bias is model.lin1.modules_to_save["default"].bias assert model.lin1.weight is not model.lin1.original_module.weight assert model.lin1.bias is not model.lin1.original_module.bias with model.disable_adapter(): assert model.lin1.weight is not model.lin1.modules_to_save["default"].weight assert model.lin1.bias is not model.lin1.modules_to_save["default"].bias assert model.lin1.weight is model.lin1.original_module.weight assert model.lin1.bias is model.lin1.original_module.bias def test_transient_attribute_access_uninitialized_adapter(self, mlp): # ensure that there is no weird infinite recursion when accessing a non-existing attribute on the class itself with pytest.raises(AttributeError, match="has no attribute 'original_module'"): ModulesToSaveWrapper.original_module def test_transient_attribute_access_attr_does_not_exist_on_modules_to_save(self, mlp): # ensure that there is no weird infinite recursion when accessing a non-existing attribute on the # ModelToSaveWrapper instance config = LoraConfig(target_modules=["lin0"], modules_to_save=["lin1"]) model = get_peft_model(mlp, config) with pytest.raises(AttributeError, match="has no attribute 'foo'"): model.lin1.foo def test_transient_attribute_access_attr_does_not_exist_on_original_module(self, mlp): # ensure that there is no weird infinite recursion when accessing a non-existing attribute on the # original module of the ModelToSaveWrapper instance config = 
LoraConfig(target_modules=["lin0"], modules_to_save=["lin1"]) model = get_peft_model(mlp, config) with pytest.raises(AttributeError, match="has no attribute 'foo'"): with model.disable_adapter(): model.lin1.foo def test_transient_attribute_access_non_existing_adapter(self, mlp): # This should normally never happen, as the active adapter should always exist, but it's a failsafe config = LoraConfig(target_modules=["lin0"], modules_to_save=["lin1"]) model = get_peft_model(mlp, config) model.base_model.model.lin1._active_adapter = "does-not-exist" with pytest.raises(AttributeError, match="has no attribute 'weight'"): model.lin1.weight class TestModulesToSaveNameSubstringBug: """Test a bug that could occur with multiple modules to save where one adapter's name is a substring of another adapter's name. This bug was the result of an error in the logic of modifying the state_dict for modules_to_save in set_peft_model_state_dict. The error in the logic was that it was checked if an entry from modules_to_save (a set of strings) is a substring of a key of the state_dict. If it was, a new name was assigned to that key in the state_dict, which would allow to load the weight later. The issue that stems from the substring check occurs if there are multiple modules_to_save, and one of them has a name that is a substring of another. So e.g. if one is named "classifier" and the other is named "classifier2", there could be a false match. This bug was reported in #2289. """ def get_model(self): class MyModule(nn.Module): def __init__(self): super().__init__() self.lin = nn.Linear(5, 4) # important: "classifier" is a substring of "classifier2", "classifier3", "classifier4" self.classifier = nn.Linear(4, 2) self.classifier2 = nn.Linear(4, 2) self.classifier3 = nn.Linear(4, 2) self.classifier4 = nn.Linear(4, 2) def forward(self, x): x = self.lin(x) return self.classifier(x) + self.classifier2(x) + self.classifier3(x) + self.classifier4(x) torch.manual_seed(0) return MyModule() @pytest.fixture def path_merged_and_unmerged(self, tmp_path): # Create 2 checkpoints: # 1. merged: the model after calling merge_and_unload # 2. unmerged: the PEFT model saved without calling merge_and_unload path = tmp_path / "model.pt" lora_config = LoraConfig( target_modules=["lin"], # important: "classifier" is a substring of "classifier2", "classifier3", "classifier4" modules_to_save=["classifier", "classifier2", "classifier3", "classifier4"], ) model = get_peft_model(self.get_model(), lora_config) # mock training for _ in range(5): optimizer = torch.optim.SGD(model.parameters(), lr=0.01) output = model(torch.randn(10, 5)) loss = output.sum() loss.backward() optimizer.step() # save the peft model without merging path_unmerged = tmp_path / "unmerged" model.save_pretrained(path_unmerged) # merge the model and save state_dict path_merged = tmp_path / "merged" merged = model.merge_and_unload() state_dict = merged.state_dict() torch.save(state_dict, path_merged) return path_merged, path_unmerged def test_load_merged_and_unmerged_same_weights(self, path_merged_and_unmerged): # Note that this test is quasi flaky, it has a 1 in 4 chance of passing even without the bugfix. It passes when # "classifier" happens to be the last element of the set model.modules_to_save. The order of the set is random. # It is not possible just run this test multiple times to minimize the probability of this happening, because # within the same process, the hash order is consistent. 
With the bug fix, this doesn't matter, as the test will # always pass, but if there is a regression, there is a 1 in 4 chance of not catching it. Since the CI runs many # tests, it is overall very unlikely that none will catch it though. If you see this test failing in CI, thus be # aware that some of the passing tests may just pass owing to randomness. path_merged, path_unmerged = path_merged_and_unmerged # load the merged model directly state_dict = torch.load(path_merged, weights_only=True) model = self.get_model() model.load_state_dict(state_dict) sd_merged = model.state_dict() del model # load the unmerged model and merge it unmerged = PeftModel.from_pretrained(self.get_model(), path_unmerged) sd_unmerged = unmerged.merge_and_unload().state_dict() assert sd_merged.keys() == sd_unmerged.keys() for key in sd_merged.keys(): param_merged = sd_merged[key] param_unmerged = sd_unmerged[key] assert torch.allclose(param_merged, param_unmerged) class TestTargetingAuxiliaryTrainingWrapper: """AuxiliaryTrainingWrapper such as ModulesToSaveWrapper and TrainableTokensWrapper are in general not to be targeted by PEFT methods such as adapters. For example, a ModulesToSaveWrapper's children modules should not be targeted by `LoraConfig(target_modules='all-linear')`, among other things. """ @pytest.fixture def plain_model_cls(self): class PlainModel(nn.Module): def __init__(self, i, o): super().__init__() self.layer1 = nn.Linear(i, o) def forward(self, x): return self.layer1(x) return PlainModel @pytest.fixture def nested_model_cls(self, plain_model_cls): class NestedModel(nn.Module): def __init__(self): super().__init__() self.layer1 = nn.Linear(10, 20) self.layer2 = nn.Linear(20, 5) self.layer3 = plain_model_cls(5, 10) def forward(self, x): x = self.layer1(x) x = self.layer2(x) x = self.layer3(x) return x return NestedModel def test_nested_ignores_modules_to_save(self, nested_model_cls, plain_model_cls): # Make sure that `target_modules` is not targeting the nested modules of a module marked as module to save. model = nested_model_cls() config = LoraConfig( target_modules=["layer1"], modules_to_save=["layer3"], ) peft_model = get_peft_model(model, config) assert isinstance(peft_model.model.layer3.modules_to_save.default, plain_model_cls) def test_targeting_module_to_save_raises(self, nested_model_cls): model = nested_model_cls() config = LoraConfig( target_modules=["layer1"], modules_to_save=["layer1"], ) msg = "No modules were targeted for adaptation. This might be caused by a combination" with pytest.raises(ValueError, match=msg): get_peft_model(model, config) def test_modules_to_save_targets_tuner_layer_raises(self): # See e.g. issue 2027 and 2477 # Prevent users from (accidentally) targeting the same layer both with a tuner and modules_to_save. Normally, PEFT # will not target the same layer with both a tuner and ModulesToSaveWrapper. However, if modules_to_save is # automatically inferred, e.g. when using AutoModelForSequenceClassification, the ModulesToSaveWrapper is applied ex # post, which can lead to the double wrapping. model_id = "hf-internal-testing/tiny-random-OPTForCausalLM" model = AutoModelForSequenceClassification.from_pretrained(model_id) # Note: target_modules="all-linear" would also work and is closer to the original issue, but let's explicitly target # "score" here in case that "all-linear" will be fixed to no longer target the score layer. 
peft_config = LoraConfig(target_modules=["score"], task_type="SEQ_CLS") # Since the `score` layer is in `model.modules_to_save` it should be ignored when targeted, # therefore the layer should not be adapted. msg = "No modules were targeted for adaptation. This might be caused by a combination" with pytest.raises(ValueError, match=msg) as e: get_peft_model(model, peft_config) def test_targeting_trainable_tokens_raises(self): model_id = "hf-internal-testing/tiny-random-OPTForCausalLM" model = AutoModelForSequenceClassification.from_pretrained(model_id) peft_config = LoraConfig(target_modules=["embed_tokens"], task_type="SEQ_CLS", trainable_token_indices=[0, 1]) # While this message might not be the most helpful message, at least it is not silently failing msg = "trainable_token_indices cannot be applied to modules of type <class 'peft.tuners.lora.layer.Embedding'>" with pytest.raises(TypeError, match=msg) as e: get_peft_model(model, peft_config) class TestAdapterTargeting: """Make sure that already existing adapters cannot be targeted to avoid conflicts.""" @pytest.fixture def base_model_cls(self): class M(torch.nn.Module): def __init__(self): super().__init__() self.l1 = torch.nn.Linear(10, 20) self.l2 = torch.nn.Conv2d(1, 1, 2) def forward(self, x): return self.l2(self.l1(x)) return M @pytest.mark.parametrize( "config_cls, config_kwargs", [ (LoraConfig, {"target_modules": "l1.*"}), (LoraConfig, {"target_modules": "l2.*"}), (VeraConfig, {"target_modules": "l1.*"}), (VeraConfig, {"target_modules": "(l1|vera_A).*"}), # also target the shared layer ], ) def test_self_targeting_is_ignored(self, base_model_cls, config_cls, config_kwargs): base_model = base_model_cls() config1 = config_cls(**config_kwargs) config2 = config_cls(**config_kwargs) adapter1_name = "ADAPTER_1_512858" # sufficiently unique names to make reliable testing easier adapter2_name = "ADAPTER_2_845781" peft_model = get_peft_model(base_model, config1, adapter_name=adapter1_name) state_dict_keys_1 = peft_model.state_dict().keys() peft_model.add_adapter(adapter2_name, config2) state_dict_keys_2 = peft_model.state_dict().keys() # Ideally there should be no new modules targeted beyond existing ModuleDicts. Therefore the keys # of the new state dict should only differ after the adapter name portion of the keys - not before. 
# Expected: # - a.b.<adapter_name_1>.xyz # - a.b.<adapter_name_2>.xyz # We're not expecting this to happen and test against it: # - a.b.<adapter_name_1>.xyz # - a.<adapter_name_2>.xyz def remove_adapter_portion(adapter_name, key): if key.endswith(f".{adapter_name}"): return key.removesuffix(f".{adapter_name}") return key.split(f".{adapter_name}.")[0] adapter_invariant_keys1 = {remove_adapter_portion(adapter1_name, key) for key in state_dict_keys_1} adapter_invariant_keys2 = { remove_adapter_portion(adapter2_name, remove_adapter_portion(adapter1_name, key)) for key in state_dict_keys_2 } assert adapter_invariant_keys1 == adapter_invariant_keys2 class TestGetNoSplitModules: # Ensure that children are considered when determining _no_split_modules # see https://github.com/huggingface/transformers/pull/38141 def test_get_no_split_modules_simple(self): # choose a model where recursively visiting children is *not* required model_id = "facebook/opt-125m" model = AutoModelForCausalLM.from_pretrained(model_id) assert model._no_split_modules == ["OPTDecoderLayer"] no_split_modules = _get_no_split_modules(model) assert no_split_modules == {"OPTDecoderLayer"} def test_get_no_split_modules_recursive(self): # choose a model where recursively visiting children is required model_id = "hf-internal-testing/tiny-random-LlavaForConditionalGeneration" model = LlavaForConditionalGeneration.from_pretrained(model_id) # sanity check: just visiting the model itself is not enough: assert model._no_split_modules == [] no_split_modules = _get_no_split_modules(model) assert no_split_modules == {"CLIPEncoderLayer", "LlamaDecoderLayer"}
peft/tests/test_other.py/0
{ "file_path": "peft/tests/test_other.py", "repo_id": "peft", "token_count": 9599 }
251
#!/usr/bin/env python3 """ Bulk Model Script Runner Run validation or benchmark script in separate process for each model Benchmark all 'vit*' models: python bulk_runner.py --model-list 'vit*' --results-file vit_bench.csv benchmark.py --amp -b 512 Validate all models: python bulk_runner.py --model-list all --results-file val.csv --pretrained validate.py --data-dir /imagenet/validation/ --amp -b 512 --retry Hacked together by Ross Wightman (https://github.com/rwightman) """ import argparse import os import sys import csv import json import subprocess import time from typing import Callable, List, Tuple, Union from timm.models import is_model, list_models, get_pretrained_cfg, get_arch_pretrained_cfgs parser = argparse.ArgumentParser(description='Per-model process launcher') # model and results args parser.add_argument( '--model-list', metavar='NAME', default='', help='txt file based list of model names to benchmark') parser.add_argument( '--results-file', default='', type=str, metavar='FILENAME', help='Output csv file for validation results (summary)') parser.add_argument( '--sort-key', default='', type=str, metavar='COL', help='Specify sort key for results csv') parser.add_argument( "--pretrained", action='store_true', help="only run models with pretrained weights") parser.add_argument( "--delay", type=float, default=0, help="Interval, in seconds, to delay between model invocations.", ) parser.add_argument( "--start_method", type=str, default="spawn", choices=["spawn", "fork", "forkserver"], help="Multiprocessing start method to use when creating workers.", ) parser.add_argument( "--no_python", help="Skip prepending the script with 'python' - just execute it directly. Useful " "when the script is not a Python script.", ) parser.add_argument( "-m", "--module", help="Change each process to interpret the launch script as a Python module, executing " "with the same behavior as 'python -m'.", ) # positional parser.add_argument( "script", type=str, help="Full path to the program/script to be launched for each model config.", ) parser.add_argument("script_args", nargs=argparse.REMAINDER) def cmd_from_args(args) -> Tuple[Union[Callable, str], List[str]]: # If ``args`` not passed, defaults to ``sys.argv[:1]`` with_python = not args.no_python cmd: Union[Callable, str] cmd_args = [] if with_python: cmd = os.getenv("PYTHON_EXEC", sys.executable) cmd_args.append("-u") if args.module: cmd_args.append("-m") cmd_args.append(args.script) else: if args.module: raise ValueError( "Don't use both the '--no_python' flag" " and the '--module' flag at the same time." 
) cmd = args.script cmd_args.extend(args.script_args) return cmd, cmd_args def _get_model_cfgs( model_names, num_classes=None, expand_train_test=False, include_crop=True, expand_arch=False, ): model_cfgs = set() for name in model_names: if expand_arch: pt_cfgs = get_arch_pretrained_cfgs(name).values() else: pt_cfg = get_pretrained_cfg(name) pt_cfgs = [pt_cfg] if pt_cfg is not None else [] for cfg in pt_cfgs: if cfg.input_size is None: continue if num_classes is not None and getattr(cfg, 'num_classes', 0) != num_classes: continue # Add main configuration size = cfg.input_size[-1] if include_crop: model_cfgs.add((name, size, cfg.crop_pct)) else: model_cfgs.add((name, size)) # Add test configuration if required if expand_train_test and cfg.test_input_size is not None: test_size = cfg.test_input_size[-1] if include_crop: test_crop = cfg.test_crop_pct or cfg.crop_pct model_cfgs.add((name, test_size, test_crop)) else: model_cfgs.add((name, test_size)) # Format the output if include_crop: return [(n, {'img-size': r, 'crop-pct': cp}) for n, r, cp in sorted(model_cfgs)] else: return [(n, {'img-size': r}) for n, r in sorted(model_cfgs)] def main(): args = parser.parse_args() cmd, cmd_args = cmd_from_args(args) model_cfgs = [] if args.model_list == 'all': model_names = list_models( pretrained=args.pretrained, # only include models w/ pretrained checkpoints if set ) model_cfgs = [(n, None) for n in model_names] elif args.model_list == 'all_in1k': model_names = list_models(pretrained=True) model_cfgs = _get_model_cfgs(model_names, num_classes=1000, expand_train_test=True) elif args.model_list == 'all_res': model_names = list_models() model_cfgs = _get_model_cfgs(model_names, expand_train_test=True, include_crop=False, expand_arch=True) elif not is_model(args.model_list): # model name doesn't exist, try as wildcard filter model_names = list_models(args.model_list) model_cfgs = [(n, None) for n in model_names] if not model_cfgs and os.path.exists(args.model_list): with open(args.model_list) as f: model_names = [line.rstrip() for line in f] model_cfgs = _get_model_cfgs( model_names, #num_classes=1000, expand_train_test=True, #include_crop=False, ) if len(model_cfgs): results_file = args.results_file or './results.csv' results = [] errors = [] model_strings = '\n'.join([f'{x[0]}, {x[1]}' for x in model_cfgs]) print(f"Running script on these models:\n {model_strings}") if not args.sort_key: if 'benchmark' in args.script: if any(['train' in a for a in args.script_args]): sort_key = 'train_samples_per_sec' else: sort_key = 'infer_samples_per_sec' else: sort_key = 'top1' else: sort_key = args.sort_key print(f'Script: {args.script}, Args: {args.script_args}, Sort key: {sort_key}') try: for m, ax in model_cfgs: if not m: continue args_str = (cmd, *[str(e) for e in cmd_args], '--model', m) if ax is not None: extra_args = [(f'--{k}', str(v)) for k, v in ax.items()] extra_args = [i for t in extra_args for i in t] args_str += tuple(extra_args) try: o = subprocess.check_output(args=args_str).decode('utf-8').split('--result')[-1] r = json.loads(o) results.append(r) except Exception as e: # FIXME batch_size retry loop is currently done in either validation.py or benchmark.py # for further robustness (but more overhead), we may want to manage that by looping here... 
errors.append(dict(model=m, error=str(e))) if args.delay: time.sleep(args.delay) except KeyboardInterrupt as e: pass errors.extend(list(filter(lambda x: 'error' in x, results))) if errors: print(f'{len(errors)} models had errors during run.') for e in errors: if 'model' in e: print(f"\t {e['model']} ({e.get('error', 'Unknown')})") else: print(e) results = list(filter(lambda x: 'error' not in x, results)) no_sortkey = list(filter(lambda x: sort_key not in x, results)) if no_sortkey: print(f'{len(no_sortkey)} results missing sort key, skipping sort.') else: results = sorted(results, key=lambda x: x[sort_key], reverse=True) if len(results): print(f'{len(results)} models run successfully. Saving results to {results_file}.') write_results(results_file, results) def write_results(results_file, results): with open(results_file, mode='w') as cf: dw = csv.DictWriter(cf, fieldnames=results[0].keys()) dw.writeheader() for r in results: dw.writerow(r) cf.flush() if __name__ == '__main__': main()
pytorch-image-models/bulk_runner.py/0
{ "file_path": "pytorch-image-models/bulk_runner.py", "repo_id": "pytorch-image-models", "token_count": 3951 }
252
# CSP-DarkNet **CSPDarknet53** is a convolutional neural network and backbone for object detection that uses [DarkNet-53](https://paperswithcode.com/method/darknet-53). It employs a CSPNet strategy to partition the feature map of the base layer into two parts and then merges them through a cross-stage hierarchy. The use of a split and merge strategy allows for more gradient flow through the network. This CNN is used as the backbone for [YOLOv4](https://paperswithcode.com/method/yolov4). ## How do I use this model on an image? To load a pretrained model: ```py >>> import timm >>> model = timm.create_model('cspdarknet53', pretrained=True) >>> model.eval() ``` To load and preprocess the image: ```py >>> import urllib >>> from PIL import Image >>> from timm.data import resolve_data_config >>> from timm.data.transforms_factory import create_transform >>> config = resolve_data_config({}, model=model) >>> transform = create_transform(**config) >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") >>> urllib.request.urlretrieve(url, filename) >>> img = Image.open(filename).convert('RGB') >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension ``` To get the model predictions: ```py >>> import torch >>> with torch.inference_mode(): ... out = model(tensor) >>> probabilities = torch.nn.functional.softmax(out[0], dim=0) >>> print(probabilities.shape) >>> # prints: torch.Size([1000]) ``` To get the top-5 predictions class names: ```py >>> # Get imagenet class mappings >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") >>> urllib.request.urlretrieve(url, filename) >>> with open("imagenet_classes.txt", "r") as f: ... categories = [s.strip() for s in f.readlines()] >>> # Print top categories per image >>> top5_prob, top5_catid = torch.topk(probabilities, 5) >>> for i in range(top5_prob.size(0)): ... print(categories[top5_catid[i]], top5_prob[i].item()) >>> # prints class names and probabilities like: >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] ``` Replace the model name with the variant you want to use, e.g. `cspdarknet53`. You can find the IDs in the model summaries at the top of this page. To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use. ## How do I finetune this model? You can finetune any of the pre-trained models just by changing the classifier (the last layer). ```py >>> model = timm.create_model('cspdarknet53', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) ``` To finetune on your own dataset, you have to write a training loop or adapt [timm's training script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. ## How do I train this model? You can follow the [timm recipe scripts](../training_script) for training a new model afresh. 
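If you just want to sanity-check a single training step without the full script, the minimal loop below sketches the idea. It assumes a hypothetical 10-class finetuning task and uses a random tensor in place of a real `DataLoader`; the optimizer settings are illustrative only, not the recipe used for the pretrained weights.

```py
>>> import torch
>>> import timm
>>> model = timm.create_model('cspdarknet53', pretrained=True, num_classes=10)
>>> model.train()
>>> optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9, weight_decay=5e-4)
>>> criterion = torch.nn.CrossEntropyLoss()
>>> # stand-in batch: 4 RGB images at the model's native 256px resolution, with random labels
>>> images = torch.randn(4, 3, 256, 256)
>>> labels = torch.randint(0, 10, (4,))
>>> optimizer.zero_grad()
>>> loss = criterion(model(images), labels)
>>> loss.backward()
>>> optimizer.step()
```

In a real run you would wrap the last five lines in a loop over your `DataLoader` and add a learning rate schedule.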
## Citation ```BibTeX @misc{bochkovskiy2020yolov4, title={YOLOv4: Optimal Speed and Accuracy of Object Detection}, author={Alexey Bochkovskiy and Chien-Yao Wang and Hong-Yuan Mark Liao}, year={2020}, eprint={2004.10934}, archivePrefix={arXiv}, primaryClass={cs.CV} } ``` <!-- Type: model-index Collections: - Name: CSP DarkNet Paper: Title: 'YOLOv4: Optimal Speed and Accuracy of Object Detection' URL: https://paperswithcode.com/paper/yolov4-optimal-speed-and-accuracy-of-object Models: - Name: cspdarknet53 In Collection: CSP DarkNet Metadata: FLOPs: 8545018880 Parameters: 27640000 File Size: 110775135 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Global Average Pooling - Mish - Residual Connection - Softmax Tasks: - Image Classification Training Techniques: - CutMix - Label Smoothing - Mosaic - Polynomial Learning Rate Decay - SGD with Momentum - Self-Adversarial Training - Weight Decay Training Data: - ImageNet Training Resources: 1x NVIDIA RTX 2070 GPU ID: cspdarknet53 LR: 0.1 Layers: 53 Crop Pct: '0.887' Momentum: 0.9 Batch Size: 128 Image Size: '256' Warmup Steps: 1000 Weight Decay: 0.0005 Interpolation: bilinear Training Steps: 8000000 FPS (GPU RTX 2070): 66 Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/cspnet.py#L441 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/cspdarknet53_ra_256-d05c7c21.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 80.05% Top 5 Accuracy: 95.09% -->
pytorch-image-models/hfdocs/source/models/csp-darknet.mdx/0
{ "file_path": "pytorch-image-models/hfdocs/source/models/csp-darknet.mdx", "repo_id": "pytorch-image-models", "token_count": 1759 }
253
# PNASNet **Progressive Neural Architecture Search**, or **PNAS**, is a method for learning the structure of convolutional neural networks (CNNs). It uses a sequential model-based optimization (SMBO) strategy, where we search the space of cell structures, starting with simple (shallow) models and progressing to complex ones, pruning out unpromising structures as we go. ## How do I use this model on an image? To load a pretrained model: ```py >>> import timm >>> model = timm.create_model('pnasnet5large', pretrained=True) >>> model.eval() ``` To load and preprocess the image: ```py >>> import urllib >>> from PIL import Image >>> from timm.data import resolve_data_config >>> from timm.data.transforms_factory import create_transform >>> config = resolve_data_config({}, model=model) >>> transform = create_transform(**config) >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") >>> urllib.request.urlretrieve(url, filename) >>> img = Image.open(filename).convert('RGB') >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension ``` To get the model predictions: ```py >>> import torch >>> with torch.inference_mode(): ... out = model(tensor) >>> probabilities = torch.nn.functional.softmax(out[0], dim=0) >>> print(probabilities.shape) >>> # prints: torch.Size([1000]) ``` To get the top-5 predictions class names: ```py >>> # Get imagenet class mappings >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") >>> urllib.request.urlretrieve(url, filename) >>> with open("imagenet_classes.txt", "r") as f: ... categories = [s.strip() for s in f.readlines()] >>> # Print top categories per image >>> top5_prob, top5_catid = torch.topk(probabilities, 5) >>> for i in range(top5_prob.size(0)): ... print(categories[top5_catid[i]], top5_prob[i].item()) >>> # prints class names and probabilities like: >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] ``` Replace the model name with the variant you want to use, e.g. `pnasnet5large`. You can find the IDs in the model summaries at the top of this page. To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use. ## How do I finetune this model? You can finetune any of the pre-trained models just by changing the classifier (the last layer). ```py >>> model = timm.create_model('pnasnet5large', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) ``` To finetune on your own dataset, you have to write a training loop or adapt [timm's training script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. ## How do I train this model? You can follow the [timm recipe scripts](../training_script) for training a new model afresh. 
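If you only want to adapt the classifier rather than train the whole network, a common starting point is to freeze the backbone and train just the new head. The sketch below assumes a hypothetical 10-class target task and an illustrative AdamW learning rate; `get_classifier()` returns the model's final classification layer, which here is the freshly initialised 10-way head created by `num_classes=10`.

```py
>>> import torch
>>> import timm
>>> model = timm.create_model('pnasnet5large', pretrained=True, num_classes=10)
>>> # freeze everything, then re-enable gradients for the classifier only
>>> for param in model.parameters():
...     param.requires_grad = False
>>> for param in model.get_classifier().parameters():
...     param.requires_grad = True
>>> trainable = [p for p in model.parameters() if p.requires_grad]
>>> optimizer = torch.optim.AdamW(trainable, lr=1e-3)
```

Whether head-only training is enough depends on how close your data is to ImageNet; unfreezing the full network usually helps when it is not.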
## Citation ```BibTeX @misc{liu2018progressive, title={Progressive Neural Architecture Search}, author={Chenxi Liu and Barret Zoph and Maxim Neumann and Jonathon Shlens and Wei Hua and Li-Jia Li and Li Fei-Fei and Alan Yuille and Jonathan Huang and Kevin Murphy}, year={2018}, eprint={1712.00559}, archivePrefix={arXiv}, primaryClass={cs.CV} } ``` <!-- Type: model-index Collections: - Name: PNASNet Paper: Title: Progressive Neural Architecture Search URL: https://paperswithcode.com/paper/progressive-neural-architecture-search Models: - Name: pnasnet5large In Collection: PNASNet Metadata: FLOPs: 31458865950 Parameters: 86060000 File Size: 345153926 Architecture: - Average Pooling - Batch Normalization - Convolution - Depthwise Separable Convolution - Dropout - ReLU Tasks: - Image Classification Training Techniques: - Label Smoothing - RMSProp - Weight Decay Training Data: - ImageNet Training Resources: 100x NVIDIA P100 GPUs ID: pnasnet5large LR: 0.015 Dropout: 0.5 Crop Pct: '0.911' Momentum: 0.9 Batch Size: 1600 Image Size: '331' Interpolation: bicubic Label Smoothing: 0.1 Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/pnasnet.py#L343 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/pnasnet5large-bf079911.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 0.98% Top 5 Accuracy: 18.58% -->
pytorch-image-models/hfdocs/source/models/pnasnet.mdx/0
{ "file_path": "pytorch-image-models/hfdocs/source/models/pnasnet.mdx", "repo_id": "pytorch-image-models", "token_count": 1625 }
254
# SSL ResNet **Residual Networks**, or **ResNets**, learn residual functions with reference to the layer inputs, instead of learning unreferenced functions. Instead of hoping each few stacked layers directly fit a desired underlying mapping, residual nets let these layers fit a residual mapping. They stack [residual blocks](https://paperswithcode.com/method/residual-block) on top of each other to form a network: e.g. a ResNet-50 has fifty layers using these blocks. The models in this collection utilise semi-supervised learning to improve performance. The approach brings important gains to standard architectures for image, video and fine-grained classification. Please note the CC-BY-NC 4.0 license on these weights, non-commercial use only. ## How do I use this model on an image? To load a pretrained model: ```py >>> import timm >>> model = timm.create_model('ssl_resnet18', pretrained=True) >>> model.eval() ``` To load and preprocess the image: ```py >>> import urllib >>> from PIL import Image >>> from timm.data import resolve_data_config >>> from timm.data.transforms_factory import create_transform >>> config = resolve_data_config({}, model=model) >>> transform = create_transform(**config) >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") >>> urllib.request.urlretrieve(url, filename) >>> img = Image.open(filename).convert('RGB') >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension ``` To get the model predictions: ```py >>> import torch >>> with torch.inference_mode(): ... out = model(tensor) >>> probabilities = torch.nn.functional.softmax(out[0], dim=0) >>> print(probabilities.shape) >>> # prints: torch.Size([1000]) ``` To get the top-5 predictions class names: ```py >>> # Get imagenet class mappings >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") >>> urllib.request.urlretrieve(url, filename) >>> with open("imagenet_classes.txt", "r") as f: ... categories = [s.strip() for s in f.readlines()] >>> # Print top categories per image >>> top5_prob, top5_catid = torch.topk(probabilities, 5) >>> for i in range(top5_prob.size(0)): ... print(categories[top5_catid[i]], top5_prob[i].item()) >>> # prints class names and probabilities like: >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] ``` Replace the model name with the variant you want to use, e.g. `ssl_resnet18`. You can find the IDs in the model summaries at the top of this page. To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use. ## How do I finetune this model? You can finetune any of the pre-trained models just by changing the classifier (the last layer). ```py >>> model = timm.create_model('ssl_resnet18', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) ``` To finetune on your own dataset, you have to write a training loop or adapt [timm's training script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. ## How do I train this model? You can follow the [timm recipe scripts](../training_script) for training a new model afresh. ## Citation ```BibTeX @article{DBLP:journals/corr/abs-1905-00546, author = {I.
Zeki Yalniz and Herv{\'{e}} J{\'{e}}gou and Kan Chen and Manohar Paluri and Dhruv Mahajan}, title = {Billion-scale semi-supervised learning for image classification}, journal = {CoRR}, volume = {abs/1905.00546}, year = {2019}, url = {http://arxiv.org/abs/1905.00546}, archivePrefix = {arXiv}, eprint = {1905.00546}, timestamp = {Mon, 28 Sep 2020 08:19:37 +0200}, biburl = {https://dblp.org/rec/journals/corr/abs-1905-00546.bib}, bibsource = {dblp computer science bibliography, https://dblp.org} } ``` <!-- Type: model-index Collections: - Name: SSL ResNet Paper: Title: Billion-scale semi-supervised learning for image classification URL: https://paperswithcode.com/paper/billion-scale-semi-supervised-learning-for Models: - Name: ssl_resnet18 In Collection: SSL ResNet Metadata: FLOPs: 2337073152 Parameters: 11690000 File Size: 46811375 Architecture: - 1x1 Convolution - Batch Normalization - Bottleneck Residual Block - Convolution - Global Average Pooling - Max Pooling - ReLU - Residual Block - Residual Connection - Softmax Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet - YFCC-100M Training Resources: 64x GPUs ID: ssl_resnet18 LR: 0.0015 Epochs: 30 Layers: 18 Crop Pct: '0.875' Batch Size: 1536 Image Size: '224' Weight Decay: 0.0001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/resnet.py#L894 Weights: https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnet18-d92f0530.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 72.62% Top 5 Accuracy: 91.42% - Name: ssl_resnet50 In Collection: SSL ResNet Metadata: FLOPs: 5282531328 Parameters: 25560000 File Size: 102480594 Architecture: - 1x1 Convolution - Batch Normalization - Bottleneck Residual Block - Convolution - Global Average Pooling - Max Pooling - ReLU - Residual Block - Residual Connection - Softmax Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet - YFCC-100M Training Resources: 64x GPUs ID: ssl_resnet50 LR: 0.0015 Epochs: 30 Layers: 50 Crop Pct: '0.875' Batch Size: 1536 Image Size: '224' Weight Decay: 0.0001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/resnet.py#L904 Weights: https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnet50-08389792.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 79.24% Top 5 Accuracy: 94.83% -->
pytorch-image-models/hfdocs/source/models/ssl-resnet.mdx/0
{ "file_path": "pytorch-image-models/hfdocs/source/models/ssl-resnet.mdx", "repo_id": "pytorch-image-models", "token_count": 2428 }
255
# Learning Rate Schedulers This page contains the API reference documentation for learning rate schedulers included in `timm`. ## Schedulers ### Factory functions [[autodoc]] timm.scheduler.scheduler_factory.create_scheduler [[autodoc]] timm.scheduler.scheduler_factory.create_scheduler_v2 ### Scheduler Classes [[autodoc]] timm.scheduler.cosine_lr.CosineLRScheduler [[autodoc]] timm.scheduler.multistep_lr.MultiStepLRScheduler [[autodoc]] timm.scheduler.plateau_lr.PlateauLRScheduler [[autodoc]] timm.scheduler.poly_lr.PolyLRScheduler [[autodoc]] timm.scheduler.step_lr.StepLRScheduler [[autodoc]] timm.scheduler.tanh_lr.TanhLRScheduler
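### Usage sketch

As a quick orientation, the snippet below sketches how a `timm` scheduler is typically driven; the cosine schedule, 100-epoch length and warmup settings are placeholder values, not a recommended recipe. Unlike the built-in `torch.optim` schedulers, `timm` schedulers are stepped with an explicit epoch (or update) index.

```py
>>> import torch
>>> from timm.scheduler import CosineLRScheduler
>>> params = [torch.nn.Parameter(torch.zeros(1))]  # stand-in for model.parameters()
>>> optimizer = torch.optim.SGD(params, lr=0.1)
>>> num_epochs = 100
>>> scheduler = CosineLRScheduler(
...     optimizer,
...     t_initial=num_epochs,  # length of the cosine decay, in epochs
...     lr_min=1e-5,
...     warmup_t=5,            # linear warmup over the first 5 epochs
...     warmup_lr_init=1e-6,
... )
>>> for epoch in range(num_epochs):
...     # ... run one training epoch here ...
...     scheduler.step(epoch + 1)  # pass the index of the upcoming epoch
```

`create_scheduler_v2` builds the same scheduler objects from keyword arguments and also returns the resolved number of epochs, which is convenient when warmup or cooldown phases extend the schedule.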
pytorch-image-models/hfdocs/source/reference/schedulers.mdx/0
{ "file_path": "pytorch-image-models/hfdocs/source/reference/schedulers.mdx", "repo_id": "pytorch-image-models", "token_count": 242 }
256
""" AutoAugment, RandAugment, AugMix, and 3-Augment for PyTorch This code implements the searched ImageNet policies with various tweaks and improvements and does not include any of the search code. AA and RA Implementation adapted from: https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/autoaugment.py AugMix adapted from: https://github.com/google-research/augmix 3-Augment based on: https://github.com/facebookresearch/deit/blob/main/README_revenge.md Papers: AutoAugment: Learning Augmentation Policies from Data - https://arxiv.org/abs/1805.09501 Learning Data Augmentation Strategies for Object Detection - https://arxiv.org/abs/1906.11172 RandAugment: Practical automated data augmentation... - https://arxiv.org/abs/1909.13719 AugMix: A Simple Data Processing Method to Improve Robustness and Uncertainty - https://arxiv.org/abs/1912.02781 3-Augment: DeiT III: Revenge of the ViT - https://arxiv.org/abs/2204.07118 Hacked together by / Copyright 2019, Ross Wightman """ import random import math import re from functools import partial from typing import Dict, List, Optional, Union from PIL import Image, ImageOps, ImageEnhance, ImageChops, ImageFilter import PIL import numpy as np _PIL_VER = tuple([int(x) for x in PIL.__version__.split('.')[:2]]) _FILL = (128, 128, 128) _LEVEL_DENOM = 10. # denominator for conversion from 'Mx' magnitude scale to fractional aug level for op arguments _HPARAMS_DEFAULT = dict( translate_const=250, img_mean=_FILL, ) if hasattr(Image, "Resampling"): _RANDOM_INTERPOLATION = (Image.Resampling.BILINEAR, Image.Resampling.BICUBIC) _DEFAULT_INTERPOLATION = Image.Resampling.BICUBIC else: _RANDOM_INTERPOLATION = (Image.BILINEAR, Image.BICUBIC) _DEFAULT_INTERPOLATION = Image.BICUBIC def _interpolation(kwargs): interpolation = kwargs.pop('resample', _DEFAULT_INTERPOLATION) if isinstance(interpolation, (list, tuple)): return random.choice(interpolation) return interpolation def _check_args_tf(kwargs): if 'fillcolor' in kwargs and _PIL_VER < (5, 0): kwargs.pop('fillcolor') kwargs['resample'] = _interpolation(kwargs) def shear_x(img, factor, **kwargs): _check_args_tf(kwargs) return img.transform(img.size, Image.AFFINE, (1, factor, 0, 0, 1, 0), **kwargs) def shear_y(img, factor, **kwargs): _check_args_tf(kwargs) return img.transform(img.size, Image.AFFINE, (1, 0, 0, factor, 1, 0), **kwargs) def translate_x_rel(img, pct, **kwargs): pixels = pct * img.size[0] _check_args_tf(kwargs) return img.transform(img.size, Image.AFFINE, (1, 0, pixels, 0, 1, 0), **kwargs) def translate_y_rel(img, pct, **kwargs): pixels = pct * img.size[1] _check_args_tf(kwargs) return img.transform(img.size, Image.AFFINE, (1, 0, 0, 0, 1, pixels), **kwargs) def translate_x_abs(img, pixels, **kwargs): _check_args_tf(kwargs) return img.transform(img.size, Image.AFFINE, (1, 0, pixels, 0, 1, 0), **kwargs) def translate_y_abs(img, pixels, **kwargs): _check_args_tf(kwargs) return img.transform(img.size, Image.AFFINE, (1, 0, 0, 0, 1, pixels), **kwargs) def rotate(img, degrees, **kwargs): _check_args_tf(kwargs) if _PIL_VER >= (5, 2): return img.rotate(degrees, **kwargs) if _PIL_VER >= (5, 0): w, h = img.size post_trans = (0, 0) rotn_center = (w / 2.0, h / 2.0) angle = -math.radians(degrees) matrix = [ round(math.cos(angle), 15), round(math.sin(angle), 15), 0.0, round(-math.sin(angle), 15), round(math.cos(angle), 15), 0.0, ] def transform(x, y, matrix): (a, b, c, d, e, f) = matrix return a * x + b * y + c, d * x + e * y + f matrix[2], matrix[5] = transform( -rotn_center[0] - post_trans[0], 
-rotn_center[1] - post_trans[1], matrix ) matrix[2] += rotn_center[0] matrix[5] += rotn_center[1] return img.transform(img.size, Image.AFFINE, matrix, **kwargs) return img.rotate(degrees, resample=kwargs['resample']) def auto_contrast(img, **__): return ImageOps.autocontrast(img) def invert(img, **__): return ImageOps.invert(img) def equalize(img, **__): return ImageOps.equalize(img) def solarize(img, thresh, **__): return ImageOps.solarize(img, thresh) def solarize_add(img, add, thresh=128, **__): lut = [] for i in range(256): if i < thresh: lut.append(min(255, i + add)) else: lut.append(i) if img.mode in ("L", "RGB"): if img.mode == "RGB" and len(lut) == 256: lut = lut + lut + lut return img.point(lut) return img def posterize(img, bits_to_keep, **__): if bits_to_keep >= 8: return img return ImageOps.posterize(img, bits_to_keep) def contrast(img, factor, **__): return ImageEnhance.Contrast(img).enhance(factor) def color(img, factor, **__): return ImageEnhance.Color(img).enhance(factor) def brightness(img, factor, **__): return ImageEnhance.Brightness(img).enhance(factor) def sharpness(img, factor, **__): return ImageEnhance.Sharpness(img).enhance(factor) def gaussian_blur(img, factor, **__): img = img.filter(ImageFilter.GaussianBlur(radius=factor)) return img def gaussian_blur_rand(img, factor, **__): radius_min = 0.1 radius_max = 2.0 img = img.filter(ImageFilter.GaussianBlur(radius=random.uniform(radius_min, radius_max * factor))) return img def desaturate(img, factor, **_): factor = min(1., max(0., 1. - factor)) # enhance factor 0 = grayscale, 1.0 = no-change return ImageEnhance.Color(img).enhance(factor) def _randomly_negate(v): """With 50% prob, negate the value""" return -v if random.random() > 0.5 else v def _rotate_level_to_arg(level, _hparams): # range [-30, 30] level = (level / _LEVEL_DENOM) * 30. level = _randomly_negate(level) return level, def _enhance_level_to_arg(level, _hparams): # range [0.1, 1.9] return (level / _LEVEL_DENOM) * 1.8 + 0.1, def _enhance_increasing_level_to_arg(level, _hparams): # the 'no change' level is 1.0, moving away from that towards 0. 
or 2.0 increases the enhancement blend # range [0.1, 1.9] if level <= _LEVEL_DENOM level = (level / _LEVEL_DENOM) * .9 level = max(0.1, 1.0 + _randomly_negate(level)) # keep it >= 0.1 return level, def _minmax_level_to_arg(level, _hparams, min_val=0., max_val=1.0, clamp=True): level = (level / _LEVEL_DENOM) level = min_val + (max_val - min_val) * level if clamp: level = max(min_val, min(max_val, level)) return level, def _shear_level_to_arg(level, _hparams): # range [-0.3, 0.3] level = (level / _LEVEL_DENOM) * 0.3 level = _randomly_negate(level) return level, def _translate_abs_level_to_arg(level, hparams): translate_const = hparams['translate_const'] level = (level / _LEVEL_DENOM) * float(translate_const) level = _randomly_negate(level) return level, def _translate_rel_level_to_arg(level, hparams): # default range [-0.45, 0.45] translate_pct = hparams.get('translate_pct', 0.45) level = (level / _LEVEL_DENOM) * translate_pct level = _randomly_negate(level) return level, def _posterize_level_to_arg(level, _hparams): # As per Tensorflow TPU EfficientNet impl # range [0, 4], 'keep 0 up to 4 MSB of original image' # intensity/severity of augmentation decreases with level return int((level / _LEVEL_DENOM) * 4), def _posterize_increasing_level_to_arg(level, hparams): # As per Tensorflow models research and UDA impl # range [4, 0], 'keep 4 down to 0 MSB of original image', # intensity/severity of augmentation increases with level return 4 - _posterize_level_to_arg(level, hparams)[0], def _posterize_original_level_to_arg(level, _hparams): # As per original AutoAugment paper description # range [4, 8], 'keep 4 up to 8 MSB of image' # intensity/severity of augmentation decreases with level return int((level / _LEVEL_DENOM) * 4) + 4, def _solarize_level_to_arg(level, _hparams): # range [0, 256] # intensity/severity of augmentation decreases with level return min(256, int((level / _LEVEL_DENOM) * 256)), def _solarize_increasing_level_to_arg(level, _hparams): # range [0, 256] # intensity/severity of augmentation increases with level return 256 - _solarize_level_to_arg(level, _hparams)[0], def _solarize_add_level_to_arg(level, _hparams): # range [0, 110] return min(128, int((level / _LEVEL_DENOM) * 110)), LEVEL_TO_ARG = { 'AutoContrast': None, 'Equalize': None, 'Invert': None, 'Rotate': _rotate_level_to_arg, # There are several variations of the posterize level scaling in various Tensorflow/Google repositories/papers 'Posterize': _posterize_level_to_arg, 'PosterizeIncreasing': _posterize_increasing_level_to_arg, 'PosterizeOriginal': _posterize_original_level_to_arg, 'Solarize': _solarize_level_to_arg, 'SolarizeIncreasing': _solarize_increasing_level_to_arg, 'SolarizeAdd': _solarize_add_level_to_arg, 'Color': _enhance_level_to_arg, 'ColorIncreasing': _enhance_increasing_level_to_arg, 'Contrast': _enhance_level_to_arg, 'ContrastIncreasing': _enhance_increasing_level_to_arg, 'Brightness': _enhance_level_to_arg, 'BrightnessIncreasing': _enhance_increasing_level_to_arg, 'Sharpness': _enhance_level_to_arg, 'SharpnessIncreasing': _enhance_increasing_level_to_arg, 'ShearX': _shear_level_to_arg, 'ShearY': _shear_level_to_arg, 'TranslateX': _translate_abs_level_to_arg, 'TranslateY': _translate_abs_level_to_arg, 'TranslateXRel': _translate_rel_level_to_arg, 'TranslateYRel': _translate_rel_level_to_arg, 'Desaturate': partial(_minmax_level_to_arg, min_val=0.5, max_val=1.0), 'GaussianBlur': partial(_minmax_level_to_arg, min_val=0.1, max_val=2.0), 'GaussianBlurRand': _minmax_level_to_arg, } NAME_TO_OP = { 
'AutoContrast': auto_contrast, 'Equalize': equalize, 'Invert': invert, 'Rotate': rotate, 'Posterize': posterize, 'PosterizeIncreasing': posterize, 'PosterizeOriginal': posterize, 'Solarize': solarize, 'SolarizeIncreasing': solarize, 'SolarizeAdd': solarize_add, 'Color': color, 'ColorIncreasing': color, 'Contrast': contrast, 'ContrastIncreasing': contrast, 'Brightness': brightness, 'BrightnessIncreasing': brightness, 'Sharpness': sharpness, 'SharpnessIncreasing': sharpness, 'ShearX': shear_x, 'ShearY': shear_y, 'TranslateX': translate_x_abs, 'TranslateY': translate_y_abs, 'TranslateXRel': translate_x_rel, 'TranslateYRel': translate_y_rel, 'Desaturate': desaturate, 'GaussianBlur': gaussian_blur, 'GaussianBlurRand': gaussian_blur_rand, } class AugmentOp: def __init__(self, name, prob=0.5, magnitude=10, hparams=None): hparams = hparams or _HPARAMS_DEFAULT self.name = name self.aug_fn = NAME_TO_OP[name] self.level_fn = LEVEL_TO_ARG[name] self.prob = prob self.magnitude = magnitude self.hparams = hparams.copy() self.kwargs = dict( fillcolor=hparams['img_mean'] if 'img_mean' in hparams else _FILL, resample=hparams['interpolation'] if 'interpolation' in hparams else _RANDOM_INTERPOLATION, ) # If magnitude_std is > 0, we introduce some randomness # in the usually fixed policy and sample magnitude from a normal distribution # with mean `magnitude` and std-dev of `magnitude_std`. # NOTE This is my own hack, being tested, not in papers or reference impls. # If magnitude_std is inf, we sample magnitude from a uniform distribution self.magnitude_std = self.hparams.get('magnitude_std', 0) self.magnitude_max = self.hparams.get('magnitude_max', None) def __call__(self, img): if self.prob < 1.0 and random.random() > self.prob: return img magnitude = self.magnitude if self.magnitude_std > 0: # magnitude randomization enabled if self.magnitude_std == float('inf'): # inf == uniform sampling magnitude = random.uniform(0, magnitude) elif self.magnitude_std > 0: magnitude = random.gauss(magnitude, self.magnitude_std) # default upper_bound for the timm RA impl is _LEVEL_DENOM (10) # setting magnitude_max overrides this to allow M > 10 (behaviour closer to Google TF RA impl) upper_bound = self.magnitude_max or _LEVEL_DENOM magnitude = max(0., min(magnitude, upper_bound)) level_args = self.level_fn(magnitude, self.hparams) if self.level_fn is not None else tuple() return self.aug_fn(img, *level_args, **self.kwargs) def __repr__(self): fs = self.__class__.__name__ + f'(name={self.name}, p={self.prob}' fs += f', m={self.magnitude}, mstd={self.magnitude_std}' if self.magnitude_max is not None: fs += f', mmax={self.magnitude_max}' fs += ')' return fs def auto_augment_policy_v0(hparams): # ImageNet v0 policy from TPU EfficientNet impl, cannot find a paper reference. 
policy = [ [('Equalize', 0.8, 1), ('ShearY', 0.8, 4)], [('Color', 0.4, 9), ('Equalize', 0.6, 3)], [('Color', 0.4, 1), ('Rotate', 0.6, 8)], [('Solarize', 0.8, 3), ('Equalize', 0.4, 7)], [('Solarize', 0.4, 2), ('Solarize', 0.6, 2)], [('Color', 0.2, 0), ('Equalize', 0.8, 8)], [('Equalize', 0.4, 8), ('SolarizeAdd', 0.8, 3)], [('ShearX', 0.2, 9), ('Rotate', 0.6, 8)], [('Color', 0.6, 1), ('Equalize', 1.0, 2)], [('Invert', 0.4, 9), ('Rotate', 0.6, 0)], [('Equalize', 1.0, 9), ('ShearY', 0.6, 3)], [('Color', 0.4, 7), ('Equalize', 0.6, 0)], [('Posterize', 0.4, 6), ('AutoContrast', 0.4, 7)], [('Solarize', 0.6, 8), ('Color', 0.6, 9)], [('Solarize', 0.2, 4), ('Rotate', 0.8, 9)], [('Rotate', 1.0, 7), ('TranslateYRel', 0.8, 9)], [('ShearX', 0.0, 0), ('Solarize', 0.8, 4)], [('ShearY', 0.8, 0), ('Color', 0.6, 4)], [('Color', 1.0, 0), ('Rotate', 0.6, 2)], [('Equalize', 0.8, 4), ('Equalize', 0.0, 8)], [('Equalize', 1.0, 4), ('AutoContrast', 0.6, 2)], [('ShearY', 0.4, 7), ('SolarizeAdd', 0.6, 7)], [('Posterize', 0.8, 2), ('Solarize', 0.6, 10)], # This results in black image with Tpu posterize [('Solarize', 0.6, 8), ('Equalize', 0.6, 1)], [('Color', 0.8, 6), ('Rotate', 0.4, 5)], ] pc = [[AugmentOp(*a, hparams=hparams) for a in sp] for sp in policy] return pc def auto_augment_policy_v0r(hparams): # ImageNet v0 policy from TPU EfficientNet impl, with variation of Posterize used # in Google research implementation (number of bits discarded increases with magnitude) policy = [ [('Equalize', 0.8, 1), ('ShearY', 0.8, 4)], [('Color', 0.4, 9), ('Equalize', 0.6, 3)], [('Color', 0.4, 1), ('Rotate', 0.6, 8)], [('Solarize', 0.8, 3), ('Equalize', 0.4, 7)], [('Solarize', 0.4, 2), ('Solarize', 0.6, 2)], [('Color', 0.2, 0), ('Equalize', 0.8, 8)], [('Equalize', 0.4, 8), ('SolarizeAdd', 0.8, 3)], [('ShearX', 0.2, 9), ('Rotate', 0.6, 8)], [('Color', 0.6, 1), ('Equalize', 1.0, 2)], [('Invert', 0.4, 9), ('Rotate', 0.6, 0)], [('Equalize', 1.0, 9), ('ShearY', 0.6, 3)], [('Color', 0.4, 7), ('Equalize', 0.6, 0)], [('PosterizeIncreasing', 0.4, 6), ('AutoContrast', 0.4, 7)], [('Solarize', 0.6, 8), ('Color', 0.6, 9)], [('Solarize', 0.2, 4), ('Rotate', 0.8, 9)], [('Rotate', 1.0, 7), ('TranslateYRel', 0.8, 9)], [('ShearX', 0.0, 0), ('Solarize', 0.8, 4)], [('ShearY', 0.8, 0), ('Color', 0.6, 4)], [('Color', 1.0, 0), ('Rotate', 0.6, 2)], [('Equalize', 0.8, 4), ('Equalize', 0.0, 8)], [('Equalize', 1.0, 4), ('AutoContrast', 0.6, 2)], [('ShearY', 0.4, 7), ('SolarizeAdd', 0.6, 7)], [('PosterizeIncreasing', 0.8, 2), ('Solarize', 0.6, 10)], [('Solarize', 0.6, 8), ('Equalize', 0.6, 1)], [('Color', 0.8, 6), ('Rotate', 0.4, 5)], ] pc = [[AugmentOp(*a, hparams=hparams) for a in sp] for sp in policy] return pc def auto_augment_policy_original(hparams): # ImageNet policy from https://arxiv.org/abs/1805.09501 policy = [ [('PosterizeOriginal', 0.4, 8), ('Rotate', 0.6, 9)], [('Solarize', 0.6, 5), ('AutoContrast', 0.6, 5)], [('Equalize', 0.8, 8), ('Equalize', 0.6, 3)], [('PosterizeOriginal', 0.6, 7), ('PosterizeOriginal', 0.6, 6)], [('Equalize', 0.4, 7), ('Solarize', 0.2, 4)], [('Equalize', 0.4, 4), ('Rotate', 0.8, 8)], [('Solarize', 0.6, 3), ('Equalize', 0.6, 7)], [('PosterizeOriginal', 0.8, 5), ('Equalize', 1.0, 2)], [('Rotate', 0.2, 3), ('Solarize', 0.6, 8)], [('Equalize', 0.6, 8), ('PosterizeOriginal', 0.4, 6)], [('Rotate', 0.8, 8), ('Color', 0.4, 0)], [('Rotate', 0.4, 9), ('Equalize', 0.6, 2)], [('Equalize', 0.0, 7), ('Equalize', 0.8, 8)], [('Invert', 0.6, 4), ('Equalize', 1.0, 8)], [('Color', 0.6, 4), ('Contrast', 1.0, 8)], [('Rotate', 0.8, 8), 
('Color', 1.0, 2)], [('Color', 0.8, 8), ('Solarize', 0.8, 7)], [('Sharpness', 0.4, 7), ('Invert', 0.6, 8)], [('ShearX', 0.6, 5), ('Equalize', 1.0, 9)], [('Color', 0.4, 0), ('Equalize', 0.6, 3)], [('Equalize', 0.4, 7), ('Solarize', 0.2, 4)], [('Solarize', 0.6, 5), ('AutoContrast', 0.6, 5)], [('Invert', 0.6, 4), ('Equalize', 1.0, 8)], [('Color', 0.6, 4), ('Contrast', 1.0, 8)], [('Equalize', 0.8, 8), ('Equalize', 0.6, 3)], ] pc = [[AugmentOp(*a, hparams=hparams) for a in sp] for sp in policy] return pc def auto_augment_policy_originalr(hparams): # ImageNet policy from https://arxiv.org/abs/1805.09501 with research posterize variation policy = [ [('PosterizeIncreasing', 0.4, 8), ('Rotate', 0.6, 9)], [('Solarize', 0.6, 5), ('AutoContrast', 0.6, 5)], [('Equalize', 0.8, 8), ('Equalize', 0.6, 3)], [('PosterizeIncreasing', 0.6, 7), ('PosterizeIncreasing', 0.6, 6)], [('Equalize', 0.4, 7), ('Solarize', 0.2, 4)], [('Equalize', 0.4, 4), ('Rotate', 0.8, 8)], [('Solarize', 0.6, 3), ('Equalize', 0.6, 7)], [('PosterizeIncreasing', 0.8, 5), ('Equalize', 1.0, 2)], [('Rotate', 0.2, 3), ('Solarize', 0.6, 8)], [('Equalize', 0.6, 8), ('PosterizeIncreasing', 0.4, 6)], [('Rotate', 0.8, 8), ('Color', 0.4, 0)], [('Rotate', 0.4, 9), ('Equalize', 0.6, 2)], [('Equalize', 0.0, 7), ('Equalize', 0.8, 8)], [('Invert', 0.6, 4), ('Equalize', 1.0, 8)], [('Color', 0.6, 4), ('Contrast', 1.0, 8)], [('Rotate', 0.8, 8), ('Color', 1.0, 2)], [('Color', 0.8, 8), ('Solarize', 0.8, 7)], [('Sharpness', 0.4, 7), ('Invert', 0.6, 8)], [('ShearX', 0.6, 5), ('Equalize', 1.0, 9)], [('Color', 0.4, 0), ('Equalize', 0.6, 3)], [('Equalize', 0.4, 7), ('Solarize', 0.2, 4)], [('Solarize', 0.6, 5), ('AutoContrast', 0.6, 5)], [('Invert', 0.6, 4), ('Equalize', 1.0, 8)], [('Color', 0.6, 4), ('Contrast', 1.0, 8)], [('Equalize', 0.8, 8), ('Equalize', 0.6, 3)], ] pc = [[AugmentOp(*a, hparams=hparams) for a in sp] for sp in policy] return pc def auto_augment_policy_3a(hparams): policy = [ [('Solarize', 1.0, 5)], # 128 solarize threshold @ 5 magnitude [('Desaturate', 1.0, 10)], # grayscale at 10 magnitude [('GaussianBlurRand', 1.0, 10)], ] pc = [[AugmentOp(*a, hparams=hparams) for a in sp] for sp in policy] return pc def auto_augment_policy(name='v0', hparams=None): hparams = hparams or _HPARAMS_DEFAULT if name == 'original': return auto_augment_policy_original(hparams) if name == 'originalr': return auto_augment_policy_originalr(hparams) if name == 'v0': return auto_augment_policy_v0(hparams) if name == 'v0r': return auto_augment_policy_v0r(hparams) if name == '3a': return auto_augment_policy_3a(hparams) assert False, f'Unknown AA policy {name}' class AutoAugment: def __init__(self, policy): self.policy = policy def __call__(self, img): sub_policy = random.choice(self.policy) for op in sub_policy: img = op(img) return img def __repr__(self): fs = self.__class__.__name__ + '(policy=' for p in self.policy: fs += '\n\t[' fs += ', '.join([str(op) for op in p]) fs += ']' fs += ')' return fs def auto_augment_transform(config_str: str, hparams: Optional[Dict] = None): """ Create a AutoAugment transform Args: config_str: String defining configuration of auto augmentation. Consists of multiple sections separated by dashes ('-'). The first section defines the AutoAugment policy (one of 'v0', 'v0r', 'original', 'originalr'). 
While the remaining sections define other arguments * 'mstd' - float std deviation of magnitude noise applied hparams: Other hparams (kwargs) for the AutoAugmentation scheme Returns: A PyTorch compatible Transform Examples:: 'original-mstd0.5' results in AutoAugment with original policy, magnitude_std 0.5 """ config = config_str.split('-') policy_name = config[0] config = config[1:] for c in config: cs = re.split(r'(\d.*)', c) if len(cs) < 2: continue key, val = cs[:2] if key == 'mstd': # noise param injected via hparams for now hparams.setdefault('magnitude_std', float(val)) else: assert False, 'Unknown AutoAugment config section' aa_policy = auto_augment_policy(policy_name, hparams=hparams) return AutoAugment(aa_policy) _RAND_TRANSFORMS = [ 'AutoContrast', 'Equalize', 'Invert', 'Rotate', 'Posterize', 'Solarize', 'SolarizeAdd', 'Color', 'Contrast', 'Brightness', 'Sharpness', 'ShearX', 'ShearY', 'TranslateXRel', 'TranslateYRel', # 'Cutout' # NOTE I've implement this as random erasing separately ] _RAND_INCREASING_TRANSFORMS = [ 'AutoContrast', 'Equalize', 'Invert', 'Rotate', 'PosterizeIncreasing', 'SolarizeIncreasing', 'SolarizeAdd', 'ColorIncreasing', 'ContrastIncreasing', 'BrightnessIncreasing', 'SharpnessIncreasing', 'ShearX', 'ShearY', 'TranslateXRel', 'TranslateYRel', # 'Cutout' # NOTE I've implement this as random erasing separately ] _RAND_3A = [ 'SolarizeIncreasing', 'Desaturate', 'GaussianBlur', ] _RAND_WEIGHTED_3A = { 'SolarizeIncreasing': 6, 'Desaturate': 6, 'GaussianBlur': 6, 'Rotate': 3, 'ShearX': 2, 'ShearY': 2, 'PosterizeIncreasing': 1, 'AutoContrast': 1, 'ColorIncreasing': 1, 'SharpnessIncreasing': 1, 'ContrastIncreasing': 1, 'BrightnessIncreasing': 1, 'Equalize': 1, 'Invert': 1, } # These experimental weights are based loosely on the relative improvements mentioned in paper. # They may not result in increased performance, but could likely be tuned to so. 
_RAND_WEIGHTED_0 = { 'Rotate': 3, 'ShearX': 2, 'ShearY': 2, 'TranslateXRel': 1, 'TranslateYRel': 1, 'ColorIncreasing': .25, 'SharpnessIncreasing': 0.25, 'AutoContrast': 0.25, 'SolarizeIncreasing': .05, 'SolarizeAdd': .05, 'ContrastIncreasing': .05, 'BrightnessIncreasing': .05, 'Equalize': .05, 'PosterizeIncreasing': 0.05, 'Invert': 0.05, } def _get_weighted_transforms(transforms: Dict): transforms, probs = list(zip(*transforms.items())) probs = np.array(probs) probs = probs / np.sum(probs) return transforms, probs def rand_augment_choices(name: str, increasing=True): if name == 'weights': return _RAND_WEIGHTED_0 if name == '3aw': return _RAND_WEIGHTED_3A if name == '3a': return _RAND_3A return _RAND_INCREASING_TRANSFORMS if increasing else _RAND_TRANSFORMS def rand_augment_ops( magnitude: Union[int, float] = 10, prob: float = 0.5, hparams: Optional[Dict] = None, transforms: Optional[Union[Dict, List]] = None, ): hparams = hparams or _HPARAMS_DEFAULT transforms = transforms or _RAND_TRANSFORMS return [AugmentOp( name, prob=prob, magnitude=magnitude, hparams=hparams) for name in transforms] class RandAugment: def __init__(self, ops, num_layers=2, choice_weights=None): self.ops = ops self.num_layers = num_layers self.choice_weights = choice_weights def __call__(self, img): # no replacement when using weighted choice ops = np.random.choice( self.ops, self.num_layers, replace=self.choice_weights is None, p=self.choice_weights, ) for op in ops: img = op(img) return img def __repr__(self): fs = self.__class__.__name__ + f'(n={self.num_layers}, ops=' for op in self.ops: fs += f'\n\t{op}' fs += ')' return fs def rand_augment_transform( config_str: str, hparams: Optional[Dict] = None, transforms: Optional[Union[str, Dict, List]] = None, ): """ Create a RandAugment transform Args: config_str (str): String defining configuration of random augmentation. Consists of multiple sections separated by dashes ('-'). The first section defines the specific variant of rand augment (currently only 'rand'). 
The remaining sections, not order specific determine * 'm' - integer magnitude of rand augment * 'n' - integer num layers (number of transform ops selected per image) * 'p' - float probability of applying each layer (default 0.5) * 'mstd' - float std deviation of magnitude noise applied, or uniform sampling if infinity (or > 100) * 'mmax' - set upper bound for magnitude to something other than default of _LEVEL_DENOM (10) * 'inc' - integer (bool), use augmentations that increase in severity with magnitude (default: 0) * 't' - str name of transform set to use hparams (dict): Other hparams (kwargs) for the RandAugmentation scheme Returns: A PyTorch compatible Transform Examples:: 'rand-m9-n3-mstd0.5' results in RandAugment with magnitude 9, num_layers 3, magnitude_std 0.5 'rand-mstd1-tweights' results in mag std 1.0, weighted transforms, default mag of 10 and num_layers 2 """ magnitude = _LEVEL_DENOM # default to _LEVEL_DENOM for magnitude (currently 10) num_layers = 2 # default to 2 ops per image increasing = False prob = 0.5 config = config_str.split('-') assert config[0] == 'rand' config = config[1:] for c in config: if c.startswith('t'): # NOTE old 'w' key was removed, 'w0' is not equivalent to 'tweights' val = str(c[1:]) if transforms is None: transforms = val else: # numeric options cs = re.split(r'(\d.*)', c) if len(cs) < 2: continue key, val = cs[:2] if key == 'mstd': # noise param / randomization of magnitude values mstd = float(val) if mstd > 100: # use uniform sampling in 0 to magnitude if mstd is > 100 mstd = float('inf') hparams.setdefault('magnitude_std', mstd) elif key == 'mmax': # clip magnitude between [0, mmax] instead of default [0, _LEVEL_DENOM] hparams.setdefault('magnitude_max', int(val)) elif key == 'inc': if bool(val): increasing = True elif key == 'm': magnitude = int(val) elif key == 'n': num_layers = int(val) elif key == 'p': prob = float(val) else: assert False, 'Unknown RandAugment config section' if isinstance(transforms, str): transforms = rand_augment_choices(transforms, increasing=increasing) elif transforms is None: transforms = _RAND_INCREASING_TRANSFORMS if increasing else _RAND_TRANSFORMS choice_weights = None if isinstance(transforms, Dict): transforms, choice_weights = _get_weighted_transforms(transforms) ra_ops = rand_augment_ops(magnitude=magnitude, prob=prob, hparams=hparams, transforms=transforms) return RandAugment(ra_ops, num_layers, choice_weights=choice_weights) _AUGMIX_TRANSFORMS = [ 'AutoContrast', 'ColorIncreasing', # not in paper 'ContrastIncreasing', # not in paper 'BrightnessIncreasing', # not in paper 'SharpnessIncreasing', # not in paper 'Equalize', 'Rotate', 'PosterizeIncreasing', 'SolarizeIncreasing', 'ShearX', 'ShearY', 'TranslateXRel', 'TranslateYRel', ] def augmix_ops( magnitude: Union[int, float] = 10, hparams: Optional[Dict] = None, transforms: Optional[Union[str, Dict, List]] = None, ): hparams = hparams or _HPARAMS_DEFAULT transforms = transforms or _AUGMIX_TRANSFORMS return [AugmentOp( name, prob=1.0, magnitude=magnitude, hparams=hparams ) for name in transforms] class AugMixAugment: """ AugMix Transform Adapted and improved from impl here: https://github.com/google-research/augmix/blob/master/imagenet.py From paper: 'AugMix: A Simple Data Processing Method to Improve Robustness and Uncertainty - https://arxiv.org/abs/1912.02781 """ def __init__(self, ops, alpha=1., width=3, depth=-1, blended=False): self.ops = ops self.alpha = alpha self.width = width self.depth = depth self.blended = blended # blended mode is faster but not 
well tested def _calc_blended_weights(self, ws, m): ws = ws * m cump = 1. rws = [] for w in ws[::-1]: alpha = w / cump cump *= (1 - alpha) rws.append(alpha) return np.array(rws[::-1], dtype=np.float32) def _apply_blended(self, img, mixing_weights, m): # This is my first crack and implementing a slightly faster mixed augmentation. Instead # of accumulating the mix for each chain in a Numpy array and then blending with original, # it recomputes the blending coefficients and applies one PIL image blend per chain. # TODO the results appear in the right ballpark but they differ by more than rounding. img_orig = img.copy() ws = self._calc_blended_weights(mixing_weights, m) for w in ws: depth = self.depth if self.depth > 0 else np.random.randint(1, 4) ops = np.random.choice(self.ops, depth, replace=True) img_aug = img_orig # no ops are in-place, deep copy not necessary for op in ops: img_aug = op(img_aug) img = Image.blend(img, img_aug, w) return img def _apply_basic(self, img, mixing_weights, m): # This is a literal adaptation of the paper/official implementation without normalizations and # PIL <-> Numpy conversions between every op. It is still quite CPU compute heavy compared to the # typical augmentation transforms, could use a GPU / Kornia implementation. img_shape = img.size[0], img.size[1], len(img.getbands()) mixed = np.zeros(img_shape, dtype=np.float32) for mw in mixing_weights: depth = self.depth if self.depth > 0 else np.random.randint(1, 4) ops = np.random.choice(self.ops, depth, replace=True) img_aug = img # no ops are in-place, deep copy not necessary for op in ops: img_aug = op(img_aug) mixed += mw * np.asarray(img_aug, dtype=np.float32) np.clip(mixed, 0, 255., out=mixed) mixed = Image.fromarray(mixed.astype(np.uint8)) return Image.blend(img, mixed, m) def __call__(self, img): mixing_weights = np.float32(np.random.dirichlet([self.alpha] * self.width)) m = np.float32(np.random.beta(self.alpha, self.alpha)) if self.blended: mixed = self._apply_blended(img, mixing_weights, m) else: mixed = self._apply_basic(img, mixing_weights, m) return mixed def __repr__(self): fs = self.__class__.__name__ + f'(alpha={self.alpha}, width={self.width}, depth={self.depth}, ops=' for op in self.ops: fs += f'\n\t{op}' fs += ')' return fs def augment_and_mix_transform(config_str: str, hparams: Optional[Dict] = None): """ Create AugMix PyTorch transform Args: config_str (str): String defining configuration of random augmentation. Consists of multiple sections separated by dashes ('-'). The first section defines the specific variant of rand augment (currently only 'rand'). The remaining sections, not order specific determine 'm' - integer magnitude (severity) of augmentation mix (default: 3) 'w' - integer width of augmentation chain (default: 3) 'd' - integer depth of augmentation chain (-1 is random [1, 3], default: -1) 'b' - integer (bool), blend each branch of chain into end result without a final blend, less CPU (default: 0) 'mstd' - float std deviation of magnitude noise applied (default: 0) Ex 'augmix-m5-w4-d2' results in AugMix with severity 5, chain width 4, chain depth 2 hparams: Other hparams (kwargs) for the Augmentation transforms Returns: A PyTorch compatible Transform """ magnitude = 3 width = 3 depth = -1 alpha = 1. 
blended = False config = config_str.split('-') assert config[0] == 'augmix' config = config[1:] for c in config: cs = re.split(r'(\d.*)', c) if len(cs) < 2: continue key, val = cs[:2] if key == 'mstd': # noise param injected via hparams for now hparams.setdefault('magnitude_std', float(val)) elif key == 'm': magnitude = int(val) elif key == 'w': width = int(val) elif key == 'd': depth = int(val) elif key == 'a': alpha = float(val) elif key == 'b': blended = bool(val) else: assert False, 'Unknown AugMix config section' hparams.setdefault('magnitude_std', float('inf')) # default to uniform sampling (if not set via mstd arg) ops = augmix_ops(magnitude=magnitude, hparams=hparams) return AugMixAugment(ops, alpha=alpha, width=width, depth=depth, blended=blended)
pytorch-image-models/timm/data/auto_augment.py/0
{ "file_path": "pytorch-image-models/timm/data/auto_augment.py", "repo_id": "pytorch-image-models", "token_count": 15926 }
257
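A minimal usage sketch of the config-string entry points defined in auto_augment.py above. The config strings follow the formats documented in the docstrings; the hparam values (translate_const, img_mean) and the synthetic input image are illustrative assumptions, not defaults pulled from any training recipe.

# Sketch: building AutoAugment / RandAugment / AugMix transforms from config strings.
import numpy as np
from PIL import Image
from timm.data.auto_augment import (
    auto_augment_transform,
    rand_augment_transform,
    augment_and_mix_transform,
)

img = Image.fromarray(np.random.randint(0, 255, (224, 224, 3), dtype=np.uint8))  # stand-in RGB image

# hparams consumed by the affine / fill ops; values here are illustrative
aa_hparams = dict(translate_const=100, img_mean=(124, 116, 104))

aa = auto_augment_transform('original-mstd0.5', aa_hparams)          # original ImageNet policy, mstd 0.5
ra = rand_augment_transform('rand-m9-n2-mstd0.5-inc1', aa_hparams)   # RandAugment, 'increasing' transform set
am = augment_and_mix_transform('augmix-m5-w4-d2', dict(translate_pct=0.3))  # AugMix, severity 5, width 4, depth 2

for t in (aa, ra, am):
    out = t(img)  # each transform maps a PIL image to a new PIL image
    print(type(t).__name__, out.size)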
from .reader_factory import create_reader from .img_extensions import *
pytorch-image-models/timm/data/readers/__init__.py/0
{ "file_path": "pytorch-image-models/timm/data/readers/__init__.py", "repo_id": "pytorch-image-models", "token_count": 20 }
258
""" Transforms Factory Factory methods for building image transforms for use with TIMM (PyTorch Image Models) Hacked together by / Copyright 2019, Ross Wightman """ import math from typing import Optional, Tuple, Union import torch from torchvision import transforms from timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, DEFAULT_CROP_PCT from timm.data.auto_augment import rand_augment_transform, augment_and_mix_transform, auto_augment_transform from timm.data.transforms import str_to_interp_mode, str_to_pil_interp, RandomResizedCropAndInterpolation, \ ResizeKeepRatio, CenterCropOrPad, RandomCropOrPad, TrimBorder, MaybeToTensor, MaybePILToTensor from timm.data.naflex_transforms import RandomResizedCropToSequence, ResizeToSequence, Patchify from timm.data.random_erasing import RandomErasing def transforms_noaug_train( img_size: Union[int, Tuple[int, int]] = 224, interpolation: str = 'bilinear', mean: Tuple[float, ...] = IMAGENET_DEFAULT_MEAN, std: Tuple[float, ...] = IMAGENET_DEFAULT_STD, use_prefetcher: bool = False, normalize: bool = True, ): """ No-augmentation image transforms for training. Args: img_size: Target image size. interpolation: Image interpolation mode. mean: Image normalization mean. std: Image normalization standard deviation. use_prefetcher: Prefetcher enabled. Do not convert image to tensor or normalize. normalize: Normalization tensor output w/ provided mean/std (if prefetcher not used). Returns: """ if interpolation == 'random': # random interpolation not supported with no-aug interpolation = 'bilinear' tfl = [ transforms.Resize(img_size, interpolation=str_to_interp_mode(interpolation)), transforms.CenterCrop(img_size) ] if use_prefetcher: # prefetcher and collate will handle tensor conversion and norm tfl += [MaybePILToTensor()] elif not normalize: # when normalize disabled, converted to tensor without scaling, keep original dtype tfl += [MaybePILToTensor()] else: tfl += [ MaybeToTensor(), transforms.Normalize( mean=torch.tensor(mean), std=torch.tensor(std) ) ] return transforms.Compose(tfl) def transforms_imagenet_train( img_size: Union[int, Tuple[int, int]] = 224, scale: Optional[Tuple[float, float]] = None, ratio: Optional[Tuple[float, float]] = None, train_crop_mode: Optional[str] = None, hflip: float = 0.5, vflip: float = 0., color_jitter: Union[float, Tuple[float, ...]] = 0.4, color_jitter_prob: Optional[float] = None, force_color_jitter: bool = False, grayscale_prob: float = 0., gaussian_blur_prob: float = 0., auto_augment: Optional[str] = None, interpolation: str = 'random', mean: Tuple[float, ...] = IMAGENET_DEFAULT_MEAN, std: Tuple[float, ...] = IMAGENET_DEFAULT_STD, re_prob: float = 0., re_mode: str = 'const', re_count: int = 1, re_num_splits: int = 0, use_prefetcher: bool = False, normalize: bool = True, separate: bool = False, naflex: bool = False, patch_size: Union[int, Tuple[int, int]] = 16, max_seq_len: int = 576, # 24x24 for 16x16 patch patchify: bool = False, ): """ ImageNet-oriented image transforms for training. Args: img_size: Target image size. train_crop_mode: Training random crop mode ('rrc', 'rkrc', 'rkrr'). scale: Random resize scale range (crop area, < 1.0 => zoom in). ratio: Random aspect ratio range (crop ratio for RRC, ratio adjustment factor for RKR). hflip: Horizontal flip probability. vflip: Vertical flip probability. color_jitter: Random color jitter component factors (brightness, contrast, saturation, hue). Scalar is applied as (scalar,) * 3 (no hue). 
color_jitter_prob: Apply color jitter with this probability if not None (for SimlCLR-like aug). force_color_jitter: Force color jitter where it is normally disabled (ie with RandAugment on). grayscale_prob: Probability of converting image to grayscale (for SimCLR-like aug). gaussian_blur_prob: Probability of applying gaussian blur (for SimCLR-like aug). auto_augment: Auto augment configuration string (see auto_augment.py). interpolation: Image interpolation mode. mean: Image normalization mean. std: Image normalization standard deviation. re_prob: Random erasing probability. re_mode: Random erasing fill mode. re_count: Number of random erasing regions. re_num_splits: Control split of random erasing across batch size. use_prefetcher: Prefetcher enabled. Do not convert image to tensor or normalize. normalize: Normalize tensor output w/ provided mean/std (if prefetcher not used). separate: Output transforms in 3-stage tuple. naflex: Enable NaFlex mode, sequence constrained patch output patch_size: Patch size for NaFlex mode. max_seq_len: Max sequence length for NaFlex mode. Returns: If separate==True, the transforms are returned as a tuple of 3 separate transforms for use in a mixing dataset that passes * all data through the first (primary) transform, called the 'clean' data * a portion of the data through the secondary transform * normalizes and converts the branches above with the third, final transform """ train_crop_mode = train_crop_mode or 'rrc' assert train_crop_mode in {'rrc', 'rkrc', 'rkrr'} primary_tfl = [] if naflex: scale = tuple(scale or (0.08, 1.0)) # default imagenet scale range ratio = tuple(ratio or (3. / 4., 4. / 3.)) # default imagenet ratio range primary_tfl += [RandomResizedCropToSequence( patch_size=patch_size, max_seq_len=max_seq_len, scale=scale, ratio=ratio, interpolation=interpolation )] else: if train_crop_mode in ('rkrc', 'rkrr'): # FIXME integration of RKR is a WIP scale = tuple(scale or (0.8, 1.00)) ratio = tuple(ratio or (0.9, 1/.9)) primary_tfl += [ ResizeKeepRatio( img_size, interpolation=interpolation, random_scale_prob=0.5, random_scale_range=scale, random_scale_area=True, # scale compatible with RRC random_aspect_prob=0.5, random_aspect_range=ratio, ), CenterCropOrPad(img_size, padding_mode='reflect') if train_crop_mode == 'rkrc' else RandomCropOrPad(img_size, padding_mode='reflect') ] else: scale = tuple(scale or (0.08, 1.0)) # default imagenet scale range ratio = tuple(ratio or (3. / 4., 4. 
/ 3.)) # default imagenet ratio range primary_tfl += [ RandomResizedCropAndInterpolation( img_size, scale=scale, ratio=ratio, interpolation=interpolation, ) ] if hflip > 0.: primary_tfl += [transforms.RandomHorizontalFlip(p=hflip)] if vflip > 0.: primary_tfl += [transforms.RandomVerticalFlip(p=vflip)] secondary_tfl = [] disable_color_jitter = False if auto_augment: assert isinstance(auto_augment, str) # color jitter is typically disabled if AA/RA on, # this allows override without breaking old hparm cfgs disable_color_jitter = not (force_color_jitter or '3a' in auto_augment) if isinstance(img_size, (tuple, list)): img_size_min = min(img_size) else: img_size_min = img_size aa_params = dict( translate_const=int(img_size_min * 0.45), img_mean=tuple([min(255, round(255 * x)) for x in mean]), ) if interpolation and interpolation != 'random': aa_params['interpolation'] = str_to_pil_interp(interpolation) if auto_augment.startswith('rand'): secondary_tfl += [rand_augment_transform(auto_augment, aa_params)] elif auto_augment.startswith('augmix'): aa_params['translate_pct'] = 0.3 secondary_tfl += [augment_and_mix_transform(auto_augment, aa_params)] else: secondary_tfl += [auto_augment_transform(auto_augment, aa_params)] if color_jitter is not None and not disable_color_jitter: # color jitter is enabled when not using AA or when forced if isinstance(color_jitter, (list, tuple)): # color jitter should be a 3-tuple/list if spec brightness/contrast/saturation # or 4 if also augmenting hue assert len(color_jitter) in (3, 4) else: # if it's a scalar, duplicate for brightness, contrast, and saturation, no hue color_jitter = (float(color_jitter),) * 3 if color_jitter_prob is not None: secondary_tfl += [ transforms.RandomApply([ transforms.ColorJitter(*color_jitter), ], p=color_jitter_prob ) ] else: secondary_tfl += [transforms.ColorJitter(*color_jitter)] if grayscale_prob: secondary_tfl += [transforms.RandomGrayscale(p=grayscale_prob)] if gaussian_blur_prob: secondary_tfl += [ transforms.RandomApply([ transforms.GaussianBlur(kernel_size=23), # hardcoded for now ], p=gaussian_blur_prob, ) ] final_tfl = [] if use_prefetcher: # prefetcher and collate will handle tensor conversion and norm final_tfl += [MaybePILToTensor()] elif not normalize: # when normalize disable, converted to tensor without scaling, keeps original dtype final_tfl += [MaybePILToTensor()] else: final_tfl += [ MaybeToTensor(), transforms.Normalize( mean=torch.tensor(mean), std=torch.tensor(std), ), ] if re_prob > 0.: final_tfl += [ RandomErasing( re_prob, mode=re_mode, max_count=re_count, num_splits=re_num_splits, device='cpu', ) ] if patchify: final_tfl += [Patchify(patch_size=patch_size)] if separate: return transforms.Compose(primary_tfl), transforms.Compose(secondary_tfl), transforms.Compose(final_tfl) else: return transforms.Compose(primary_tfl + secondary_tfl + final_tfl) def transforms_imagenet_eval( img_size: Union[int, Tuple[int, int]] = 224, crop_pct: Optional[float] = None, crop_mode: Optional[str] = None, crop_border_pixels: Optional[int] = None, interpolation: str = 'bilinear', mean: Tuple[float, ...] = IMAGENET_DEFAULT_MEAN, std: Tuple[float, ...] = IMAGENET_DEFAULT_STD, use_prefetcher: bool = False, normalize: bool = True, naflex: bool = False, patch_size: Union[int, Tuple[int, int]] = 16, max_seq_len: int = 576, # 24x24 for 16x16 patch patchify: bool = False, ): """ ImageNet-oriented image transform for evaluation and inference. Args: img_size: Target image size. crop_pct: Crop percentage. Defaults to 0.875 when None. 
crop_mode: Crop mode. One of ['squash', 'border', 'center']. Defaults to 'center' when None. crop_border_pixels: Trim a border of specified # pixels around edge of original image. interpolation: Image interpolation mode. mean: Image normalization mean. std: Image normalization standard deviation. use_prefetcher: Prefetcher enabled. Do not convert image to tensor or normalize. normalize: Normalize tensor output w/ provided mean/std (if prefetcher not used). naflex: Enable NaFlex mode, sequence constrained patch output patch_size: Patch size for NaFlex mode. max_seq_len: Max sequence length for NaFlex mode. patchify: Patchify the output instead of relying on prefetcher Returns: Composed transform pipeline """ crop_pct = crop_pct or DEFAULT_CROP_PCT if isinstance(img_size, (tuple, list)): assert len(img_size) == 2 scale_size = tuple([math.floor(x / crop_pct) for x in img_size]) else: scale_size = math.floor(img_size / crop_pct) scale_size = (scale_size, scale_size) tfl = [] if crop_border_pixels: tfl += [TrimBorder(crop_border_pixels)] if naflex: tfl += [ResizeToSequence( patch_size=patch_size, max_seq_len=max_seq_len, interpolation=interpolation, )] else: if crop_mode == 'squash': # squash mode scales each edge to 1/pct of target, then crops # aspect ratio is not preserved, no img lost if crop_pct == 1.0 tfl += [ transforms.Resize(scale_size, interpolation=str_to_interp_mode(interpolation)), transforms.CenterCrop(img_size), ] elif crop_mode == 'border': # scale the longest edge of image to 1/pct of target edge, add borders to pad, then crop # no image lost if crop_pct == 1.0 fill = [round(255 * v) for v in mean] tfl += [ ResizeKeepRatio(scale_size, interpolation=interpolation, longest=1.0), CenterCropOrPad(img_size, fill=fill), ] else: # default crop model is center # aspect ratio is preserved, crops center within image, no borders are added, image is lost if scale_size[0] == scale_size[1]: # simple case, use torchvision built-in Resize w/ shortest edge mode (scalar size arg) tfl += [ transforms.Resize(scale_size[0], interpolation=str_to_interp_mode(interpolation)) ] else: # resize the shortest edge to matching target dim for non-square target tfl += [ResizeKeepRatio(scale_size)] tfl += [transforms.CenterCrop(img_size)] if use_prefetcher: # prefetcher and collate will handle tensor conversion and norm tfl += [MaybePILToTensor()] elif not normalize: # when normalize disabled, converted to tensor without scaling, keeps original dtype tfl += [MaybePILToTensor()] else: tfl += [ MaybeToTensor(), transforms.Normalize( mean=torch.tensor(mean), std=torch.tensor(std), ), ] if patchify: tfl += [Patchify(patch_size=patch_size)] return transforms.Compose(tfl) def create_transform( input_size: Union[int, Tuple[int, int], Tuple[int, int, int]] = 224, is_training: bool = False, no_aug: bool = False, train_crop_mode: Optional[str] = None, scale: Optional[Tuple[float, float]] = None, ratio: Optional[Tuple[float, float]] = None, hflip: float = 0.5, vflip: float = 0., color_jitter: Union[float, Tuple[float, ...]] = 0.4, color_jitter_prob: Optional[float] = None, grayscale_prob: float = 0., gaussian_blur_prob: float = 0., auto_augment: Optional[str] = None, interpolation: str = 'bilinear', mean: Tuple[float, ...] = IMAGENET_DEFAULT_MEAN, std: Tuple[float, ...] 
= IMAGENET_DEFAULT_STD, re_prob: float = 0., re_mode: str = 'const', re_count: int = 1, re_num_splits: int = 0, crop_pct: Optional[float] = None, crop_mode: Optional[str] = None, crop_border_pixels: Optional[int] = None, tf_preprocessing: bool = False, use_prefetcher: bool = False, normalize: bool = True, separate: bool = False, naflex: bool = False, patch_size: Union[int, Tuple[int, int]] = 16, max_seq_len: int = 576, # 24x24 for 16x16 patch patchify: bool = False ): """ Args: input_size: Target input size (channels, height, width) tuple or size scalar. is_training: Return training (random) transforms. no_aug: Disable augmentation for training (useful for debug). train_crop_mode: Training random crop mode ('rrc', 'rkrc', 'rkrr'). scale: Random resize scale range (crop area, < 1.0 => zoom in). ratio: Random aspect ratio range (crop ratio for RRC, ratio adjustment factor for RKR). hflip: Horizontal flip probability. vflip: Vertical flip probability. color_jitter: Random color jitter component factors (brightness, contrast, saturation, hue). Scalar is applied as (scalar,) * 3 (no hue). color_jitter_prob: Apply color jitter with this probability if not None (for SimlCLR-like aug). grayscale_prob: Probability of converting image to grayscale (for SimCLR-like aug). gaussian_blur_prob: Probability of applying gaussian blur (for SimCLR-like aug). auto_augment: Auto augment configuration string (see auto_augment.py). interpolation: Image interpolation mode. mean: Image normalization mean. std: Image normalization standard deviation. re_prob: Random erasing probability. re_mode: Random erasing fill mode. re_count: Number of random erasing regions. re_num_splits: Control split of random erasing across batch size. crop_pct: Inference crop percentage (output size / resize size). crop_mode: Inference crop mode. One of ['squash', 'border', 'center']. Defaults to 'center' when None. crop_border_pixels: Inference crop border of specified # pixels around edge of original image. tf_preprocessing: Use TF 1.0 inference preprocessing for testing model ports use_prefetcher: Pre-fetcher enabled. Do not convert image to tensor or normalize. normalize: Normalization tensor output w/ provided mean/std (if prefetcher not used). separate: Output transforms in 3-stage tuple. 
Returns: Composed transforms or tuple thereof """ if isinstance(input_size, (tuple, list)): img_size = input_size[-2:] else: img_size = input_size if tf_preprocessing and use_prefetcher: assert not separate, "Separate transforms not supported for TF preprocessing" from timm.data.tf_preprocessing import TfPreprocessTransform transform = TfPreprocessTransform( is_training=is_training, size=img_size, interpolation=interpolation, ) else: if is_training and no_aug: assert not separate, "Cannot perform split augmentation with no_aug" transform = transforms_noaug_train( img_size, interpolation=interpolation, mean=mean, std=std, use_prefetcher=use_prefetcher, normalize=normalize, ) elif is_training: transform = transforms_imagenet_train( img_size, train_crop_mode=train_crop_mode, scale=scale, ratio=ratio, hflip=hflip, vflip=vflip, color_jitter=color_jitter, color_jitter_prob=color_jitter_prob, grayscale_prob=grayscale_prob, gaussian_blur_prob=gaussian_blur_prob, auto_augment=auto_augment, interpolation=interpolation, mean=mean, std=std, re_prob=re_prob, re_mode=re_mode, re_count=re_count, re_num_splits=re_num_splits, use_prefetcher=use_prefetcher, normalize=normalize, separate=separate, naflex=naflex, patch_size=patch_size, max_seq_len=max_seq_len, patchify=patchify, ) else: assert not separate, "Separate transforms not supported for validation preprocessing" transform = transforms_imagenet_eval( img_size, interpolation=interpolation, mean=mean, std=std, crop_pct=crop_pct, crop_mode=crop_mode, crop_border_pixels=crop_border_pixels, use_prefetcher=use_prefetcher, normalize=normalize, naflex=naflex, patch_size=patch_size, max_seq_len=max_seq_len, patchify=patchify, ) return transform
pytorch-image-models/timm/data/transforms_factory.py/0
{ "file_path": "pytorch-image-models/timm/data/transforms_factory.py", "repo_id": "pytorch-image-models", "token_count": 9797 }
259
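A short sketch of how create_transform from transforms_factory.py above is typically called to build training and evaluation pipelines. The sizes, crop percentage, and augmentation string are illustrative choices, not defaults of any particular model config.

# Sketch: train / eval transform pipelines via create_transform (defined above).
import numpy as np
from PIL import Image
from timm.data.transforms_factory import create_transform

train_tf = create_transform(
    input_size=(3, 224, 224),
    is_training=True,
    auto_augment='rand-m9-mstd0.5-inc1',  # parsed by rand_augment_transform
    interpolation='random',
    re_prob=0.25,  # random erasing, applied after ToTensor + Normalize
)

eval_tf = create_transform(
    input_size=224,
    is_training=False,
    crop_pct=0.95,
    interpolation='bicubic',
)

img = Image.fromarray(np.random.randint(0, 255, (320, 320, 3), dtype=np.uint8))
print(train_tf(img).shape)  # torch.Size([3, 224, 224])
print(eval_tf(img).shape)   # torch.Size([3, 224, 224])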
""" Conv2d w/ Same Padding Hacked together by / Copyright 2020 Ross Wightman """ import torch import torch.nn as nn import torch.nn.functional as F from typing import Tuple, Optional from ._fx import register_notrace_module from .config import is_exportable, is_scriptable from .padding import pad_same, pad_same_arg, get_padding_value _USE_EXPORT_CONV = False def conv2d_same( x, weight: torch.Tensor, bias: Optional[torch.Tensor] = None, stride: Tuple[int, int] = (1, 1), padding: Tuple[int, int] = (0, 0), dilation: Tuple[int, int] = (1, 1), groups: int = 1, ): x = pad_same(x, weight.shape[-2:], stride, dilation) return F.conv2d(x, weight, bias, stride, (0, 0), dilation, groups) @register_notrace_module class Conv2dSame(nn.Conv2d): """ Tensorflow like 'SAME' convolution wrapper for 2D convolutions """ def __init__( self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, ): super(Conv2dSame, self).__init__( in_channels, out_channels, kernel_size, stride, 0, dilation, groups, bias, ) def forward(self, x): return conv2d_same( x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups, ) class Conv2dSameExport(nn.Conv2d): """ ONNX export friendly Tensorflow like 'SAME' convolution wrapper for 2D convolutions NOTE: This does not currently work with torch.jit.script """ # pylint: disable=unused-argument def __init__( self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, ): super(Conv2dSameExport, self).__init__( in_channels, out_channels, kernel_size, stride, 0, dilation, groups, bias, ) self.pad = None self.pad_input_size = (0, 0) def forward(self, x): input_size = x.size()[-2:] if self.pad is None: pad_arg = pad_same_arg(input_size, self.weight.size()[-2:], self.stride, self.dilation) self.pad = nn.ZeroPad2d(pad_arg) self.pad_input_size = input_size x = self.pad(x) return F.conv2d( x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups, ) def create_conv2d_pad(in_chs, out_chs, kernel_size, **kwargs): padding = kwargs.pop('padding', '') kwargs.setdefault('bias', False) padding, is_dynamic = get_padding_value(padding, kernel_size, **kwargs) if is_dynamic: if _USE_EXPORT_CONV and is_exportable(): # older PyTorch ver needed this to export same padding reasonably assert not is_scriptable() # Conv2DSameExport does not work with jit return Conv2dSameExport(in_chs, out_chs, kernel_size, **kwargs) else: return Conv2dSame(in_chs, out_chs, kernel_size, **kwargs) else: return nn.Conv2d(in_chs, out_chs, kernel_size, padding=padding, **kwargs)
pytorch-image-models/timm/layers/conv2d_same.py/0
{ "file_path": "pytorch-image-models/timm/layers/conv2d_same.py", "repo_id": "pytorch-image-models", "token_count": 1579 }
260
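A brief sketch exercising the layers defined in conv2d_same.py above: with stride 2 the dynamic 'SAME' padding keeps the output at ceil(input / stride), while create_conv2d_pad falls back to a plain nn.Conv2d whenever static symmetric padding is possible.

# Sketch: TF-like 'SAME' padding behaviour of Conv2dSame / create_conv2d_pad (defined above).
import torch
from timm.layers.conv2d_same import Conv2dSame, create_conv2d_pad

x = torch.randn(1, 3, 224, 224)

conv = Conv2dSame(3, 8, kernel_size=3, stride=2)
print(conv(x).shape)  # torch.Size([1, 8, 112, 112]) == ceil(224 / 2)

# stride 1 can be padded statically -> nn.Conv2d; stride 2 needs dynamic padding -> Conv2dSame
static = create_conv2d_pad(3, 8, kernel_size=3, padding='same', stride=1)
dynamic = create_conv2d_pad(3, 8, kernel_size=3, padding='same', stride=2)
print(type(static).__name__, type(dynamic).__name__)  # Conv2d Conv2dSame
print(static(x).shape, dynamic(x).shape)  # [1, 8, 224, 224] and [1, 8, 112, 112]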
""" Global Response Normalization Module Based on the GRN layer presented in `ConvNeXt-V2 - Co-designing and Scaling ConvNets with Masked Autoencoders` - https://arxiv.org/abs/2301.00808 This implementation * works for both NCHW and NHWC tensor layouts * uses affine param names matching existing torch norm layers * slightly improves eager mode performance via fused addcmul Hacked together by / Copyright 2023 Ross Wightman """ import torch from torch import nn as nn class GlobalResponseNorm(nn.Module): """ Global Response Normalization layer """ def __init__(self, dim, eps=1e-6, channels_last=True): super().__init__() self.eps = eps if channels_last: self.spatial_dim = (1, 2) self.channel_dim = -1 self.wb_shape = (1, 1, 1, -1) else: self.spatial_dim = (2, 3) self.channel_dim = 1 self.wb_shape = (1, -1, 1, 1) self.weight = nn.Parameter(torch.zeros(dim)) self.bias = nn.Parameter(torch.zeros(dim)) def forward(self, x): x_g = x.norm(p=2, dim=self.spatial_dim, keepdim=True) x_n = x_g / (x_g.mean(dim=self.channel_dim, keepdim=True) + self.eps) return x + torch.addcmul(self.bias.view(self.wb_shape), self.weight.view(self.wb_shape), x * x_n)
pytorch-image-models/timm/layers/grn.py/0
{ "file_path": "pytorch-image-models/timm/layers/grn.py", "repo_id": "pytorch-image-models", "token_count": 565 }
261
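A tiny sketch of GlobalResponseNorm from grn.py above in both tensor layouts. Because weight and bias are zero-initialized, the layer starts out as an identity mapping, which is easy to verify numerically.

# Sketch: GlobalResponseNorm (defined above) in channels-last and channels-first layouts.
import torch
from timm.layers.grn import GlobalResponseNorm

x_nhwc = torch.randn(2, 14, 14, 64)  # (B, H, W, C)
grn_last = GlobalResponseNorm(64, channels_last=True)
print(torch.allclose(grn_last(x_nhwc), x_nhwc))  # True: zero-init weight/bias -> identity at init

x_nchw = torch.randn(2, 64, 14, 14)  # (B, C, H, W)
grn_first = GlobalResponseNorm(64, channels_last=False)
print(grn_first(x_nchw).shape)  # torch.Size([2, 64, 14, 14])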
""" Padding Helpers Hacked together by / Copyright 2020 Ross Wightman """ import math from typing import List, Tuple, Union import torch import torch.nn.functional as F from .helpers import to_2tuple # Calculate symmetric padding for a convolution def get_padding(kernel_size: int, stride: int = 1, dilation: int = 1, **_) -> Union[int, List[int]]: if any([isinstance(v, (tuple, list)) for v in [kernel_size, stride, dilation]]): kernel_size, stride, dilation = to_2tuple(kernel_size), to_2tuple(stride), to_2tuple(dilation) return [get_padding(*a) for a in zip(kernel_size, stride, dilation)] padding = ((stride - 1) + dilation * (kernel_size - 1)) // 2 return padding # Calculate asymmetric TensorFlow-like 'SAME' padding for a convolution def get_same_padding(x: int, kernel_size: int, stride: int, dilation: int): if isinstance(x, torch.Tensor): return torch.clamp(((x / stride).ceil() - 1) * stride + (kernel_size - 1) * dilation + 1 - x, min=0) else: return max((math.ceil(x / stride) - 1) * stride + (kernel_size - 1) * dilation + 1 - x, 0) # Can SAME padding for given args be done statically? def is_static_pad(kernel_size: int, stride: int = 1, dilation: int = 1, **_): if any([isinstance(v, (tuple, list)) for v in [kernel_size, stride, dilation]]): kernel_size, stride, dilation = to_2tuple(kernel_size), to_2tuple(stride), to_2tuple(dilation) return all([is_static_pad(*a) for a in zip(kernel_size, stride, dilation)]) return stride == 1 and (dilation * (kernel_size - 1)) % 2 == 0 def pad_same_arg( input_size: List[int], kernel_size: List[int], stride: List[int], dilation: List[int] = (1, 1), ) -> List[int]: ih, iw = input_size kh, kw = kernel_size pad_h = get_same_padding(ih, kh, stride[0], dilation[0]) pad_w = get_same_padding(iw, kw, stride[1], dilation[1]) return [pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2] # Dynamically pad input x with 'SAME' padding for conv with specified args def pad_same( x, kernel_size: List[int], stride: List[int], dilation: List[int] = (1, 1), value: float = 0, ): ih, iw = x.size()[-2:] pad_h = get_same_padding(ih, kernel_size[0], stride[0], dilation[0]) pad_w = get_same_padding(iw, kernel_size[1], stride[1], dilation[1]) x = F.pad(x, (pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2), value=value) return x def get_padding_value(padding, kernel_size, **kwargs) -> Tuple[Tuple, bool]: dynamic = False if isinstance(padding, str): # for any string padding, the padding will be calculated for you, one of three ways padding = padding.lower() if padding == 'same': # TF compatible 'SAME' padding, has a performance and GPU memory allocation impact if is_static_pad(kernel_size, **kwargs): # static case, no extra overhead padding = get_padding(kernel_size, **kwargs) else: # dynamic 'SAME' padding, has runtime/GPU memory overhead padding = 0 dynamic = True elif padding == 'valid': # 'VALID' padding, same as padding=0 padding = 0 else: # Default to PyTorch style 'same'-ish symmetric padding padding = get_padding(kernel_size, **kwargs) return padding, dynamic
pytorch-image-models/timm/layers/padding.py/0
{ "file_path": "pytorch-image-models/timm/layers/padding.py", "repo_id": "pytorch-image-models", "token_count": 1439 }
262
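A quick sketch of the helpers in padding.py above: get_padding gives the static symmetric padding, get_same_padding / pad_same compute the asymmetric TF-'SAME' amounts at runtime, and get_padding_value reports whether a 'same' request can be resolved statically.

# Sketch: the padding helpers defined above.
import torch
from timm.layers.padding import get_padding, get_same_padding, pad_same, get_padding_value

print(get_padding(kernel_size=3, stride=1, dilation=1))            # 1 (symmetric)
print(get_same_padding(224, kernel_size=3, stride=2, dilation=1))  # 1 (total extra pixels needed)

x = torch.randn(1, 3, 11, 11)
y = pad_same(x, kernel_size=[3, 3], stride=[2, 2])
print(y.shape)  # torch.Size([1, 3, 13, 13]); a 3x3 stride-2 conv then yields 6x6 == ceil(11 / 2)

print(get_padding_value('same', 3, stride=1))  # (1, False): static padding suffices
print(get_padding_value('same', 3, stride=2))  # (0, True): dynamic padding required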
try: from torch import _assert except ImportError: def _assert(condition: bool, message: str): assert condition, message def _float_to_int(x: float) -> int: """ Symbolic tracing helper to substitute for inbuilt `int`. Hint: Inbuilt `int` can't accept an argument of type `Proxy` """ return int(x)
pytorch-image-models/timm/layers/trace_utils.py/0
{ "file_path": "pytorch-image-models/timm/layers/trace_utils.py", "repo_id": "pytorch-image-models", "token_count": 119 }
263
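A minimal sketch of the helpers in trace_utils.py above: _assert is a drop-in for a bare assert that stays traceable, and _float_to_int substitutes for the builtin int during FX symbolic tracing.

# Sketch: using the tracing helpers defined above.
import torch
from timm.layers.trace_utils import _assert, _float_to_int

def check_even_tokens(x: torch.Tensor) -> torch.Tensor:
    # _assert instead of a bare assert so the check survives symbolic tracing
    _assert(x.shape[1] % 2 == 0, 'sequence length must be even')
    return x.reshape(x.shape[0], x.shape[1] // 2, -1)

print(check_even_tokens(torch.randn(2, 8, 4)).shape)  # torch.Size([2, 4, 8])
print(_float_to_int(7.0))  # 7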
import hashlib import json import logging import os from functools import partial from pathlib import Path from tempfile import TemporaryDirectory from typing import Any, Dict, Iterable, List, Optional, Tuple, Union import torch from torch.hub import HASH_REGEX, download_url_to_file, urlparse try: from torch.hub import get_dir except ImportError: from torch.hub import _get_torch_home as get_dir try: import safetensors.torch _has_safetensors = True except ImportError: _has_safetensors = False try: from typing import Literal except ImportError: from typing_extensions import Literal from timm import __version__ from timm.models._pretrained import filter_pretrained_cfg try: from huggingface_hub import ( create_repo, get_hf_file_metadata, hf_hub_download, hf_hub_url, repo_type_and_id_from_hf_id, upload_folder) from huggingface_hub.utils import EntryNotFoundError hf_hub_download = partial(hf_hub_download, library_name="timm", library_version=__version__) _has_hf_hub = True except ImportError: hf_hub_download = None _has_hf_hub = False _logger = logging.getLogger(__name__) __all__ = ['get_cache_dir', 'download_cached_file', 'has_hf_hub', 'hf_split', 'load_model_config_from_hf', 'load_state_dict_from_hf', 'save_for_hf', 'push_to_hf_hub'] # Default name for a weights file hosted on the Huggingface Hub. HF_WEIGHTS_NAME = "pytorch_model.bin" # default pytorch pkl HF_SAFE_WEIGHTS_NAME = "model.safetensors" # safetensors version HF_OPEN_CLIP_WEIGHTS_NAME = "open_clip_pytorch_model.bin" # default pytorch pkl HF_OPEN_CLIP_SAFE_WEIGHTS_NAME = "open_clip_model.safetensors" # safetensors version def get_cache_dir(child_dir: str = ''): """ Returns the location of the directory where models are cached (and creates it if necessary). """ # Issue warning to move data if old env is set if os.getenv('TORCH_MODEL_ZOO'): _logger.warning('TORCH_MODEL_ZOO is deprecated, please use env TORCH_HOME instead') hub_dir = get_dir() child_dir = () if not child_dir else (child_dir,) model_dir = os.path.join(hub_dir, 'checkpoints', *child_dir) os.makedirs(model_dir, exist_ok=True) return model_dir def download_cached_file( url: Union[str, List[str], Tuple[str, str]], check_hash: bool = True, progress: bool = False, cache_dir: Optional[Union[str, Path]] = None, ): if isinstance(url, (list, tuple)): url, filename = url else: parts = urlparse(url) filename = os.path.basename(parts.path) if cache_dir: os.makedirs(cache_dir, exist_ok=True) else: cache_dir = get_cache_dir() cached_file = os.path.join(cache_dir, filename) if not os.path.exists(cached_file): _logger.info('Downloading: "{}" to {}\n'.format(url, cached_file)) hash_prefix = None if check_hash: r = HASH_REGEX.search(filename) # r is Optional[Match[str]] hash_prefix = r.group(1) if r else None download_url_to_file(url, cached_file, hash_prefix, progress=progress) return cached_file def check_cached_file( url: Union[str, List[str], Tuple[str, str]], check_hash: bool = True, cache_dir: Optional[Union[str, Path]] = None, ): if isinstance(url, (list, tuple)): url, filename = url else: parts = urlparse(url) filename = os.path.basename(parts.path) if not cache_dir: cache_dir = get_cache_dir() cached_file = os.path.join(cache_dir, filename) if os.path.exists(cached_file): if check_hash: r = HASH_REGEX.search(filename) # r is Optional[Match[str]] hash_prefix = r.group(1) if r else None if hash_prefix: with open(cached_file, 'rb') as f: hd = hashlib.sha256(f.read()).hexdigest() if hd[:len(hash_prefix)] != hash_prefix: return False return True return False def has_hf_hub(necessary: 
bool = False): if not _has_hf_hub and necessary: # if no HF Hub module installed, and it is necessary to continue, raise error raise RuntimeError( 'Hugging Face hub model specified but package not installed. Run `pip install huggingface_hub`.') return _has_hf_hub def hf_split(hf_id: str): # FIXME I may change @ -> # and be parsed as fragment in a URI model name scheme rev_split = hf_id.split('@') assert 0 < len(rev_split) <= 2, 'hf_hub id should only contain one @ character to identify revision.' hf_model_id = rev_split[0] hf_revision = rev_split[-1] if len(rev_split) > 1 else None return hf_model_id, hf_revision def load_cfg_from_json(json_file: Union[str, Path]): with open(json_file, "r", encoding="utf-8") as reader: text = reader.read() return json.loads(text) def download_from_hf( model_id: str, filename: str, cache_dir: Optional[Union[str, Path]] = None, ): hf_model_id, hf_revision = hf_split(model_id) return hf_hub_download( hf_model_id, filename, revision=hf_revision, cache_dir=cache_dir, ) def _parse_model_cfg( cfg: Dict[str, Any], extra_fields: Dict[str, Any], ) -> Tuple[Dict[str, Any], str, Dict[str, Any]]: """""" # legacy "single‑dict" → split if "pretrained_cfg" not in cfg: pretrained_cfg = cfg cfg = { "architecture": pretrained_cfg.pop("architecture"), "num_features": pretrained_cfg.pop("num_features", None), "pretrained_cfg": pretrained_cfg, } if "labels" in pretrained_cfg: # rename ‑‑> label_names pretrained_cfg["label_names"] = pretrained_cfg.pop("labels") pretrained_cfg = cfg["pretrained_cfg"] pretrained_cfg.update(extra_fields) # top‑level overrides if "num_classes" in cfg: pretrained_cfg["num_classes"] = cfg["num_classes"] if "label_names" in cfg: pretrained_cfg["label_names"] = cfg.pop("label_names") if "label_descriptions" in cfg: pretrained_cfg["label_descriptions"] = cfg.pop("label_descriptions") model_args = cfg.get("model_args", {}) model_name = cfg["architecture"] return pretrained_cfg, model_name, model_args def load_model_config_from_hf( model_id: str, cache_dir: Optional[Union[str, Path]] = None, ): """Original HF‑Hub loader (unchanged download, shared parsing).""" assert has_hf_hub(True) cfg_path = download_from_hf(model_id, "config.json", cache_dir=cache_dir) cfg = load_cfg_from_json(cfg_path) return _parse_model_cfg(cfg, {"hf_hub_id": model_id, "source": "hf-hub"}) def load_model_config_from_path( model_path: Union[str, Path], ): """Load from ``<model_path>/config.json`` on the local filesystem.""" model_path = Path(model_path) cfg_file = model_path / "config.json" if not cfg_file.is_file(): raise FileNotFoundError(f"Config file not found: {cfg_file}") cfg = load_cfg_from_json(cfg_file) extra_fields = {"file": str(model_path), "source": "local-dir"} return _parse_model_cfg(cfg, extra_fields=extra_fields) def load_state_dict_from_hf( model_id: str, filename: str = HF_WEIGHTS_NAME, weights_only: bool = False, cache_dir: Optional[Union[str, Path]] = None, ): assert has_hf_hub(True) hf_model_id, hf_revision = hf_split(model_id) # Look for .safetensors alternatives and load from it if it exists if _has_safetensors: for safe_filename in _get_safe_alternatives(filename): try: cached_safe_file = hf_hub_download( repo_id=hf_model_id, filename=safe_filename, revision=hf_revision, cache_dir=cache_dir, ) _logger.info( f"[{model_id}] Safe alternative available for '{filename}' " f"(as '{safe_filename}'). 
Loading weights using safetensors.") return safetensors.torch.load_file(cached_safe_file, device="cpu") except EntryNotFoundError: pass # Otherwise, load using pytorch.load cached_file = hf_hub_download( hf_model_id, filename=filename, revision=hf_revision, cache_dir=cache_dir, ) _logger.debug(f"[{model_id}] Safe alternative not found for '{filename}'. Loading weights using default pytorch.") try: state_dict = torch.load(cached_file, map_location='cpu', weights_only=weights_only) except TypeError: state_dict = torch.load(cached_file, map_location='cpu') return state_dict _PREFERRED_FILES = ( "model.safetensors", "pytorch_model.bin", "pytorch_model.pth", "model.pth", "open_clip_model.safetensors", "open_clip_pytorch_model.safetensors", "open_clip_pytorch_model.bin", "open_clip_pytorch_model.pth", ) _EXT_PRIORITY = ('.safetensors', '.pth', '.pth.tar', '.bin') def load_state_dict_from_path( path: str, weights_only: bool = False, ): found_file = None for fname in _PREFERRED_FILES: p = path / fname if p.exists(): logging.info(f"Found preferred checkpoint: {p.name}") found_file = p break # fallback: first match per‑extension class for ext in _EXT_PRIORITY: files = sorted(path.glob(f"*{ext}")) if files: if len(files) > 1: logging.warning( f"Multiple {ext} checkpoints in {path}: {names}. " f"Using '{files[0].name}'." ) found_file = files[0] if not found_file: raise RuntimeError(f"No suitable checkpoints found in {path}.") try: state_dict = torch.load(found_file, map_location='cpu', weights_only=weights_only) except TypeError: state_dict = torch.load(found_file, map_location='cpu') return state_dict def load_custom_from_hf( model_id: str, filename: str, model: torch.nn.Module, cache_dir: Optional[Union[str, Path]] = None, ): assert has_hf_hub(True) hf_model_id, hf_revision = hf_split(model_id) cached_file = hf_hub_download( hf_model_id, filename=filename, revision=hf_revision, cache_dir=cache_dir, ) return model.load_pretrained(cached_file) def save_config_for_hf( model: torch.nn.Module, config_path: str, model_config: Optional[dict] = None, model_args: Optional[dict] = None ): model_config = model_config or {} hf_config = {} pretrained_cfg = filter_pretrained_cfg(model.pretrained_cfg, remove_source=True, remove_null=True) # set some values at root config level hf_config['architecture'] = pretrained_cfg.pop('architecture') hf_config['num_classes'] = model_config.pop('num_classes', model.num_classes) # NOTE these attr saved for informational purposes, do not impact model build hf_config['num_features'] = model_config.pop('num_features', model.num_features) global_pool_type = model_config.pop('global_pool', getattr(model, 'global_pool', None)) if isinstance(global_pool_type, str) and global_pool_type: hf_config['global_pool'] = global_pool_type # Save class label info if 'labels' in model_config: _logger.warning( "'labels' as a config field for is deprecated. Please use 'label_names' and 'label_descriptions'." " Renaming provided 'labels' field to 'label_names'.") model_config.setdefault('label_names', model_config.pop('labels')) label_names = model_config.pop('label_names', None) if label_names: assert isinstance(label_names, (dict, list, tuple)) # map label id (classifier index) -> unique label name (ie synset for ImageNet, MID for OpenImages) # can be a dict id: name if there are id gaps, or tuple/list if no gaps. 
hf_config['label_names'] = label_names label_descriptions = model_config.pop('label_descriptions', None) if label_descriptions: assert isinstance(label_descriptions, dict) # maps label names -> descriptions hf_config['label_descriptions'] = label_descriptions if model_args: hf_config['model_args'] = model_args hf_config['pretrained_cfg'] = pretrained_cfg hf_config.update(model_config) with config_path.open('w') as f: json.dump(hf_config, f, indent=2) def save_for_hf( model: torch.nn.Module, save_directory: str, model_config: Optional[dict] = None, model_args: Optional[dict] = None, safe_serialization: Union[bool, Literal["both"]] = False, ): assert has_hf_hub(True) save_directory = Path(save_directory) save_directory.mkdir(exist_ok=True, parents=True) # Save model weights, either safely (using safetensors), or using legacy pytorch approach or both. tensors = model.state_dict() if safe_serialization is True or safe_serialization == "both": assert _has_safetensors, "`pip install safetensors` to use .safetensors" safetensors.torch.save_file(tensors, save_directory / HF_SAFE_WEIGHTS_NAME) if safe_serialization is False or safe_serialization == "both": torch.save(tensors, save_directory / HF_WEIGHTS_NAME) config_path = save_directory / 'config.json' save_config_for_hf( model, config_path, model_config=model_config, model_args=model_args, ) def push_to_hf_hub( model: torch.nn.Module, repo_id: str, commit_message: str = 'Add model', token: Optional[str] = None, revision: Optional[str] = None, private: bool = False, create_pr: bool = False, model_config: Optional[dict] = None, model_card: Optional[dict] = None, model_args: Optional[dict] = None, safe_serialization: Union[bool, Literal["both"]] = 'both', ): """ Arguments: (...) safe_serialization (`bool` or `"both"`, *optional*, defaults to `False`): Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`). Can be set to `"both"` in order to push both safe and unsafe weights. """ # Create repo if it doesn't exist yet repo_url = create_repo(repo_id, token=token, private=private, exist_ok=True) # Infer complete repo_id from repo_url # Can be different from the input `repo_id` if repo_owner was implicit _, repo_owner, repo_name = repo_type_and_id_from_hf_id(repo_url) repo_id = f"{repo_owner}/{repo_name}" # Check if README file already exist in repo try: get_hf_file_metadata(hf_hub_url(repo_id=repo_id, filename="README.md", revision=revision)) has_readme = True except EntryNotFoundError: has_readme = False # Dump model and push to Hub with TemporaryDirectory() as tmpdir: # Save model weights and config. 
save_for_hf( model, tmpdir, model_config=model_config, model_args=model_args, safe_serialization=safe_serialization, ) # Add readme if it does not exist if not has_readme: model_card = model_card or {} model_name = repo_id.split('/')[-1] readme_path = Path(tmpdir) / "README.md" readme_text = generate_readme(model_card, model_name) readme_path.write_text(readme_text) # Upload model and return return upload_folder( repo_id=repo_id, folder_path=tmpdir, revision=revision, create_pr=create_pr, commit_message=commit_message, ) def generate_readme(model_card: dict, model_name: str): tags = model_card.get('tags', None) or ['image-classification', 'timm', 'transformers'] readme_text = "---\n" if tags: readme_text += "tags:\n" for t in tags: readme_text += f"- {t}\n" readme_text += f"library_name: {model_card.get('library_name', 'timm')}\n" readme_text += f"license: {model_card.get('license', 'apache-2.0')}\n" if 'license_name' in model_card: readme_text += f"license_name: {model_card.get('license_name')}\n" if 'license_link' in model_card: readme_text += f"license_link: {model_card.get('license_link')}\n" if 'details' in model_card and 'Dataset' in model_card['details']: readme_text += 'datasets:\n' if isinstance(model_card['details']['Dataset'], (tuple, list)): for d in model_card['details']['Dataset']: readme_text += f"- {d.lower()}\n" else: readme_text += f"- {model_card['details']['Dataset'].lower()}\n" if 'Pretrain Dataset' in model_card['details']: if isinstance(model_card['details']['Pretrain Dataset'], (tuple, list)): for d in model_card['details']['Pretrain Dataset']: readme_text += f"- {d.lower()}\n" else: readme_text += f"- {model_card['details']['Pretrain Dataset'].lower()}\n" readme_text += "---\n" readme_text += f"# Model card for {model_name}\n" if 'description' in model_card: readme_text += f"\n{model_card['description']}\n" if 'details' in model_card: readme_text += f"\n## Model Details\n" for k, v in model_card['details'].items(): if isinstance(v, (list, tuple)): readme_text += f"- **{k}:**\n" for vi in v: readme_text += f" - {vi}\n" elif isinstance(v, dict): readme_text += f"- **{k}:**\n" for ki, vi in v.items(): readme_text += f" - {ki}: {vi}\n" else: readme_text += f"- **{k}:** {v}\n" if 'usage' in model_card: readme_text += f"\n## Model Usage\n" readme_text += model_card['usage'] readme_text += '\n' if 'comparison' in model_card: readme_text += f"\n## Model Comparison\n" readme_text += model_card['comparison'] readme_text += '\n' if 'citation' in model_card: readme_text += f"\n## Citation\n" if not isinstance(model_card['citation'], (list, tuple)): citations = [model_card['citation']] else: citations = model_card['citation'] for c in citations: readme_text += f"```bibtex\n{c}\n```\n" return readme_text def _get_safe_alternatives(filename: str) -> Iterable[str]: """Returns potential safetensors alternatives for a given filename. Use case: When downloading a model from the Huggingface Hub, we first look if a .safetensors file exists and if yes, we use it. Main use case is filename "pytorch_model.bin" => check for "model.safetensors" or "pytorch_model.safetensors". """ if filename == HF_WEIGHTS_NAME: yield HF_SAFE_WEIGHTS_NAME if filename == HF_OPEN_CLIP_WEIGHTS_NAME: yield HF_OPEN_CLIP_SAFE_WEIGHTS_NAME if filename not in (HF_WEIGHTS_NAME, HF_OPEN_CLIP_WEIGHTS_NAME) and filename.endswith(".bin"): yield filename[:-4] + ".safetensors"
pytorch-image-models/timm/models/_hub.py/0
""" ConvMixer """ from typing import Optional import torch import torch.nn as nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import SelectAdaptivePool2d from ._registry import register_model, generate_default_cfgs from ._builder import build_model_with_cfg from ._manipulate import checkpoint_seq __all__ = ['ConvMixer'] class Residual(nn.Module): def __init__(self, fn): super().__init__() self.fn = fn def forward(self, x): return self.fn(x) + x class ConvMixer(nn.Module): def __init__( self, dim, depth, kernel_size=9, patch_size=7, in_chans=3, num_classes=1000, global_pool='avg', drop_rate=0., act_layer=nn.GELU, **kwargs, ): super().__init__() self.num_classes = num_classes self.num_features = self.head_hidden_size = dim self.grad_checkpointing = False self.stem = nn.Sequential( nn.Conv2d(in_chans, dim, kernel_size=patch_size, stride=patch_size), act_layer(), nn.BatchNorm2d(dim) ) self.blocks = nn.Sequential( *[nn.Sequential( Residual(nn.Sequential( nn.Conv2d(dim, dim, kernel_size, groups=dim, padding="same"), act_layer(), nn.BatchNorm2d(dim) )), nn.Conv2d(dim, dim, kernel_size=1), act_layer(), nn.BatchNorm2d(dim) ) for i in range(depth)] ) self.pooling = SelectAdaptivePool2d(pool_type=global_pool, flatten=True) self.head_drop = nn.Dropout(drop_rate) self.head = nn.Linear(dim, num_classes) if num_classes > 0 else nn.Identity() @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict(stem=r'^stem', blocks=r'^blocks\.(\d+)') return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.head def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None): self.num_classes = num_classes if global_pool is not None: self.pooling = SelectAdaptivePool2d(pool_type=global_pool, flatten=True) self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() def forward_features(self, x): x = self.stem(x) if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint_seq(self.blocks, x) else: x = self.blocks(x) return x def forward_head(self, x, pre_logits: bool = False): x = self.pooling(x) x = self.head_drop(x) return x if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _create_convmixer(variant, pretrained=False, **kwargs): if kwargs.get('features_only', None): raise RuntimeError('features_only not implemented for ConvMixer models.') return build_model_with_cfg(ConvMixer, variant, pretrained, **kwargs) def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'crop_pct': .96, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'classifier': 'head', 'first_conv': 'stem.0', **kwargs } default_cfgs = generate_default_cfgs({ 'convmixer_1536_20.in1k': _cfg(hf_hub_id='timm/'), 'convmixer_768_32.in1k': _cfg(hf_hub_id='timm/'), 'convmixer_1024_20_ks9_p14.in1k': _cfg(hf_hub_id='timm/') }) @register_model def convmixer_1536_20(pretrained=False, **kwargs) -> ConvMixer: model_args = dict(dim=1536, depth=20, kernel_size=9, patch_size=7, **kwargs) return _create_convmixer('convmixer_1536_20', pretrained, **model_args) @register_model def convmixer_768_32(pretrained=False, **kwargs) -> ConvMixer: model_args = dict(dim=768, depth=32, kernel_size=7, patch_size=7, act_layer=nn.ReLU, **kwargs) return 
_create_convmixer('convmixer_768_32', pretrained, **model_args) @register_model def convmixer_1024_20_ks9_p14(pretrained=False, **kwargs) -> ConvMixer: model_args = dict(dim=1024, depth=20, kernel_size=9, patch_size=14, **kwargs) return _create_convmixer('convmixer_1024_20_ks9_p14', pretrained, **model_args)
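# A minimal usage sketch for the ConvMixer models above: build a registered variant via
# timm.create_model and run a forward pass. No pretrained weights are downloaded here
# (pretrained=False); the batch size and num_classes are arbitrary illustration values.
import torch
import timm

model = timm.create_model('convmixer_768_32', pretrained=False, num_classes=10)
model.eval()

x = torch.randn(1, 3, 224, 224)
with torch.no_grad():
    feats = model.forward_features(x)  # patch stride 7 on a 224px input -> (1, 768, 32, 32)
    logits = model(x)                  # pooled features -> classifier head -> (1, 10)
print(feats.shape, logits.shape)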
pytorch-image-models/timm/models/convmixer.py/0