""" A script to convert Stable Diffusion 3.5 ControlNet checkpoints to the Diffusers format. Example: Convert a SD3.5 ControlNet checkpoint to Diffusers format using local file: ```bash python scripts/convert_sd3_controlnet_to_diffusers.py \ --checkpoint_path "path/to/local/sd3.5_large_controlnet_canny.safetensors" \ --output_path "output/sd35-controlnet-canny" \ --dtype "fp16" # optional, defaults to fp32 ``` Or download and convert from HuggingFace repository: ```bash python scripts/convert_sd3_controlnet_to_diffusers.py \ --original_state_dict_repo_id "stabilityai/stable-diffusion-3.5-controlnets" \ --filename "sd3.5_large_controlnet_canny.safetensors" \ --output_path "/raid/yiyi/sd35-controlnet-canny-diffusers" \ --dtype "fp32" # optional, defaults to fp32 ``` Note: The script supports the following ControlNet types from SD3.5: - Canny edge detection - Depth estimation - Blur detection The checkpoint files can be downloaded from: https://huggingface.co/stabilityai/stable-diffusion-3.5-controlnets """ import argparse import safetensors.torch import torch from huggingface_hub import hf_hub_download from diffusers import SD3ControlNetModel parser = argparse.ArgumentParser() parser.add_argument("--checkpoint_path", type=str, default=None, help="Path to local checkpoint file") parser.add_argument( "--original_state_dict_repo_id", type=str, default=None, help="HuggingFace repo ID containing the checkpoint" ) parser.add_argument("--filename", type=str, default=None, help="Filename of the checkpoint in the HF repo") parser.add_argument("--output_path", type=str, required=True, help="Path to save the converted model") parser.add_argument( "--dtype", type=str, default="fp32", help="Data type for the converted model (fp16, bf16, or fp32)" ) args = parser.parse_args() def load_original_checkpoint(args): if args.original_state_dict_repo_id is not None: if args.filename is None: raise ValueError("When using `original_state_dict_repo_id`, `filename` must also be specified") print(f"Downloading checkpoint from {args.original_state_dict_repo_id}/{args.filename}") ckpt_path = hf_hub_download(repo_id=args.original_state_dict_repo_id, filename=args.filename) elif args.checkpoint_path is not None: print(f"Loading checkpoint from local path: {args.checkpoint_path}") ckpt_path = args.checkpoint_path else: raise ValueError("Please provide either `original_state_dict_repo_id` or a local `checkpoint_path`") original_state_dict = safetensors.torch.load_file(ckpt_path) return original_state_dict def convert_sd3_controlnet_checkpoint_to_diffusers(original_state_dict): converted_state_dict = {} # Direct mappings for controlnet blocks for i in range(19): # 19 controlnet blocks converted_state_dict[f"controlnet_blocks.{i}.weight"] = original_state_dict[f"controlnet_blocks.{i}.weight"] converted_state_dict[f"controlnet_blocks.{i}.bias"] = original_state_dict[f"controlnet_blocks.{i}.bias"] # Positional embeddings converted_state_dict["pos_embed_input.proj.weight"] = original_state_dict["pos_embed_input.proj.weight"] converted_state_dict["pos_embed_input.proj.bias"] = original_state_dict["pos_embed_input.proj.bias"] # Time and text embeddings time_text_mappings = { "time_text_embed.timestep_embedder.linear_1.weight": "time_text_embed.timestep_embedder.linear_1.weight", "time_text_embed.timestep_embedder.linear_1.bias": "time_text_embed.timestep_embedder.linear_1.bias", "time_text_embed.timestep_embedder.linear_2.weight": "time_text_embed.timestep_embedder.linear_2.weight", 
"time_text_embed.timestep_embedder.linear_2.bias": "time_text_embed.timestep_embedder.linear_2.bias", "time_text_embed.text_embedder.linear_1.weight": "time_text_embed.text_embedder.linear_1.weight", "time_text_embed.text_embedder.linear_1.bias": "time_text_embed.text_embedder.linear_1.bias", "time_text_embed.text_embedder.linear_2.weight": "time_text_embed.text_embedder.linear_2.weight", "time_text_embed.text_embedder.linear_2.bias": "time_text_embed.text_embedder.linear_2.bias", } for new_key, old_key in time_text_mappings.items(): if old_key in original_state_dict: converted_state_dict[new_key] = original_state_dict[old_key] # Transformer blocks for i in range(19): # Split QKV into separate Q, K, V qkv_weight = original_state_dict[f"transformer_blocks.{i}.attn.qkv.weight"] qkv_bias = original_state_dict[f"transformer_blocks.{i}.attn.qkv.bias"] q, k, v = torch.chunk(qkv_weight, 3, dim=0) q_bias, k_bias, v_bias = torch.chunk(qkv_bias, 3, dim=0) block_mappings = { f"transformer_blocks.{i}.attn.to_q.weight": q, f"transformer_blocks.{i}.attn.to_q.bias": q_bias, f"transformer_blocks.{i}.attn.to_k.weight": k, f"transformer_blocks.{i}.attn.to_k.bias": k_bias, f"transformer_blocks.{i}.attn.to_v.weight": v, f"transformer_blocks.{i}.attn.to_v.bias": v_bias, # Output projections f"transformer_blocks.{i}.attn.to_out.0.weight": original_state_dict[ f"transformer_blocks.{i}.attn.proj.weight" ], f"transformer_blocks.{i}.attn.to_out.0.bias": original_state_dict[ f"transformer_blocks.{i}.attn.proj.bias" ], # Feed forward f"transformer_blocks.{i}.ff.net.0.proj.weight": original_state_dict[ f"transformer_blocks.{i}.mlp.fc1.weight" ], f"transformer_blocks.{i}.ff.net.0.proj.bias": original_state_dict[f"transformer_blocks.{i}.mlp.fc1.bias"], f"transformer_blocks.{i}.ff.net.2.weight": original_state_dict[f"transformer_blocks.{i}.mlp.fc2.weight"], f"transformer_blocks.{i}.ff.net.2.bias": original_state_dict[f"transformer_blocks.{i}.mlp.fc2.bias"], # Norms f"transformer_blocks.{i}.norm1.linear.weight": original_state_dict[ f"transformer_blocks.{i}.adaLN_modulation.1.weight" ], f"transformer_blocks.{i}.norm1.linear.bias": original_state_dict[ f"transformer_blocks.{i}.adaLN_modulation.1.bias" ], } converted_state_dict.update(block_mappings) return converted_state_dict def main(args): original_ckpt = load_original_checkpoint(args) original_dtype = next(iter(original_ckpt.values())).dtype # Initialize dtype with fp32 as default if args.dtype == "fp16": dtype = torch.float16 elif args.dtype == "bf16": dtype = torch.bfloat16 elif args.dtype == "fp32": dtype = torch.float32 else: raise ValueError(f"Unsupported dtype: {args.dtype}. Must be one of: fp16, bf16, fp32") if dtype != original_dtype: print( f"Converting checkpoint from {original_dtype} to {dtype}. This can lead to unexpected results, proceed with caution." ) converted_controlnet_state_dict = convert_sd3_controlnet_checkpoint_to_diffusers(original_ckpt) controlnet = SD3ControlNetModel( patch_size=2, in_channels=16, num_layers=19, attention_head_dim=64, num_attention_heads=38, joint_attention_dim=None, caption_projection_dim=2048, pooled_projection_dim=2048, out_channels=16, pos_embed_max_size=None, pos_embed_type=None, use_pos_embed=False, force_zeros_for_pooled_projection=False, ) controlnet.load_state_dict(converted_controlnet_state_dict, strict=True) print(f"Saving SD3 ControlNet in Diffusers format in {args.output_path}.") controlnet.to(dtype).save_pretrained(args.output_path) if __name__ == "__main__": main(args)
diffusers/scripts/convert_sd3_controlnet_to_diffusers.py
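The only nontrivial tensor surgery in the converter above is splitting the fused `attn.qkv` projection of each transformer block into separate `to_q`/`to_k`/`to_v` weights. A minimal, self-contained sketch of that split; the dimensions below are illustrative, not the real SD3.5 sizes (the Large ControlNet uses an inner dimension of 64 * 38 = 2432):

```python
import torch

hidden_dim = 8  # illustrative stand-in for the real inner dimension
qkv_weight = torch.randn(3 * hidden_dim, hidden_dim)  # fused [q; k; v] projection weight
qkv_bias = torch.randn(3 * hidden_dim)

# torch.chunk along dim 0 recovers the three separate projections, as done per block above
q, k, v = torch.chunk(qkv_weight, 3, dim=0)
q_bias, k_bias, v_bias = torch.chunk(qkv_bias, 3, dim=0)

assert q.shape == (hidden_dim, hidden_dim)
assert q_bias.shape == (hidden_dim,)
```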
{ "file_path": "diffusers/scripts/convert_sd3_controlnet_to_diffusers.py", "repo_id": "diffusers", "token_count": 3453 }
157
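A sketch of how the converted checkpoint could then be used for inference. This is not part of the conversion script: the output directory, base-model repo id, and control image path are illustrative, and it assumes `StableDiffusion3ControlNetPipeline` is available in the installed `diffusers` version.

```python
import torch
from diffusers import SD3ControlNetModel, StableDiffusion3ControlNetPipeline
from diffusers.utils import load_image

# Load the ControlNet converted by the script above (path is illustrative)
controlnet = SD3ControlNetModel.from_pretrained("output/sd35-controlnet-canny", torch_dtype=torch.float16)
pipe = StableDiffusion3ControlNetPipeline.from_pretrained(
    "stabilityai/stable-diffusion-3.5-large", controlnet=controlnet, torch_dtype=torch.float16
).to("cuda")

control_image = load_image("canny_edges.png")  # a precomputed Canny edge map
image = pipe("a photo of a cat", control_image=control_image).images[0]
image.save("cat_canny.png")
```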
# coding=utf-8 # Copyright 2025 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Conversion script for the Versatile Stable Diffusion checkpoints.""" import argparse from argparse import Namespace import torch from transformers import ( CLIPImageProcessor, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionModelWithProjection, ) from diffusers import ( AutoencoderKL, DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel, VersatileDiffusionPipeline, ) from diffusers.pipelines.versatile_diffusion.modeling_text_unet import UNetFlatConditionModel SCHEDULER_CONFIG = Namespace( **{ "beta_linear_start": 0.00085, "beta_linear_end": 0.012, "timesteps": 1000, "scale_factor": 0.18215, } ) IMAGE_UNET_CONFIG = Namespace( **{ "input_channels": 4, "model_channels": 320, "output_channels": 4, "num_noattn_blocks": [2, 2, 2, 2], "channel_mult": [1, 2, 4, 4], "with_attn": [True, True, True, False], "num_heads": 8, "context_dim": 768, "use_checkpoint": True, } ) TEXT_UNET_CONFIG = Namespace( **{ "input_channels": 768, "model_channels": 320, "output_channels": 768, "num_noattn_blocks": [2, 2, 2, 2], "channel_mult": [1, 2, 4, 4], "second_dim": [4, 4, 4, 4], "with_attn": [True, True, True, False], "num_heads": 8, "context_dim": 768, "use_checkpoint": True, } ) AUTOENCODER_CONFIG = Namespace( **{ "double_z": True, "z_channels": 4, "resolution": 256, "in_channels": 3, "out_ch": 3, "ch": 128, "ch_mult": [1, 2, 4, 4], "num_res_blocks": 2, "attn_resolutions": [], "dropout": 0.0, } ) def shave_segments(path, n_shave_prefix_segments=1): """ Removes segments. Positive values shave the first segments, negative shave the last segments. 
""" if n_shave_prefix_segments >= 0: return ".".join(path.split(".")[n_shave_prefix_segments:]) else: return ".".join(path.split(".")[:n_shave_prefix_segments]) def renew_resnet_paths(old_list, n_shave_prefix_segments=0): """ Updates paths inside resnets to the new naming scheme (local renaming) """ mapping = [] for old_item in old_list: new_item = old_item.replace("in_layers.0", "norm1") new_item = new_item.replace("in_layers.2", "conv1") new_item = new_item.replace("out_layers.0", "norm2") new_item = new_item.replace("out_layers.3", "conv2") new_item = new_item.replace("emb_layers.1", "time_emb_proj") new_item = new_item.replace("skip_connection", "conv_shortcut") new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments) mapping.append({"old": old_item, "new": new_item}) return mapping def renew_vae_resnet_paths(old_list, n_shave_prefix_segments=0): """ Updates paths inside resnets to the new naming scheme (local renaming) """ mapping = [] for old_item in old_list: new_item = old_item new_item = new_item.replace("nin_shortcut", "conv_shortcut") new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments) mapping.append({"old": old_item, "new": new_item}) return mapping def renew_attention_paths(old_list, n_shave_prefix_segments=0): """ Updates paths inside attentions to the new naming scheme (local renaming) """ mapping = [] for old_item in old_list: new_item = old_item # new_item = new_item.replace('norm.weight', 'group_norm.weight') # new_item = new_item.replace('norm.bias', 'group_norm.bias') # new_item = new_item.replace('proj_out.weight', 'proj_attn.weight') # new_item = new_item.replace('proj_out.bias', 'proj_attn.bias') # new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments) mapping.append({"old": old_item, "new": new_item}) return mapping def renew_vae_attention_paths(old_list, n_shave_prefix_segments=0): """ Updates paths inside attentions to the new naming scheme (local renaming) """ mapping = [] for old_item in old_list: new_item = old_item new_item = new_item.replace("norm.weight", "group_norm.weight") new_item = new_item.replace("norm.bias", "group_norm.bias") new_item = new_item.replace("q.weight", "query.weight") new_item = new_item.replace("q.bias", "query.bias") new_item = new_item.replace("k.weight", "key.weight") new_item = new_item.replace("k.bias", "key.bias") new_item = new_item.replace("v.weight", "value.weight") new_item = new_item.replace("v.bias", "value.bias") new_item = new_item.replace("proj_out.weight", "proj_attn.weight") new_item = new_item.replace("proj_out.bias", "proj_attn.bias") new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments) mapping.append({"old": old_item, "new": new_item}) return mapping def assign_to_checkpoint( paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None ): """ This does the final conversion step: take locally converted weights and apply a global renaming to them. It splits attention layers, and takes into account additional replacements that may arise. Assigns the weights to the new checkpoint. """ assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys." # Splits the attention layers into three variables. 
if attention_paths_to_split is not None: for path, path_map in attention_paths_to_split.items(): old_tensor = old_checkpoint[path] channels = old_tensor.shape[0] // 3 target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1) num_heads = old_tensor.shape[0] // config["num_head_channels"] // 3 old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:]) query, key, value = old_tensor.split(channels // num_heads, dim=1) checkpoint[path_map["query"]] = query.reshape(target_shape) checkpoint[path_map["key"]] = key.reshape(target_shape) checkpoint[path_map["value"]] = value.reshape(target_shape) for path in paths: new_path = path["new"] # These have already been assigned if attention_paths_to_split is not None and new_path in attention_paths_to_split: continue # Global renaming happens here new_path = new_path.replace("middle_block.0", "mid_block.resnets.0") new_path = new_path.replace("middle_block.1", "mid_block.attentions.0") new_path = new_path.replace("middle_block.2", "mid_block.resnets.1") if additional_replacements is not None: for replacement in additional_replacements: new_path = new_path.replace(replacement["old"], replacement["new"]) # proj_attn.weight has to be converted from conv 1D to linear if "proj_attn.weight" in new_path: checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0] elif path["old"] in old_checkpoint: checkpoint[new_path] = old_checkpoint[path["old"]] def conv_attn_to_linear(checkpoint): keys = list(checkpoint.keys()) attn_keys = ["query.weight", "key.weight", "value.weight"] for key in keys: if ".".join(key.split(".")[-2:]) in attn_keys: if checkpoint[key].ndim > 2: checkpoint[key] = checkpoint[key][:, :, 0, 0] elif "proj_attn.weight" in key: if checkpoint[key].ndim > 2: checkpoint[key] = checkpoint[key][:, :, 0] def create_image_unet_diffusers_config(unet_params): """ Creates a config for the diffusers based on the config of the VD model. """ block_out_channels = [unet_params.model_channels * mult for mult in unet_params.channel_mult] down_block_types = [] resolution = 1 for i in range(len(block_out_channels)): block_type = "CrossAttnDownBlock2D" if unet_params.with_attn[i] else "DownBlock2D" down_block_types.append(block_type) if i != len(block_out_channels) - 1: resolution *= 2 up_block_types = [] for i in range(len(block_out_channels)): block_type = "CrossAttnUpBlock2D" if unet_params.with_attn[-i - 1] else "UpBlock2D" up_block_types.append(block_type) resolution //= 2 if not all(n == unet_params.num_noattn_blocks[0] for n in unet_params.num_noattn_blocks): raise ValueError("Not all num_res_blocks are equal, which is not supported in this script.") config = { "sample_size": None, "in_channels": unet_params.input_channels, "out_channels": unet_params.output_channels, "down_block_types": tuple(down_block_types), "up_block_types": tuple(up_block_types), "block_out_channels": tuple(block_out_channels), "layers_per_block": unet_params.num_noattn_blocks[0], "cross_attention_dim": unet_params.context_dim, "attention_head_dim": unet_params.num_heads, } return config def create_text_unet_diffusers_config(unet_params): """ Creates a config for the diffusers based on the config of the VD model. 
""" block_out_channels = [unet_params.model_channels * mult for mult in unet_params.channel_mult] down_block_types = [] resolution = 1 for i in range(len(block_out_channels)): block_type = "CrossAttnDownBlockFlat" if unet_params.with_attn[i] else "DownBlockFlat" down_block_types.append(block_type) if i != len(block_out_channels) - 1: resolution *= 2 up_block_types = [] for i in range(len(block_out_channels)): block_type = "CrossAttnUpBlockFlat" if unet_params.with_attn[-i - 1] else "UpBlockFlat" up_block_types.append(block_type) resolution //= 2 if not all(n == unet_params.num_noattn_blocks[0] for n in unet_params.num_noattn_blocks): raise ValueError("Not all num_res_blocks are equal, which is not supported in this script.") config = { "sample_size": None, "in_channels": (unet_params.input_channels, 1, 1), "out_channels": (unet_params.output_channels, 1, 1), "down_block_types": tuple(down_block_types), "up_block_types": tuple(up_block_types), "block_out_channels": tuple(block_out_channels), "layers_per_block": unet_params.num_noattn_blocks[0], "cross_attention_dim": unet_params.context_dim, "attention_head_dim": unet_params.num_heads, } return config def create_vae_diffusers_config(vae_params): """ Creates a config for the diffusers based on the config of the VD model. """ block_out_channels = [vae_params.ch * mult for mult in vae_params.ch_mult] down_block_types = ["DownEncoderBlock2D"] * len(block_out_channels) up_block_types = ["UpDecoderBlock2D"] * len(block_out_channels) config = { "sample_size": vae_params.resolution, "in_channels": vae_params.in_channels, "out_channels": vae_params.out_ch, "down_block_types": tuple(down_block_types), "up_block_types": tuple(up_block_types), "block_out_channels": tuple(block_out_channels), "latent_channels": vae_params.z_channels, "layers_per_block": vae_params.num_res_blocks, } return config def create_diffusers_scheduler(original_config): schedular = DDIMScheduler( num_train_timesteps=original_config.model.params.timesteps, beta_start=original_config.model.params.linear_start, beta_end=original_config.model.params.linear_end, beta_schedule="scaled_linear", ) return schedular def convert_vd_unet_checkpoint(checkpoint, config, unet_key, extract_ema=False): """ Takes a state dict and a config, and returns a converted checkpoint. """ # extract state_dict for UNet unet_state_dict = {} keys = list(checkpoint.keys()) # at least a 100 parameters have to start with `model_ema` in order for the checkpoint to be EMA if sum(k.startswith("model_ema") for k in keys) > 100: print("Checkpoint has both EMA and non-EMA weights.") if extract_ema: print( "In this conversion only the EMA weights are extracted. If you want to instead extract the non-EMA" " weights (useful to continue fine-tuning), please make sure to remove the `--extract_ema` flag." ) for key in keys: if key.startswith("model.diffusion_model"): flat_ema_key = "model_ema." + "".join(key.split(".")[1:]) unet_state_dict[key.replace(unet_key, "")] = checkpoint.pop(flat_ema_key) else: print( "In this conversion only the non-EMA weights are extracted. If you want to instead extract the EMA" " weights (usually better for inference), please make sure to add the `--extract_ema` flag." 
) for key in keys: if key.startswith(unet_key): unet_state_dict[key.replace(unet_key, "")] = checkpoint.pop(key) new_checkpoint = {} new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["model.diffusion_model.time_embed.0.weight"] new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["model.diffusion_model.time_embed.0.bias"] new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["model.diffusion_model.time_embed.2.weight"] new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["model.diffusion_model.time_embed.2.bias"] new_checkpoint["conv_in.weight"] = unet_state_dict["input_blocks.0.0.weight"] new_checkpoint["conv_in.bias"] = unet_state_dict["input_blocks.0.0.bias"] new_checkpoint["conv_norm_out.weight"] = unet_state_dict["out.0.weight"] new_checkpoint["conv_norm_out.bias"] = unet_state_dict["out.0.bias"] new_checkpoint["conv_out.weight"] = unet_state_dict["out.2.weight"] new_checkpoint["conv_out.bias"] = unet_state_dict["out.2.bias"] # Retrieves the keys for the input blocks only num_input_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "input_blocks" in layer}) input_blocks = { layer_id: [key for key in unet_state_dict if f"input_blocks.{layer_id}" in key] for layer_id in range(num_input_blocks) } # Retrieves the keys for the middle blocks only num_middle_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "middle_block" in layer}) middle_blocks = { layer_id: [key for key in unet_state_dict if f"middle_block.{layer_id}" in key] for layer_id in range(num_middle_blocks) } # Retrieves the keys for the output blocks only num_output_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "output_blocks" in layer}) output_blocks = { layer_id: [key for key in unet_state_dict if f"output_blocks.{layer_id}" in key] for layer_id in range(num_output_blocks) } for i in range(1, num_input_blocks): block_id = (i - 1) // (config["layers_per_block"] + 1) layer_in_block_id = (i - 1) % (config["layers_per_block"] + 1) resnets = [ key for key in input_blocks[i] if f"input_blocks.{i}.0" in key and f"input_blocks.{i}.0.op" not in key ] attentions = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key] if f"input_blocks.{i}.0.op.weight" in unet_state_dict: new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.weight"] = unet_state_dict.pop( f"input_blocks.{i}.0.op.weight" ) new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.bias"] = unet_state_dict.pop( f"input_blocks.{i}.0.op.bias" ) elif f"input_blocks.{i}.0.weight" in unet_state_dict: # text_unet uses linear layers in place of downsamplers shape = unet_state_dict[f"input_blocks.{i}.0.weight"].shape if shape[0] != shape[1]: continue new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.weight"] = unet_state_dict.pop( f"input_blocks.{i}.0.weight" ) new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.bias"] = unet_state_dict.pop( f"input_blocks.{i}.0.bias" ) paths = renew_resnet_paths(resnets) meta_path = {"old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.resnets.{layer_in_block_id}"} assign_to_checkpoint( paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config ) if len(attentions): paths = renew_attention_paths(attentions) meta_path = {"old": f"input_blocks.{i}.1", "new": f"down_blocks.{block_id}.attentions.{layer_in_block_id}"} assign_to_checkpoint( paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config ) resnet_0 = middle_blocks[0] attentions 
= middle_blocks[1] resnet_1 = middle_blocks[2] resnet_0_paths = renew_resnet_paths(resnet_0) assign_to_checkpoint(resnet_0_paths, new_checkpoint, unet_state_dict, config=config) resnet_1_paths = renew_resnet_paths(resnet_1) assign_to_checkpoint(resnet_1_paths, new_checkpoint, unet_state_dict, config=config) attentions_paths = renew_attention_paths(attentions) meta_path = {"old": "middle_block.1", "new": "mid_block.attentions.0"} assign_to_checkpoint( attentions_paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config ) for i in range(num_output_blocks): block_id = i // (config["layers_per_block"] + 1) layer_in_block_id = i % (config["layers_per_block"] + 1) output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]] output_block_list = {} for layer in output_block_layers: layer_id, layer_name = layer.split(".")[0], shave_segments(layer, 1) if layer_id in output_block_list: output_block_list[layer_id].append(layer_name) else: output_block_list[layer_id] = [layer_name] if len(output_block_list) > 1: resnets = [key for key in output_blocks[i] if f"output_blocks.{i}.0" in key] attentions = [key for key in output_blocks[i] if f"output_blocks.{i}.1" in key] paths = renew_resnet_paths(resnets) meta_path = {"old": f"output_blocks.{i}.0", "new": f"up_blocks.{block_id}.resnets.{layer_in_block_id}"} assign_to_checkpoint( paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config ) if ["conv.weight", "conv.bias"] in output_block_list.values(): index = list(output_block_list.values()).index(["conv.weight", "conv.bias"]) new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = unet_state_dict[ f"output_blocks.{i}.{index}.conv.weight" ] new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = unet_state_dict[ f"output_blocks.{i}.{index}.conv.bias" ] # Clear attentions as they have been attributed above. if len(attentions) == 2: attentions = [] elif f"output_blocks.{i}.1.weight" in unet_state_dict: # text_unet uses linear layers in place of upsamplers shape = unet_state_dict[f"output_blocks.{i}.1.weight"].shape if shape[0] != shape[1]: continue new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.weight"] = unet_state_dict.pop( f"output_blocks.{i}.1.weight" ) new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.bias"] = unet_state_dict.pop( f"output_blocks.{i}.1.bias" ) # Clear attentions as they have been attributed above. 
if len(attentions) == 2: attentions = [] elif f"output_blocks.{i}.2.weight" in unet_state_dict: # text_unet uses linear layers in place of upsamplers shape = unet_state_dict[f"output_blocks.{i}.2.weight"].shape if shape[0] != shape[1]: continue new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.weight"] = unet_state_dict.pop( f"output_blocks.{i}.2.weight" ) new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.bias"] = unet_state_dict.pop( f"output_blocks.{i}.2.bias" ) if len(attentions): paths = renew_attention_paths(attentions) meta_path = { "old": f"output_blocks.{i}.1", "new": f"up_blocks.{block_id}.attentions.{layer_in_block_id}", } assign_to_checkpoint( paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config ) else: resnet_0_paths = renew_resnet_paths(output_block_layers, n_shave_prefix_segments=1) for path in resnet_0_paths: old_path = ".".join(["output_blocks", str(i), path["old"]]) new_path = ".".join(["up_blocks", str(block_id), "resnets", str(layer_in_block_id), path["new"]]) new_checkpoint[new_path] = unet_state_dict[old_path] return new_checkpoint def convert_vd_vae_checkpoint(checkpoint, config): # extract state dict for VAE vae_state_dict = {} keys = list(checkpoint.keys()) for key in keys: vae_state_dict[key] = checkpoint.get(key) new_checkpoint = {} new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"] new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"] new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"] new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"] new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"] new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"] new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"] new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"] new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"] new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"] new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"] new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"] new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"] new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"] new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"] new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"] # Retrieves the keys for the encoder down blocks only num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer}) down_blocks = { layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks) } # Retrieves the keys for the decoder up blocks only num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer}) up_blocks = { layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks) } for i in range(num_down_blocks): resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key] if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict: new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop( 
f"encoder.down.{i}.downsample.conv.weight" ) new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop( f"encoder.down.{i}.downsample.conv.bias" ) paths = renew_vae_resnet_paths(resnets) meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"} assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config) mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key] num_mid_res_blocks = 2 for i in range(1, num_mid_res_blocks + 1): resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key] paths = renew_vae_resnet_paths(resnets) meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"} assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config) mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key] paths = renew_vae_attention_paths(mid_attentions) meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"} assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config) conv_attn_to_linear(new_checkpoint) for i in range(num_up_blocks): block_id = num_up_blocks - 1 - i resnets = [ key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key ] if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict: new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[ f"decoder.up.{block_id}.upsample.conv.weight" ] new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[ f"decoder.up.{block_id}.upsample.conv.bias" ] paths = renew_vae_resnet_paths(resnets) meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"} assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config) mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key] num_mid_res_blocks = 2 for i in range(1, num_mid_res_blocks + 1): resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key] paths = renew_vae_resnet_paths(resnets) meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"} assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config) mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key] paths = renew_vae_attention_paths(mid_attentions) meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"} assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config) conv_attn_to_linear(new_checkpoint) return new_checkpoint if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--unet_checkpoint_path", default=None, type=str, required=False, help="Path to the checkpoint to convert." ) parser.add_argument( "--vae_checkpoint_path", default=None, type=str, required=False, help="Path to the checkpoint to convert." ) parser.add_argument( "--optimus_checkpoint_path", default=None, type=str, required=False, help="Path to the checkpoint to convert." ) parser.add_argument( "--scheduler_type", default="pndm", type=str, help="Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']", ) parser.add_argument( "--extract_ema", action="store_true", help=( "Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights" " or not. 
Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield" " higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning." ), ) parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.") args = parser.parse_args() scheduler_config = SCHEDULER_CONFIG num_train_timesteps = scheduler_config.timesteps beta_start = scheduler_config.beta_linear_start beta_end = scheduler_config.beta_linear_end if args.scheduler_type == "pndm": scheduler = PNDMScheduler( beta_end=beta_end, beta_schedule="scaled_linear", beta_start=beta_start, num_train_timesteps=num_train_timesteps, skip_prk_steps=True, steps_offset=1, ) elif args.scheduler_type == "lms": scheduler = LMSDiscreteScheduler(beta_start=beta_start, beta_end=beta_end, beta_schedule="scaled_linear") elif args.scheduler_type == "euler": scheduler = EulerDiscreteScheduler(beta_start=beta_start, beta_end=beta_end, beta_schedule="scaled_linear") elif args.scheduler_type == "euler-ancestral": scheduler = EulerAncestralDiscreteScheduler( beta_start=beta_start, beta_end=beta_end, beta_schedule="scaled_linear" ) elif args.scheduler_type == "dpm": scheduler = DPMSolverMultistepScheduler( beta_start=beta_start, beta_end=beta_end, beta_schedule="scaled_linear" ) elif args.scheduler_type == "ddim": scheduler = DDIMScheduler( beta_start=beta_start, beta_end=beta_end, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, steps_offset=1, ) else: raise ValueError(f"Scheduler of type {args.scheduler_type} doesn't exist!") # Convert the UNet2DConditionModel models. if args.unet_checkpoint_path is not None: # image UNet image_unet_config = create_image_unet_diffusers_config(IMAGE_UNET_CONFIG) checkpoint = torch.load(args.unet_checkpoint_path) converted_image_unet_checkpoint = convert_vd_unet_checkpoint( checkpoint, image_unet_config, unet_key="model.diffusion_model.unet_image.", extract_ema=args.extract_ema ) image_unet = UNet2DConditionModel(**image_unet_config) image_unet.load_state_dict(converted_image_unet_checkpoint) # text UNet text_unet_config = create_text_unet_diffusers_config(TEXT_UNET_CONFIG) converted_text_unet_checkpoint = convert_vd_unet_checkpoint( checkpoint, text_unet_config, unet_key="model.diffusion_model.unet_text.", extract_ema=args.extract_ema ) text_unet = UNetFlatConditionModel(**text_unet_config) text_unet.load_state_dict(converted_text_unet_checkpoint) # Convert the VAE model. if args.vae_checkpoint_path is not None: vae_config = create_vae_diffusers_config(AUTOENCODER_CONFIG) checkpoint = torch.load(args.vae_checkpoint_path) converted_vae_checkpoint = convert_vd_vae_checkpoint(checkpoint, vae_config) vae = AutoencoderKL(**vae_config) vae.load_state_dict(converted_vae_checkpoint) tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14") image_feature_extractor = CLIPImageProcessor.from_pretrained("openai/clip-vit-large-patch14") text_encoder = CLIPTextModelWithProjection.from_pretrained("openai/clip-vit-large-patch14") image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14") pipe = VersatileDiffusionPipeline( scheduler=scheduler, tokenizer=tokenizer, image_feature_extractor=image_feature_extractor, text_encoder=text_encoder, image_encoder=image_encoder, image_unet=image_unet, text_unet=text_unet, vae=vae, ) pipe.save_pretrained(args.dump_path)
diffusers/scripts/convert_versatile_diffusion_to_diffusers.py
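The conversion above is essentially a two-stage key rename: a local rename of layer names (`renew_*_paths`) followed by a global rename of block prefixes (`assign_to_checkpoint` with `additional_replacements`). A small sketch of what happens to one typical UNet key; the concrete key and the plain `str.replace` calls are illustrative stand-ins for the helpers.

```python
# Original LDM-style key for the first resnet of the first down block
old_key = "input_blocks.1.0.in_layers.0.weight"

# Local rename (what renew_resnet_paths does): LDM layer names -> Diffusers layer names
local_key = old_key.replace("in_layers.0", "norm1").replace("out_layers.3", "conv2")

# Global rename (what assign_to_checkpoint applies via additional_replacements)
new_key = local_key.replace("input_blocks.1.0", "down_blocks.0.resnets.0")

print(new_key)  # down_blocks.0.resnets.0.norm1.weight
```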
{ "file_path": "diffusers/scripts/convert_versatile_diffusion_to_diffusers.py", "repo_id": "diffusers", "token_count": 14926 }
158
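Once all components are converted, the pipeline saved at `--dump_path` can be reloaded like any other Diffusers pipeline. A sketch with an illustrative output path, assuming the mega pipeline's `text_to_image` helper:

```python
from diffusers import VersatileDiffusionPipeline

pipe = VersatileDiffusionPipeline.from_pretrained("/path/to/dump")  # the --dump_path used above
result = pipe.text_to_image("a painting of a fox", num_inference_steps=25)
result.images[0].save("fox.png")
```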
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core


# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers

pkgs_to_check_at_runtime = "python requests filelock numpy".split()
for pkg in pkgs_to_check_at_runtime:
    if pkg in deps:
        require_version_core(deps[pkg])
    else:
        raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")


def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
diffusers/src/diffusers/dependency_versions_check.py
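For reference, a small sketch of how these helpers are typically called; the `numpy` pin itself comes from `dependency_versions_table.py`, and the ad-hoc requirement string below is just an example.

```python
from diffusers.dependency_versions_check import dep_version_check
from diffusers.utils.versions import require_version

# Raises if the installed numpy violates the range pinned in dependency_versions_table.py
dep_version_check("numpy")

# Ad-hoc check with a custom hint appended to the error message
require_version("numpy>=1.20", "Try: pip install -U numpy")
```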
{ "file_path": "diffusers/src/diffusers/dependency_versions_check.py", "repo_id": "diffusers", "token_count": 381 }
159
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import inspect import json import os from pathlib import Path from typing import Callable, Dict, List, Optional, Union import safetensors import torch import torch.nn as nn from huggingface_hub import model_info from huggingface_hub.constants import HF_HUB_OFFLINE from ..models.modeling_utils import ModelMixin, load_state_dict from ..utils import ( USE_PEFT_BACKEND, _get_model_file, convert_state_dict_to_diffusers, convert_state_dict_to_peft, delete_adapter_layers, deprecate, get_adapter_name, is_accelerate_available, is_peft_available, is_peft_version, is_transformers_available, is_transformers_version, logging, recurse_remove_peft_layers, scale_lora_layers, set_adapter_layers, set_weights_and_activate_adapters, ) from ..utils.peft_utils import _create_lora_config from ..utils.state_dict_utils import _load_sft_state_dict_metadata if is_transformers_available(): from transformers import PreTrainedModel if is_peft_available(): from peft.tuners.tuners_utils import BaseTunerLayer if is_accelerate_available(): from accelerate.hooks import AlignDevicesHook, CpuOffload, remove_hook_from_module logger = logging.get_logger(__name__) LORA_WEIGHT_NAME = "pytorch_lora_weights.bin" LORA_WEIGHT_NAME_SAFE = "pytorch_lora_weights.safetensors" LORA_ADAPTER_METADATA_KEY = "lora_adapter_metadata" def fuse_text_encoder_lora(text_encoder, lora_scale=1.0, safe_fusing=False, adapter_names=None): """ Fuses LoRAs for the text encoder. Args: text_encoder (`torch.nn.Module`): The text encoder module to set the adapter layers for. If `None`, it will try to get the `text_encoder` attribute. lora_scale (`float`, defaults to 1.0): Controls how much to influence the outputs with the LoRA parameters. safe_fusing (`bool`, defaults to `False`): Whether to check fused weights for NaN values before fusing and if values are NaN not fusing them. adapter_names (`List[str]` or `str`): The names of the adapters to use. """ merge_kwargs = {"safe_merge": safe_fusing} for module in text_encoder.modules(): if isinstance(module, BaseTunerLayer): if lora_scale != 1.0: module.scale_layer(lora_scale) # For BC with previous PEFT versions, we need to check the signature # of the `merge` method to see if it supports the `adapter_names` argument. supported_merge_kwargs = list(inspect.signature(module.merge).parameters) if "adapter_names" in supported_merge_kwargs: merge_kwargs["adapter_names"] = adapter_names elif "adapter_names" not in supported_merge_kwargs and adapter_names is not None: raise ValueError( "The `adapter_names` argument is not supported with your PEFT version. " "Please upgrade to the latest version of PEFT. `pip install -U peft`" ) module.merge(**merge_kwargs) def unfuse_text_encoder_lora(text_encoder): """ Unfuses LoRAs for the text encoder. Args: text_encoder (`torch.nn.Module`): The text encoder module to set the adapter layers for. If `None`, it will try to get the `text_encoder` attribute. 
""" for module in text_encoder.modules(): if isinstance(module, BaseTunerLayer): module.unmerge() def set_adapters_for_text_encoder( adapter_names: Union[List[str], str], text_encoder: Optional["PreTrainedModel"] = None, # noqa: F821 text_encoder_weights: Optional[Union[float, List[float], List[None]]] = None, ): """ Sets the adapter layers for the text encoder. Args: adapter_names (`List[str]` or `str`): The names of the adapters to use. text_encoder (`torch.nn.Module`, *optional*): The text encoder module to set the adapter layers for. If `None`, it will try to get the `text_encoder` attribute. text_encoder_weights (`List[float]`, *optional*): The weights to use for the text encoder. If `None`, the weights are set to `1.0` for all the adapters. """ if text_encoder is None: raise ValueError( "The pipeline does not have a default `pipe.text_encoder` class. Please make sure to pass a `text_encoder` instead." ) def process_weights(adapter_names, weights): # Expand weights into a list, one entry per adapter # e.g. for 2 adapters: 7 -> [7,7] ; [3, None] -> [3, None] if not isinstance(weights, list): weights = [weights] * len(adapter_names) if len(adapter_names) != len(weights): raise ValueError( f"Length of adapter names {len(adapter_names)} is not equal to the length of the weights {len(weights)}" ) # Set None values to default of 1.0 # e.g. [7,7] -> [7,7] ; [3, None] -> [3,1] weights = [w if w is not None else 1.0 for w in weights] return weights adapter_names = [adapter_names] if isinstance(adapter_names, str) else adapter_names text_encoder_weights = process_weights(adapter_names, text_encoder_weights) set_weights_and_activate_adapters(text_encoder, adapter_names, text_encoder_weights) def disable_lora_for_text_encoder(text_encoder: Optional["PreTrainedModel"] = None): """ Disables the LoRA layers for the text encoder. Args: text_encoder (`torch.nn.Module`, *optional*): The text encoder module to disable the LoRA layers for. If `None`, it will try to get the `text_encoder` attribute. """ if text_encoder is None: raise ValueError("Text Encoder not found.") set_adapter_layers(text_encoder, enabled=False) def enable_lora_for_text_encoder(text_encoder: Optional["PreTrainedModel"] = None): """ Enables the LoRA layers for the text encoder. Args: text_encoder (`torch.nn.Module`, *optional*): The text encoder module to enable the LoRA layers for. If `None`, it will try to get the `text_encoder` attribute. """ if text_encoder is None: raise ValueError("Text Encoder not found.") set_adapter_layers(text_encoder, enabled=True) def _remove_text_encoder_monkey_patch(text_encoder): recurse_remove_peft_layers(text_encoder) if getattr(text_encoder, "peft_config", None) is not None: del text_encoder.peft_config text_encoder._hf_peft_config_loaded = None def _fetch_state_dict( pretrained_model_name_or_path_or_dict, weight_name, use_safetensors, local_files_only, cache_dir, force_download, proxies, token, revision, subfolder, user_agent, allow_pickle, metadata=None, ): model_file = None if not isinstance(pretrained_model_name_or_path_or_dict, dict): # Let's first try to load .safetensors weights if (use_safetensors and weight_name is None) or ( weight_name is not None and weight_name.endswith(".safetensors") ): try: # Here we're relaxing the loading check to enable more Inference API # friendliness where sometimes, it's not at all possible to automatically # determine `weight_name`. 
if weight_name is None: weight_name = _best_guess_weight_name( pretrained_model_name_or_path_or_dict, file_extension=".safetensors", local_files_only=local_files_only, ) model_file = _get_model_file( pretrained_model_name_or_path_or_dict, weights_name=weight_name or LORA_WEIGHT_NAME_SAFE, cache_dir=cache_dir, force_download=force_download, proxies=proxies, local_files_only=local_files_only, token=token, revision=revision, subfolder=subfolder, user_agent=user_agent, ) state_dict = safetensors.torch.load_file(model_file, device="cpu") metadata = _load_sft_state_dict_metadata(model_file) except (IOError, safetensors.SafetensorError) as e: if not allow_pickle: raise e # try loading non-safetensors weights model_file = None metadata = None pass if model_file is None: if weight_name is None: weight_name = _best_guess_weight_name( pretrained_model_name_or_path_or_dict, file_extension=".bin", local_files_only=local_files_only ) model_file = _get_model_file( pretrained_model_name_or_path_or_dict, weights_name=weight_name or LORA_WEIGHT_NAME, cache_dir=cache_dir, force_download=force_download, proxies=proxies, local_files_only=local_files_only, token=token, revision=revision, subfolder=subfolder, user_agent=user_agent, ) state_dict = load_state_dict(model_file) metadata = None else: state_dict = pretrained_model_name_or_path_or_dict return state_dict, metadata def _best_guess_weight_name( pretrained_model_name_or_path_or_dict, file_extension=".safetensors", local_files_only=False ): if local_files_only or HF_HUB_OFFLINE: raise ValueError("When using the offline mode, you must specify a `weight_name`.") targeted_files = [] if os.path.isfile(pretrained_model_name_or_path_or_dict): return elif os.path.isdir(pretrained_model_name_or_path_or_dict): targeted_files = [f for f in os.listdir(pretrained_model_name_or_path_or_dict) if f.endswith(file_extension)] else: files_in_repo = model_info(pretrained_model_name_or_path_or_dict).siblings targeted_files = [f.rfilename for f in files_in_repo if f.rfilename.endswith(file_extension)] if len(targeted_files) == 0: return # "scheduler" does not correspond to a LoRA checkpoint. # "optimizer" does not correspond to a LoRA checkpoint # only top-level checkpoints are considered and not the other ones, hence "checkpoint". unallowed_substrings = {"scheduler", "optimizer", "checkpoint"} targeted_files = list( filter(lambda x: all(substring not in x for substring in unallowed_substrings), targeted_files) ) if any(f.endswith(LORA_WEIGHT_NAME) for f in targeted_files): targeted_files = list(filter(lambda x: x.endswith(LORA_WEIGHT_NAME), targeted_files)) elif any(f.endswith(LORA_WEIGHT_NAME_SAFE) for f in targeted_files): targeted_files = list(filter(lambda x: x.endswith(LORA_WEIGHT_NAME_SAFE), targeted_files)) if len(targeted_files) > 1: logger.warning( f"Provided path contains more than one weights file in the {file_extension} format. `{targeted_files[0]}` is going to be loaded, for precise control, specify a `weight_name` in `load_lora_weights`." 
) weight_name = targeted_files[0] return weight_name def _pack_dict_with_prefix(state_dict, prefix): sd_with_prefix = {f"{prefix}.{key}": value for key, value in state_dict.items()} return sd_with_prefix def _load_lora_into_text_encoder( state_dict, network_alphas, text_encoder, prefix=None, lora_scale=1.0, text_encoder_name="text_encoder", adapter_name=None, _pipeline=None, low_cpu_mem_usage=False, hotswap: bool = False, metadata=None, ): from ..hooks.group_offloading import _maybe_remove_and_reapply_group_offloading if not USE_PEFT_BACKEND: raise ValueError("PEFT backend is required for this method.") if network_alphas and metadata: raise ValueError("`network_alphas` and `metadata` cannot be specified both at the same time.") peft_kwargs = {} if low_cpu_mem_usage: if not is_peft_version(">=", "0.13.1"): raise ValueError( "`low_cpu_mem_usage=True` is not compatible with this `peft` version. Please update it with `pip install -U peft`." ) if not is_transformers_version(">", "4.45.2"): # Note from sayakpaul: It's not in `transformers` stable yet. # https://github.com/huggingface/transformers/pull/33725/ raise ValueError( "`low_cpu_mem_usage=True` is not compatible with this `transformers` version. Please update it with `pip install -U transformers`." ) peft_kwargs["low_cpu_mem_usage"] = low_cpu_mem_usage # If the serialization format is new (introduced in https://github.com/huggingface/diffusers/pull/2918), # then the `state_dict` keys should have `unet_name` and/or `text_encoder_name` as # their prefixes. prefix = text_encoder_name if prefix is None else prefix # Safe prefix to check with. if hotswap and any(text_encoder_name in key for key in state_dict.keys()): raise ValueError("At the moment, hotswapping is not supported for text encoders, please pass `hotswap=False`.") # Load the layers corresponding to text encoder and make necessary adjustments. 
if prefix is not None: state_dict = {k.removeprefix(f"{prefix}."): v for k, v in state_dict.items() if k.startswith(f"{prefix}.")} if metadata is not None: metadata = {k.removeprefix(f"{prefix}."): v for k, v in metadata.items() if k.startswith(f"{prefix}.")} if len(state_dict) > 0: logger.info(f"Loading {prefix}.") rank = {} state_dict = convert_state_dict_to_diffusers(state_dict) # convert state dict state_dict = convert_state_dict_to_peft(state_dict) for name, _ in text_encoder.named_modules(): if name.endswith((".q_proj", ".k_proj", ".v_proj", ".out_proj", ".fc1", ".fc2")): rank_key = f"{name}.lora_B.weight" if rank_key in state_dict: rank[rank_key] = state_dict[rank_key].shape[1] if network_alphas is not None: alpha_keys = [k for k in network_alphas.keys() if k.startswith(prefix) and k.split(".")[0] == prefix] network_alphas = {k.removeprefix(f"{prefix}."): v for k, v in network_alphas.items() if k in alpha_keys} # create `LoraConfig` lora_config = _create_lora_config(state_dict, network_alphas, metadata, rank, is_unet=False) # adapter_name if adapter_name is None: adapter_name = get_adapter_name(text_encoder) # <Unsafe code is_model_cpu_offload, is_sequential_cpu_offload, is_group_offload = _func_optionally_disable_offloading( _pipeline ) # inject LoRA layers and load the state dict # in transformers we automatically check whether the adapter name is already in use or not text_encoder.load_adapter( adapter_name=adapter_name, adapter_state_dict=state_dict, peft_config=lora_config, **peft_kwargs, ) # scale LoRA layers with `lora_scale` scale_lora_layers(text_encoder, weight=lora_scale) text_encoder.to(device=text_encoder.device, dtype=text_encoder.dtype) # Offload back. if is_model_cpu_offload: _pipeline.enable_model_cpu_offload() elif is_sequential_cpu_offload: _pipeline.enable_sequential_cpu_offload() elif is_group_offload: for component in _pipeline.components.values(): if isinstance(component, torch.nn.Module): _maybe_remove_and_reapply_group_offloading(component) # Unsafe code /> if prefix is not None and not state_dict: model_class_name = text_encoder.__class__.__name__ logger.warning( f"No LoRA keys associated to {model_class_name} found with the {prefix=}. " "This is safe to ignore if LoRA state dict didn't originally have any " f"{model_class_name} related params. You can also try specifying `prefix=None` " "to resolve the warning. Otherwise, open an issue if you think it's unexpected: " "https://github.com/huggingface/diffusers/issues/new" ) def _func_optionally_disable_offloading(_pipeline): """ Optionally removes offloading in case the pipeline has been already sequentially offloaded to CPU. Args: _pipeline (`DiffusionPipeline`): The pipeline to disable offloading for. Returns: tuple: A tuple indicating if `is_model_cpu_offload` or `is_sequential_cpu_offload` or `is_group_offload` is True. 
""" from ..hooks.group_offloading import _is_group_offload_enabled is_model_cpu_offload = False is_sequential_cpu_offload = False is_group_offload = False if _pipeline is not None and _pipeline.hf_device_map is None: for _, component in _pipeline.components.items(): if not isinstance(component, nn.Module): continue is_group_offload = is_group_offload or _is_group_offload_enabled(component) if not hasattr(component, "_hf_hook"): continue is_model_cpu_offload = is_model_cpu_offload or isinstance(component._hf_hook, CpuOffload) is_sequential_cpu_offload = is_sequential_cpu_offload or ( isinstance(component._hf_hook, AlignDevicesHook) or hasattr(component._hf_hook, "hooks") and isinstance(component._hf_hook.hooks[0], AlignDevicesHook) ) if is_sequential_cpu_offload or is_model_cpu_offload: logger.info( "Accelerate hooks detected. Since you have called `load_lora_weights()`, the previous hooks will be first removed. Then the LoRA parameters will be loaded and the hooks will be applied again." ) for _, component in _pipeline.components.items(): if not isinstance(component, nn.Module) or not hasattr(component, "_hf_hook"): continue remove_hook_from_module(component, recurse=is_sequential_cpu_offload) return (is_model_cpu_offload, is_sequential_cpu_offload, is_group_offload) class LoraBaseMixin: """Utility class for handling LoRAs.""" _lora_loadable_modules = [] _merged_adapters = set() @property def lora_scale(self) -> float: """ Returns the lora scale which can be set at run time by the pipeline. # if `_lora_scale` has not been set, return 1. """ return self._lora_scale if hasattr(self, "_lora_scale") else 1.0 @property def num_fused_loras(self): """Returns the number of LoRAs that have been fused.""" return len(self._merged_adapters) @property def fused_loras(self): """Returns names of the LoRAs that have been fused.""" return self._merged_adapters def load_lora_weights(self, **kwargs): raise NotImplementedError("`load_lora_weights()` is not implemented.") @classmethod def save_lora_weights(cls, **kwargs): raise NotImplementedError("`save_lora_weights()` not implemented.") @classmethod def lora_state_dict(cls, **kwargs): raise NotImplementedError("`lora_state_dict()` is not implemented.") def unload_lora_weights(self): """ Unloads the LoRA parameters. Examples: ```python >>> # Assuming `pipeline` is already loaded with the LoRA parameters. >>> pipeline.unload_lora_weights() >>> ... ``` """ if not USE_PEFT_BACKEND: raise ValueError("PEFT backend is required for this method.") for component in self._lora_loadable_modules: model = getattr(self, component, None) if model is not None: if issubclass(model.__class__, ModelMixin): model.unload_lora() elif issubclass(model.__class__, PreTrainedModel): _remove_text_encoder_monkey_patch(model) def fuse_lora( self, components: List[str] = [], lora_scale: float = 1.0, safe_fusing: bool = False, adapter_names: Optional[List[str]] = None, **kwargs, ): r""" Fuses the LoRA parameters into the original parameters of the corresponding blocks. <Tip warning={true}> This is an experimental API. </Tip> Args: components: (`List[str]`): List of LoRA-injectable components to fuse the LoRAs into. lora_scale (`float`, defaults to 1.0): Controls how much to influence the outputs with the LoRA parameters. safe_fusing (`bool`, defaults to `False`): Whether to check fused weights for NaN values before fusing and if values are NaN not fusing them. adapter_names (`List[str]`, *optional*): Adapter names to be used for fusing. 
If nothing is passed, all active adapters will be fused. Example: ```py from diffusers import DiffusionPipeline import torch pipeline = DiffusionPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16 ).to("cuda") pipeline.load_lora_weights("nerijs/pixel-art-xl", weight_name="pixel-art-xl.safetensors", adapter_name="pixel") pipeline.fuse_lora(lora_scale=0.7) ``` """ if "fuse_unet" in kwargs: depr_message = "Passing `fuse_unet` to `fuse_lora()` is deprecated and will be ignored. Please use the `components` argument and provide a list of the components whose LoRAs are to be fused. `fuse_unet` will be removed in a future version." deprecate( "fuse_unet", "1.0.0", depr_message, ) if "fuse_transformer" in kwargs: depr_message = "Passing `fuse_transformer` to `fuse_lora()` is deprecated and will be ignored. Please use the `components` argument and provide a list of the components whose LoRAs are to be fused. `fuse_transformer` will be removed in a future version." deprecate( "fuse_transformer", "1.0.0", depr_message, ) if "fuse_text_encoder" in kwargs: depr_message = "Passing `fuse_text_encoder` to `fuse_lora()` is deprecated and will be ignored. Please use the `components` argument and provide a list of the components whose LoRAs are to be fused. `fuse_text_encoder` will be removed in a future version." deprecate( "fuse_text_encoder", "1.0.0", depr_message, ) if len(components) == 0: raise ValueError("`components` cannot be an empty list.") # Need to retrieve the names as `adapter_names` can be None. So we cannot directly use it # in `self._merged_adapters = self._merged_adapters | merged_adapter_names`. merged_adapter_names = set() for fuse_component in components: if fuse_component not in self._lora_loadable_modules: raise ValueError(f"{fuse_component} is not found in {self._lora_loadable_modules=}.") model = getattr(self, fuse_component, None) if model is not None: # check if diffusers model if issubclass(model.__class__, ModelMixin): model.fuse_lora(lora_scale, safe_fusing=safe_fusing, adapter_names=adapter_names) for module in model.modules(): if isinstance(module, BaseTunerLayer): merged_adapter_names.update(set(module.merged_adapters)) # handle transformers models. if issubclass(model.__class__, PreTrainedModel): fuse_text_encoder_lora( model, lora_scale=lora_scale, safe_fusing=safe_fusing, adapter_names=adapter_names ) for module in model.modules(): if isinstance(module, BaseTunerLayer): merged_adapter_names.update(set(module.merged_adapters)) self._merged_adapters = self._merged_adapters | merged_adapter_names def unfuse_lora(self, components: List[str] = [], **kwargs): r""" Reverses the effect of [`pipe.fuse_lora()`](https://huggingface.co/docs/diffusers/main/en/api/loaders#diffusers.loaders.LoraBaseMixin.fuse_lora). <Tip warning={true}> This is an experimental API. </Tip> Args: components (`List[str]`): List of LoRA-injectable components to unfuse LoRA from. unfuse_unet (`bool`, defaults to `True`): Whether to unfuse the UNet LoRA parameters. unfuse_text_encoder (`bool`, defaults to `True`): Whether to unfuse the text encoder LoRA parameters. If the text encoder wasn't monkey-patched with the LoRA parameters then it won't have any effect. """ if "unfuse_unet" in kwargs: depr_message = "Passing `unfuse_unet` to `unfuse_lora()` is deprecated and will be ignored. Please use the `components` argument. `unfuse_unet` will be removed in a future version." 
deprecate( "unfuse_unet", "1.0.0", depr_message, ) if "unfuse_transformer" in kwargs: depr_message = "Passing `unfuse_transformer` to `unfuse_lora()` is deprecated and will be ignored. Please use the `components` argument. `unfuse_transformer` will be removed in a future version." deprecate( "unfuse_transformer", "1.0.0", depr_message, ) if "unfuse_text_encoder" in kwargs: depr_message = "Passing `unfuse_text_encoder` to `unfuse_lora()` is deprecated and will be ignored. Please use the `components` argument. `unfuse_text_encoder` will be removed in a future version." deprecate( "unfuse_text_encoder", "1.0.0", depr_message, ) if len(components) == 0: raise ValueError("`components` cannot be an empty list.") for fuse_component in components: if fuse_component not in self._lora_loadable_modules: raise ValueError(f"{fuse_component} is not found in {self._lora_loadable_modules=}.") model = getattr(self, fuse_component, None) if model is not None: if issubclass(model.__class__, (ModelMixin, PreTrainedModel)): for module in model.modules(): if isinstance(module, BaseTunerLayer): for adapter in set(module.merged_adapters): if adapter and adapter in self._merged_adapters: self._merged_adapters = self._merged_adapters - {adapter} module.unmerge() def set_adapters( self, adapter_names: Union[List[str], str], adapter_weights: Optional[Union[float, Dict, List[float], List[Dict]]] = None, ): """ Set the currently active adapters for use in the pipeline. Args: adapter_names (`List[str]` or `str`): The names of the adapters to use. adapter_weights (`Union[List[float], float]`, *optional*): The adapter(s) weights to use with the UNet. If `None`, the weights are set to `1.0` for all the adapters. Example: ```py from diffusers import AutoPipelineForText2Image import torch pipeline = AutoPipelineForText2Image.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16 ).to("cuda") pipeline.load_lora_weights( "jbilcke-hf/sdxl-cinematic-1", weight_name="pytorch_lora_weights.safetensors", adapter_name="cinematic" ) pipeline.load_lora_weights("nerijs/pixel-art-xl", weight_name="pixel-art-xl.safetensors", adapter_name="pixel") pipeline.set_adapters(["cinematic", "pixel"], adapter_weights=[0.5, 0.5]) ``` """ if isinstance(adapter_weights, dict): components_passed = set(adapter_weights.keys()) lora_components = set(self._lora_loadable_modules) invalid_components = sorted(components_passed - lora_components) if invalid_components: logger.warning( f"The following components in `adapter_weights` are not part of the pipeline: {invalid_components}. " f"Available components that are LoRA-compatible: {self._lora_loadable_modules}. So, weights belonging " "to the invalid components will be removed and ignored." 
) adapter_weights = {k: v for k, v in adapter_weights.items() if k not in invalid_components} adapter_names = [adapter_names] if isinstance(adapter_names, str) else adapter_names adapter_weights = copy.deepcopy(adapter_weights) # Expand weights into a list, one entry per adapter if not isinstance(adapter_weights, list): adapter_weights = [adapter_weights] * len(adapter_names) if len(adapter_names) != len(adapter_weights): raise ValueError( f"Length of adapter names {len(adapter_names)} is not equal to the length of the weights {len(adapter_weights)}" ) list_adapters = self.get_list_adapters() # eg {"unet": ["adapter1", "adapter2"], "text_encoder": ["adapter2"]} # eg ["adapter1", "adapter2"] all_adapters = {adapter for adapters in list_adapters.values() for adapter in adapters} missing_adapters = set(adapter_names) - all_adapters if len(missing_adapters) > 0: raise ValueError( f"Adapter name(s) {missing_adapters} not in the list of present adapters: {all_adapters}." ) # eg {"adapter1": ["unet"], "adapter2": ["unet", "text_encoder"]} invert_list_adapters = { adapter: [part for part, adapters in list_adapters.items() if adapter in adapters] for adapter in all_adapters } # Decompose weights into weights for denoiser and text encoders. _component_adapter_weights = {} for component in self._lora_loadable_modules: model = getattr(self, component, None) # To guard for cases like Wan. In Wan2.1 and WanVace, we have a single denoiser. # Whereas in Wan 2.2, we have two denoisers. if model is None: continue for adapter_name, weights in zip(adapter_names, adapter_weights): if isinstance(weights, dict): component_adapter_weights = weights.pop(component, None) if component_adapter_weights is not None and component not in invert_list_adapters[adapter_name]: logger.warning( ( f"Lora weight dict for adapter '{adapter_name}' contains {component}," f"but this will be ignored because {adapter_name} does not contain weights for {component}." f"Valid parts for {adapter_name} are: {invert_list_adapters[adapter_name]}." ) ) else: component_adapter_weights = weights _component_adapter_weights.setdefault(component, []) _component_adapter_weights[component].append(component_adapter_weights) if issubclass(model.__class__, ModelMixin): model.set_adapters(adapter_names, _component_adapter_weights[component]) elif issubclass(model.__class__, PreTrainedModel): set_adapters_for_text_encoder(adapter_names, model, _component_adapter_weights[component]) def disable_lora(self): """ Disables the active LoRA layers of the pipeline. Example: ```py from diffusers import AutoPipelineForText2Image import torch pipeline = AutoPipelineForText2Image.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16 ).to("cuda") pipeline.load_lora_weights( "jbilcke-hf/sdxl-cinematic-1", weight_name="pytorch_lora_weights.safetensors", adapter_name="cinematic" ) pipeline.disable_lora() ``` """ if not USE_PEFT_BACKEND: raise ValueError("PEFT backend is required for this method.") for component in self._lora_loadable_modules: model = getattr(self, component, None) if model is not None: if issubclass(model.__class__, ModelMixin): model.disable_lora() elif issubclass(model.__class__, PreTrainedModel): disable_lora_for_text_encoder(model) def enable_lora(self): """ Enables the active LoRA layers of the pipeline. 
Example: ```py from diffusers import AutoPipelineForText2Image import torch pipeline = AutoPipelineForText2Image.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16 ).to("cuda") pipeline.load_lora_weights( "jbilcke-hf/sdxl-cinematic-1", weight_name="pytorch_lora_weights.safetensors", adapter_name="cinematic" ) pipeline.enable_lora() ``` """ if not USE_PEFT_BACKEND: raise ValueError("PEFT backend is required for this method.") for component in self._lora_loadable_modules: model = getattr(self, component, None) if model is not None: if issubclass(model.__class__, ModelMixin): model.enable_lora() elif issubclass(model.__class__, PreTrainedModel): enable_lora_for_text_encoder(model) def delete_adapters(self, adapter_names: Union[List[str], str]): """ Delete an adapter's LoRA layers from the pipeline. Args: adapter_names (`Union[List[str], str]`): The names of the adapters to delete. Example: ```py from diffusers import AutoPipelineForText2Image import torch pipeline = AutoPipelineForText2Image.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16 ).to("cuda") pipeline.load_lora_weights( "jbilcke-hf/sdxl-cinematic-1", weight_name="pytorch_lora_weights.safetensors", adapter_names="cinematic" ) pipeline.delete_adapters("cinematic") ``` """ if not USE_PEFT_BACKEND: raise ValueError("PEFT backend is required for this method.") if isinstance(adapter_names, str): adapter_names = [adapter_names] for component in self._lora_loadable_modules: model = getattr(self, component, None) if model is not None: if issubclass(model.__class__, ModelMixin): model.delete_adapters(adapter_names) elif issubclass(model.__class__, PreTrainedModel): for adapter_name in adapter_names: delete_adapter_layers(model, adapter_name) def get_active_adapters(self) -> List[str]: """ Gets the list of the current active adapters. Example: ```python from diffusers import DiffusionPipeline pipeline = DiffusionPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", ).to("cuda") pipeline.load_lora_weights("CiroN2022/toy-face", weight_name="toy_face_sdxl.safetensors", adapter_name="toy") pipeline.get_active_adapters() ``` """ if not USE_PEFT_BACKEND: raise ValueError( "PEFT backend is required for this method. Please install the latest version of PEFT `pip install -U peft`" ) active_adapters = [] for component in self._lora_loadable_modules: model = getattr(self, component, None) if model is not None and issubclass(model.__class__, ModelMixin): for module in model.modules(): if isinstance(module, BaseTunerLayer): active_adapters = module.active_adapters break return active_adapters def get_list_adapters(self) -> Dict[str, List[str]]: """ Gets the current list of all available adapters in the pipeline. """ if not USE_PEFT_BACKEND: raise ValueError( "PEFT backend is required for this method. Please install the latest version of PEFT `pip install -U peft`" ) set_adapters = {} for component in self._lora_loadable_modules: model = getattr(self, component, None) if ( model is not None and issubclass(model.__class__, (ModelMixin, PreTrainedModel)) and hasattr(model, "peft_config") ): set_adapters[component] = list(model.peft_config.keys()) return set_adapters def set_lora_device(self, adapter_names: List[str], device: Union[torch.device, str, int]) -> None: """ Moves the LoRAs listed in `adapter_names` to a target device. Useful for offloading the LoRA to the CPU in case you want to load multiple adapters and free some GPU memory. 
After offloading the LoRA adapters to CPU, as long as the rest of the model is still on GPU, the LoRA adapters can no longer be used for inference, as that would cause a device mismatch. Remember to set the device back to GPU before using those LoRA adapters for inference. ```python >>> pipe.load_lora_weights(path_1, adapter_name="adapter-1") >>> pipe.load_lora_weights(path_2, adapter_name="adapter-2") >>> pipe.set_adapters("adapter-1") >>> image_1 = pipe(**kwargs) >>> # switch to adapter-2, offload adapter-1 >>> pipeline.set_lora_device(adapter_names=["adapter-1"], device="cpu") >>> pipeline.set_lora_device(adapter_names=["adapter-2"], device="cuda:0") >>> pipe.set_adapters("adapter-2") >>> image_2 = pipe(**kwargs) >>> # switch back to adapter-1, offload adapter-2 >>> pipeline.set_lora_device(adapter_names=["adapter-2"], device="cpu") >>> pipeline.set_lora_device(adapter_names=["adapter-1"], device="cuda:0") >>> pipe.set_adapters("adapter-1") >>> ... ``` Args: adapter_names (`List[str]`): List of adapters to send device to. device (`Union[torch.device, str, int]`): Device to send the adapters to. Can be either a torch device, a str or an integer. """ if not USE_PEFT_BACKEND: raise ValueError("PEFT backend is required for this method.") for component in self._lora_loadable_modules: model = getattr(self, component, None) if model is not None: for module in model.modules(): if isinstance(module, BaseTunerLayer): for adapter_name in adapter_names: if adapter_name not in module.lora_A: # it is sufficient to check lora_A continue module.lora_A[adapter_name].to(device) module.lora_B[adapter_name].to(device) # this is a param, not a module, so device placement is not in-place -> re-assign if hasattr(module, "lora_magnitude_vector") and module.lora_magnitude_vector is not None: if adapter_name in module.lora_magnitude_vector: module.lora_magnitude_vector[adapter_name] = module.lora_magnitude_vector[ adapter_name ].to(device) def enable_lora_hotswap(self, **kwargs) -> None: """ Hotswap adapters without triggering recompilation of a model or if the ranks of the loaded adapters are different. Args: target_rank (`int`): The highest rank among all the adapters that will be loaded. check_compiled (`str`, *optional*, defaults to `"error"`): How to handle a model that is already compiled. 
The check can return the following messages: - "error" (default): raise an error - "warn": issue a warning - "ignore": do nothing """ for key, component in self.components.items(): if hasattr(component, "enable_lora_hotswap") and (key in self._lora_loadable_modules): component.enable_lora_hotswap(**kwargs) @staticmethod def pack_weights(layers, prefix): layers_weights = layers.state_dict() if isinstance(layers, torch.nn.Module) else layers return _pack_dict_with_prefix(layers_weights, prefix) @staticmethod def write_lora_layers( state_dict: Dict[str, torch.Tensor], save_directory: str, is_main_process: bool, weight_name: str, save_function: Callable, safe_serialization: bool, lora_adapter_metadata: Optional[dict] = None, ): """Writes the state dict of the LoRA layers (optionally with metadata) to disk.""" if os.path.isfile(save_directory): logger.error(f"Provided path ({save_directory}) should be a directory, not a file") return if lora_adapter_metadata and not safe_serialization: raise ValueError("`lora_adapter_metadata` cannot be specified when not using `safe_serialization`.") if lora_adapter_metadata and not isinstance(lora_adapter_metadata, dict): raise TypeError("`lora_adapter_metadata` must be of type `dict`.") if save_function is None: if safe_serialization: def save_function(weights, filename): # Inject framework format. metadata = {"format": "pt"} if lora_adapter_metadata: for key, value in lora_adapter_metadata.items(): if isinstance(value, set): lora_adapter_metadata[key] = list(value) metadata[LORA_ADAPTER_METADATA_KEY] = json.dumps( lora_adapter_metadata, indent=2, sort_keys=True ) return safetensors.torch.save_file(weights, filename, metadata=metadata) else: save_function = torch.save os.makedirs(save_directory, exist_ok=True) if weight_name is None: if safe_serialization: weight_name = LORA_WEIGHT_NAME_SAFE else: weight_name = LORA_WEIGHT_NAME save_path = Path(save_directory, weight_name).as_posix() save_function(state_dict, save_path) logger.info(f"Model weights saved in {save_path}") @classmethod def _optionally_disable_offloading(cls, _pipeline): return _func_optionally_disable_offloading(_pipeline=_pipeline)
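Taken together, these mixin methods form the typical multi-LoRA workflow. The sketch below is illustrative only: it assumes an SDXL pipeline and reuses the adapter repositories already referenced in the docstrings above; the 0.5/0.5 blend, the 0.7 fuse scale, and the component list are example values, not requirements.

```py
from diffusers import DiffusionPipeline
import torch

pipeline = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")

# Load two adapters under distinct names and blend them at runtime.
pipeline.load_lora_weights(
    "jbilcke-hf/sdxl-cinematic-1", weight_name="pytorch_lora_weights.safetensors", adapter_name="cinematic"
)
pipeline.load_lora_weights("nerijs/pixel-art-xl", weight_name="pixel-art-xl.safetensors", adapter_name="pixel")
pipeline.set_adapters(["cinematic", "pixel"], adapter_weights=[0.5, 0.5])

# Optionally bake the active adapters into the base weights for inference ...
pipeline.fuse_lora(lora_scale=0.7)
print(pipeline.num_fused_loras)  # both adapters are now merged

# ... and undo the merge before swapping or removing the adapters.
pipeline.unfuse_lora(components=["unet", "text_encoder", "text_encoder_2"])
pipeline.unload_lora_weights()
```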
# Source file: diffusers/src/diffusers/loaders/lora_base.py (repo: diffusers)
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from typing import Callable, List, Optional, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import logging from .modeling_utils import ModelMixin logger = logging.get_logger(__name__) class MultiAdapter(ModelMixin): r""" MultiAdapter is a wrapper model that contains multiple adapter models and merges their outputs according to user-assigned weighting. This model inherits from [`ModelMixin`]. Check the superclass documentation for common methods such as downloading or saving. Args: adapters (`List[T2IAdapter]`, *optional*, defaults to None): A list of `T2IAdapter` model instances. """ def __init__(self, adapters: List["T2IAdapter"]): super(MultiAdapter, self).__init__() self.num_adapter = len(adapters) self.adapters = nn.ModuleList(adapters) if len(adapters) == 0: raise ValueError("Expecting at least one adapter") if len(adapters) == 1: raise ValueError("For a single adapter, please use the `T2IAdapter` class instead of `MultiAdapter`") # The outputs from each adapter are added together with a weight. # This means that the change in dimensions from downsampling must # be the same for all adapters. Inductively, it also means the # downscale_factor and total_downscale_factor must be the same for all # adapters. first_adapter_total_downscale_factor = adapters[0].total_downscale_factor first_adapter_downscale_factor = adapters[0].downscale_factor for idx in range(1, len(adapters)): if ( adapters[idx].total_downscale_factor != first_adapter_total_downscale_factor or adapters[idx].downscale_factor != first_adapter_downscale_factor ): raise ValueError( f"Expecting all adapters to have the same downscaling behavior, but got:\n" f"adapters[0].total_downscale_factor={first_adapter_total_downscale_factor}\n" f"adapters[0].downscale_factor={first_adapter_downscale_factor}\n" f"adapter[`{idx}`].total_downscale_factor={adapters[idx].total_downscale_factor}\n" f"adapter[`{idx}`].downscale_factor={adapters[idx].downscale_factor}" ) self.total_downscale_factor = first_adapter_total_downscale_factor self.downscale_factor = first_adapter_downscale_factor def forward(self, xs: torch.Tensor, adapter_weights: Optional[List[float]] = None) -> List[torch.Tensor]: r""" Args: xs (`torch.Tensor`): A tensor of shape (batch, channel, height, width) representing input images for multiple adapter models, concatenated along dimension 1(channel dimension). The `channel` dimension should be equal to `num_adapter` * number of channel per image. adapter_weights (`List[float]`, *optional*, defaults to None): A list of floats representing the weights which will be multiplied by each adapter's output before summing them together. If `None`, equal weights will be used for all adapters. 
""" if adapter_weights is None: adapter_weights = torch.tensor([1 / self.num_adapter] * self.num_adapter) else: adapter_weights = torch.tensor(adapter_weights) accume_state = None for x, w, adapter in zip(xs, adapter_weights, self.adapters): features = adapter(x) if accume_state is None: accume_state = features for i in range(len(accume_state)): accume_state[i] = w * accume_state[i] else: for i in range(len(features)): accume_state[i] += w * features[i] return accume_state def save_pretrained( self, save_directory: Union[str, os.PathLike], is_main_process: bool = True, save_function: Callable = None, safe_serialization: bool = True, variant: Optional[str] = None, ): """ Save a model and its configuration file to a specified directory, allowing it to be re-loaded with the `[`~models.adapter.MultiAdapter.from_pretrained`]` class method. Args: save_directory (`str` or `os.PathLike`): The directory where the model will be saved. If the directory does not exist, it will be created. is_main_process (`bool`, optional, defaults=True): Indicates whether current process is the main process or not. Useful for distributed training (e.g., TPUs) and need to call this function on all processes. In this case, set `is_main_process=True` only for the main process to avoid race conditions. save_function (`Callable`): Function used to save the state dictionary. Useful for distributed training (e.g., TPUs) to replace `torch.save` with another method. Can also be configured using`DIFFUSERS_SAVE_MODE` environment variable. safe_serialization (`bool`, optional, defaults=True): If `True`, save the model using `safetensors`. If `False`, save the model with `pickle`. variant (`str`, *optional*): If specified, weights are saved in the format `pytorch_model.<variant>.bin`. """ idx = 0 model_path_to_save = save_directory for adapter in self.adapters: adapter.save_pretrained( model_path_to_save, is_main_process=is_main_process, save_function=save_function, safe_serialization=safe_serialization, variant=variant, ) idx += 1 model_path_to_save = model_path_to_save + f"_{idx}" @classmethod def from_pretrained(cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs): r""" Instantiate a pretrained `MultiAdapter` model from multiple pre-trained adapter models. The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated). To train the model, set it back to training mode using `model.train()`. Warnings: *Weights from XXX not initialized from pretrained model* means that the weights of XXX are not pretrained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning. *Weights from XXX not used in YYY* means that the layer XXX is not used by YYY, so those weights are discarded. Args: pretrained_model_path (`os.PathLike`): A path to a *directory* containing model weights saved using [`~diffusers.models.adapter.MultiAdapter.save_pretrained`], e.g., `./my_model_directory/adapter`. torch_dtype (`torch.dtype`, *optional*): Override the default `torch.dtype` and load the model under this dtype. output_loading_info(`bool`, *optional*, defaults to `False`): Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages. device_map (`str` or `Dict[str, Union[int, str, torch.device]]`, *optional*): A map that specifies where each submodule should go. It doesn't need to be refined to each parameter/buffer name, once a given module name is inside, every submodule of it will be sent to the same device. 
To have Accelerate compute the most optimized `device_map` automatically, set `device_map="auto"`. For more information about each option see [designing a device map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map). max_memory (`Dict`, *optional*): A dictionary mapping device identifiers to their maximum memory. Default to the maximum memory available for each GPU and the available CPU RAM if unset. low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`): Speed up model loading by not initializing the weights and only loading the pre-trained weights. This also tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model. This is only supported when torch version >= 1.9.0. If you are using an older version of torch, setting this argument to `True` will raise an error. variant (`str`, *optional*): If specified, load weights from a `variant` file (*e.g.* pytorch_model.<variant>.bin). `variant` will be ignored when using `from_flax`. use_safetensors (`bool`, *optional*, defaults to `None`): If `None`, the `safetensors` weights will be downloaded if available **and** if`safetensors` library is installed. If `True`, the model will be forcibly loaded from`safetensors` weights. If `False`, `safetensors` is not used. """ idx = 0 adapters = [] # load adapter and append to list until no adapter directory exists anymore # first adapter has to be saved under `./mydirectory/adapter` to be compliant with `DiffusionPipeline.from_pretrained` # second, third, ... adapters have to be saved under `./mydirectory/adapter_1`, `./mydirectory/adapter_2`, ... model_path_to_load = pretrained_model_path while os.path.isdir(model_path_to_load): adapter = T2IAdapter.from_pretrained(model_path_to_load, **kwargs) adapters.append(adapter) idx += 1 model_path_to_load = pretrained_model_path + f"_{idx}" logger.info(f"{len(adapters)} adapters loaded from {pretrained_model_path}.") if len(adapters) == 0: raise ValueError( f"No T2IAdapters found under {os.path.dirname(pretrained_model_path)}. Expected at least {pretrained_model_path + '_0'}." ) return cls(adapters) class T2IAdapter(ModelMixin, ConfigMixin): r""" A simple ResNet-like model that accepts images containing control signals such as keyposes and depth. The model generates multiple feature maps that are used as additional conditioning in [`UNet2DConditionModel`]. The model's architecture follows the original implementation of [Adapter](https://github.com/TencentARC/T2I-Adapter/blob/686de4681515662c0ac2ffa07bf5dda83af1038a/ldm/modules/encoders/adapter.py#L97) and [AdapterLight](https://github.com/TencentARC/T2I-Adapter/blob/686de4681515662c0ac2ffa07bf5dda83af1038a/ldm/modules/encoders/adapter.py#L235). This model inherits from [`ModelMixin`]. Check the superclass documentation for the common methods, such as downloading or saving. Args: in_channels (`int`, *optional*, defaults to `3`): The number of channels in the adapter's input (*control image*). Set it to 1 if you're using a gray scale image. channels (`List[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`): The number of channels in each downsample block's output hidden state. The `len(block_out_channels)` determines the number of downsample blocks in the adapter. num_res_blocks (`int`, *optional*, defaults to `2`): Number of ResNet blocks in each downsample block. downscale_factor (`int`, *optional*, defaults to `8`): A factor that determines the total downscale factor of the Adapter. 
adapter_type (`str`, *optional*, defaults to `full_adapter`): Adapter type (`full_adapter` or `full_adapter_xl` or `light_adapter`) to use. """ @register_to_config def __init__( self, in_channels: int = 3, channels: List[int] = [320, 640, 1280, 1280], num_res_blocks: int = 2, downscale_factor: int = 8, adapter_type: str = "full_adapter", ): super().__init__() if adapter_type == "full_adapter": self.adapter = FullAdapter(in_channels, channels, num_res_blocks, downscale_factor) elif adapter_type == "full_adapter_xl": self.adapter = FullAdapterXL(in_channels, channels, num_res_blocks, downscale_factor) elif adapter_type == "light_adapter": self.adapter = LightAdapter(in_channels, channels, num_res_blocks, downscale_factor) else: raise ValueError( f"Unsupported adapter_type: '{adapter_type}'. Choose either 'full_adapter' or " "'full_adapter_xl' or 'light_adapter'." ) def forward(self, x: torch.Tensor) -> List[torch.Tensor]: r""" This function processes the input tensor `x` through the adapter model and returns a list of feature tensors, each representing information extracted at a different scale from the input. The length of the list is determined by the number of downsample blocks in the Adapter, as specified by the `channels` and `num_res_blocks` parameters during initialization. """ return self.adapter(x) @property def total_downscale_factor(self): return self.adapter.total_downscale_factor @property def downscale_factor(self): """The downscale factor applied in the T2I-Adapter's initial pixel unshuffle operation. If an input image's dimensions are not evenly divisible by the downscale_factor then an exception will be raised. """ return self.adapter.unshuffle.downscale_factor # full adapter class FullAdapter(nn.Module): r""" See [`T2IAdapter`] for more information. """ def __init__( self, in_channels: int = 3, channels: List[int] = [320, 640, 1280, 1280], num_res_blocks: int = 2, downscale_factor: int = 8, ): super().__init__() in_channels = in_channels * downscale_factor**2 self.unshuffle = nn.PixelUnshuffle(downscale_factor) self.conv_in = nn.Conv2d(in_channels, channels[0], kernel_size=3, padding=1) self.body = nn.ModuleList( [ AdapterBlock(channels[0], channels[0], num_res_blocks), *[ AdapterBlock(channels[i - 1], channels[i], num_res_blocks, down=True) for i in range(1, len(channels)) ], ] ) self.total_downscale_factor = downscale_factor * 2 ** (len(channels) - 1) def forward(self, x: torch.Tensor) -> List[torch.Tensor]: r""" This method processes the input tensor `x` through the FullAdapter model and performs operations including pixel unshuffling, convolution, and a stack of AdapterBlocks. It returns a list of feature tensors, each capturing information at a different stage of processing within the FullAdapter model. The number of feature tensors in the list is determined by the number of downsample blocks specified during initialization. """ x = self.unshuffle(x) x = self.conv_in(x) features = [] for block in self.body: x = block(x) features.append(x) return features class FullAdapterXL(nn.Module): r""" See [`T2IAdapter`] for more information. 
""" def __init__( self, in_channels: int = 3, channels: List[int] = [320, 640, 1280, 1280], num_res_blocks: int = 2, downscale_factor: int = 16, ): super().__init__() in_channels = in_channels * downscale_factor**2 self.unshuffle = nn.PixelUnshuffle(downscale_factor) self.conv_in = nn.Conv2d(in_channels, channels[0], kernel_size=3, padding=1) self.body = [] # blocks to extract XL features with dimensions of [320, 64, 64], [640, 64, 64], [1280, 32, 32], [1280, 32, 32] for i in range(len(channels)): if i == 1: self.body.append(AdapterBlock(channels[i - 1], channels[i], num_res_blocks)) elif i == 2: self.body.append(AdapterBlock(channels[i - 1], channels[i], num_res_blocks, down=True)) else: self.body.append(AdapterBlock(channels[i], channels[i], num_res_blocks)) self.body = nn.ModuleList(self.body) # XL has only one downsampling AdapterBlock. self.total_downscale_factor = downscale_factor * 2 def forward(self, x: torch.Tensor) -> List[torch.Tensor]: r""" This method takes the tensor x as input and processes it through FullAdapterXL model. It consists of operations including unshuffling pixels, applying convolution layer and appending each block into list of feature tensors. """ x = self.unshuffle(x) x = self.conv_in(x) features = [] for block in self.body: x = block(x) features.append(x) return features class AdapterBlock(nn.Module): r""" An AdapterBlock is a helper model that contains multiple ResNet-like blocks. It is used in the `FullAdapter` and `FullAdapterXL` models. Args: in_channels (`int`): Number of channels of AdapterBlock's input. out_channels (`int`): Number of channels of AdapterBlock's output. num_res_blocks (`int`): Number of ResNet blocks in the AdapterBlock. down (`bool`, *optional*, defaults to `False`): If `True`, perform downsampling on AdapterBlock's input. """ def __init__(self, in_channels: int, out_channels: int, num_res_blocks: int, down: bool = False): super().__init__() self.downsample = None if down: self.downsample = nn.AvgPool2d(kernel_size=2, stride=2, ceil_mode=True) self.in_conv = None if in_channels != out_channels: self.in_conv = nn.Conv2d(in_channels, out_channels, kernel_size=1) self.resnets = nn.Sequential( *[AdapterResnetBlock(out_channels) for _ in range(num_res_blocks)], ) def forward(self, x: torch.Tensor) -> torch.Tensor: r""" This method takes tensor x as input and performs operations downsampling and convolutional layers if the self.downsample and self.in_conv properties of AdapterBlock model are specified. Then it applies a series of residual blocks to the input tensor. """ if self.downsample is not None: x = self.downsample(x) if self.in_conv is not None: x = self.in_conv(x) x = self.resnets(x) return x class AdapterResnetBlock(nn.Module): r""" An `AdapterResnetBlock` is a helper model that implements a ResNet-like block. Args: channels (`int`): Number of channels of AdapterResnetBlock's input and output. """ def __init__(self, channels: int): super().__init__() self.block1 = nn.Conv2d(channels, channels, kernel_size=3, padding=1) self.act = nn.ReLU() self.block2 = nn.Conv2d(channels, channels, kernel_size=1) def forward(self, x: torch.Tensor) -> torch.Tensor: r""" This method takes input tensor x and applies a convolutional layer, ReLU activation, and another convolutional layer on the input tensor. It returns addition with the input tensor. """ h = self.act(self.block1(x)) h = self.block2(h) return h + x # light adapter class LightAdapter(nn.Module): r""" See [`T2IAdapter`] for more information. 
""" def __init__( self, in_channels: int = 3, channels: List[int] = [320, 640, 1280], num_res_blocks: int = 4, downscale_factor: int = 8, ): super().__init__() in_channels = in_channels * downscale_factor**2 self.unshuffle = nn.PixelUnshuffle(downscale_factor) self.body = nn.ModuleList( [ LightAdapterBlock(in_channels, channels[0], num_res_blocks), *[ LightAdapterBlock(channels[i], channels[i + 1], num_res_blocks, down=True) for i in range(len(channels) - 1) ], LightAdapterBlock(channels[-1], channels[-1], num_res_blocks, down=True), ] ) self.total_downscale_factor = downscale_factor * (2 ** len(channels)) def forward(self, x: torch.Tensor) -> List[torch.Tensor]: r""" This method takes the input tensor x and performs downscaling and appends it in list of feature tensors. Each feature tensor corresponds to a different level of processing within the LightAdapter. """ x = self.unshuffle(x) features = [] for block in self.body: x = block(x) features.append(x) return features class LightAdapterBlock(nn.Module): r""" A `LightAdapterBlock` is a helper model that contains multiple `LightAdapterResnetBlocks`. It is used in the `LightAdapter` model. Args: in_channels (`int`): Number of channels of LightAdapterBlock's input. out_channels (`int`): Number of channels of LightAdapterBlock's output. num_res_blocks (`int`): Number of LightAdapterResnetBlocks in the LightAdapterBlock. down (`bool`, *optional*, defaults to `False`): If `True`, perform downsampling on LightAdapterBlock's input. """ def __init__(self, in_channels: int, out_channels: int, num_res_blocks: int, down: bool = False): super().__init__() mid_channels = out_channels // 4 self.downsample = None if down: self.downsample = nn.AvgPool2d(kernel_size=2, stride=2, ceil_mode=True) self.in_conv = nn.Conv2d(in_channels, mid_channels, kernel_size=1) self.resnets = nn.Sequential(*[LightAdapterResnetBlock(mid_channels) for _ in range(num_res_blocks)]) self.out_conv = nn.Conv2d(mid_channels, out_channels, kernel_size=1) def forward(self, x: torch.Tensor) -> torch.Tensor: r""" This method takes tensor x as input and performs downsampling if required. Then it applies in convolution layer, a sequence of residual blocks, and out convolutional layer. """ if self.downsample is not None: x = self.downsample(x) x = self.in_conv(x) x = self.resnets(x) x = self.out_conv(x) return x class LightAdapterResnetBlock(nn.Module): """ A `LightAdapterResnetBlock` is a helper model that implements a ResNet-like block with a slightly different architecture than `AdapterResnetBlock`. Args: channels (`int`): Number of channels of LightAdapterResnetBlock's input and output. """ def __init__(self, channels: int): super().__init__() self.block1 = nn.Conv2d(channels, channels, kernel_size=3, padding=1) self.act = nn.ReLU() self.block2 = nn.Conv2d(channels, channels, kernel_size=3, padding=1) def forward(self, x: torch.Tensor) -> torch.Tensor: r""" This function takes input tensor x and processes it through one convolutional layer, ReLU activation, and another convolutional layer and adds it to input tensor. """ h = self.act(self.block1(x)) h = self.block2(h) return h + x
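As a quick smoke test of the classes above, the sketch below wires two `T2IAdapter` instances with identical downscaling behaviour into a `MultiAdapter` and runs a weighted forward pass. The reduced channel widths and random control tensors are purely illustrative; real checkpoints are normally loaded with `T2IAdapter.from_pretrained`.

```py
import torch
from diffusers import MultiAdapter, T2IAdapter

# Two small adapters; MultiAdapter requires matching downscaling behaviour.
adapter_a = T2IAdapter(in_channels=3, channels=[32, 64, 128, 128], num_res_blocks=1, downscale_factor=8)
adapter_b = T2IAdapter(in_channels=3, channels=[32, 64, 128, 128], num_res_blocks=1, downscale_factor=8)
multi_adapter = MultiAdapter([adapter_a, adapter_b])

# One control tensor per adapter; the spatial size must be divisible by
# total_downscale_factor = 8 * 2 ** (len(channels) - 1) = 64.
control_a = torch.randn(1, 3, 128, 128)
control_b = torch.randn(1, 3, 128, 128)

# Each returned tensor is the per-scale weighted sum of both adapters' features.
features = multi_adapter([control_a, control_b], adapter_weights=[0.6, 0.4])
for feat in features:
    print(feat.shape)
```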
# Source file: diffusers/src/diffusers/models/adapter.py (repo: diffusers)
# Copyright 2025 The Mochi team and The HuggingFace Team. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import functools from typing import Dict, Optional, Tuple, Union import torch import torch.nn as nn from ...configuration_utils import ConfigMixin, register_to_config from ...utils import logging from ...utils.accelerate_utils import apply_forward_hook from ..activations import get_activation from ..attention_processor import Attention, MochiVaeAttnProcessor2_0 from ..modeling_outputs import AutoencoderKLOutput from ..modeling_utils import ModelMixin from .autoencoder_kl_cogvideox import CogVideoXCausalConv3d from .vae import DecoderOutput, DiagonalGaussianDistribution logger = logging.get_logger(__name__) # pylint: disable=invalid-name class MochiChunkedGroupNorm3D(nn.Module): r""" Applies per-frame group normalization for 5D video inputs. It also supports memory-efficient chunked group normalization. Args: num_channels (int): Number of channels expected in input num_groups (int, optional): Number of groups to separate the channels into. Default: 32 affine (bool, optional): If True, this module has learnable affine parameters. Default: True chunk_size (int, optional): Size of each chunk for processing. Default: 8 """ def __init__( self, num_channels: int, num_groups: int = 32, affine: bool = True, chunk_size: int = 8, ): super().__init__() self.norm_layer = nn.GroupNorm(num_channels=num_channels, num_groups=num_groups, affine=affine) self.chunk_size = chunk_size def forward(self, x: torch.Tensor = None) -> torch.Tensor: batch_size = x.size(0) x = x.permute(0, 2, 1, 3, 4).flatten(0, 1) output = torch.cat([self.norm_layer(chunk) for chunk in x.split(self.chunk_size, dim=0)], dim=0) output = output.unflatten(0, (batch_size, -1)).permute(0, 2, 1, 3, 4) return output class MochiResnetBlock3D(nn.Module): r""" A 3D ResNet block used in the Mochi model. Args: in_channels (`int`): Number of input channels. out_channels (`int`, *optional*): Number of output channels. If None, defaults to `in_channels`. non_linearity (`str`, defaults to `"swish"`): Activation function to use. 
""" def __init__( self, in_channels: int, out_channels: Optional[int] = None, act_fn: str = "swish", ): super().__init__() out_channels = out_channels or in_channels self.in_channels = in_channels self.out_channels = out_channels self.nonlinearity = get_activation(act_fn) self.norm1 = MochiChunkedGroupNorm3D(num_channels=in_channels) self.conv1 = CogVideoXCausalConv3d( in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=1, pad_mode="replicate" ) self.norm2 = MochiChunkedGroupNorm3D(num_channels=out_channels) self.conv2 = CogVideoXCausalConv3d( in_channels=out_channels, out_channels=out_channels, kernel_size=3, stride=1, pad_mode="replicate" ) def forward( self, inputs: torch.Tensor, conv_cache: Optional[Dict[str, torch.Tensor]] = None, ) -> torch.Tensor: new_conv_cache = {} conv_cache = conv_cache or {} hidden_states = inputs hidden_states = self.norm1(hidden_states) hidden_states = self.nonlinearity(hidden_states) hidden_states, new_conv_cache["conv1"] = self.conv1(hidden_states, conv_cache=conv_cache.get("conv1")) hidden_states = self.norm2(hidden_states) hidden_states = self.nonlinearity(hidden_states) hidden_states, new_conv_cache["conv2"] = self.conv2(hidden_states, conv_cache=conv_cache.get("conv2")) hidden_states = hidden_states + inputs return hidden_states, new_conv_cache class MochiDownBlock3D(nn.Module): r""" An downsampling block used in the Mochi model. Args: in_channels (`int`): Number of input channels. out_channels (`int`, *optional*): Number of output channels. If None, defaults to `in_channels`. num_layers (`int`, defaults to `1`): Number of resnet blocks in the block. temporal_expansion (`int`, defaults to `2`): Temporal expansion factor. spatial_expansion (`int`, defaults to `2`): Spatial expansion factor. """ def __init__( self, in_channels: int, out_channels: int, num_layers: int = 1, temporal_expansion: int = 2, spatial_expansion: int = 2, add_attention: bool = True, ): super().__init__() self.temporal_expansion = temporal_expansion self.spatial_expansion = spatial_expansion self.conv_in = CogVideoXCausalConv3d( in_channels=in_channels, out_channels=out_channels, kernel_size=(temporal_expansion, spatial_expansion, spatial_expansion), stride=(temporal_expansion, spatial_expansion, spatial_expansion), pad_mode="replicate", ) resnets = [] norms = [] attentions = [] for _ in range(num_layers): resnets.append(MochiResnetBlock3D(in_channels=out_channels)) if add_attention: norms.append(MochiChunkedGroupNorm3D(num_channels=out_channels)) attentions.append( Attention( query_dim=out_channels, heads=out_channels // 32, dim_head=32, qk_norm="l2", is_causal=True, processor=MochiVaeAttnProcessor2_0(), ) ) else: norms.append(None) attentions.append(None) self.resnets = nn.ModuleList(resnets) self.norms = nn.ModuleList(norms) self.attentions = nn.ModuleList(attentions) self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, conv_cache: Optional[Dict[str, torch.Tensor]] = None, chunk_size: int = 2**15, ) -> torch.Tensor: r"""Forward method of the `MochiUpBlock3D` class.""" new_conv_cache = {} conv_cache = conv_cache or {} hidden_states, new_conv_cache["conv_in"] = self.conv_in(hidden_states) for i, (resnet, norm, attn) in enumerate(zip(self.resnets, self.norms, self.attentions)): conv_cache_key = f"resnet_{i}" if torch.is_grad_enabled() and self.gradient_checkpointing: hidden_states, new_conv_cache[conv_cache_key] = self._gradient_checkpointing_func( resnet, hidden_states, conv_cache.get(conv_cache_key), ) else: hidden_states, 
new_conv_cache[conv_cache_key] = resnet( hidden_states, conv_cache=conv_cache.get(conv_cache_key) ) if attn is not None: residual = hidden_states hidden_states = norm(hidden_states) batch_size, num_channels, num_frames, height, width = hidden_states.shape hidden_states = hidden_states.permute(0, 3, 4, 2, 1).flatten(0, 2).contiguous() # Perform attention in chunks to avoid following error: # RuntimeError: CUDA error: invalid configuration argument if hidden_states.size(0) <= chunk_size: hidden_states = attn(hidden_states) else: hidden_states_chunks = [] for i in range(0, hidden_states.size(0), chunk_size): hidden_states_chunk = hidden_states[i : i + chunk_size] hidden_states_chunk = attn(hidden_states_chunk) hidden_states_chunks.append(hidden_states_chunk) hidden_states = torch.cat(hidden_states_chunks) hidden_states = hidden_states.unflatten(0, (batch_size, height, width)).permute(0, 4, 3, 1, 2) hidden_states = residual + hidden_states return hidden_states, new_conv_cache class MochiMidBlock3D(nn.Module): r""" A middle block used in the Mochi model. Args: in_channels (`int`): Number of input channels. num_layers (`int`, defaults to `3`): Number of resnet blocks in the block. """ def __init__( self, in_channels: int, # 768 num_layers: int = 3, add_attention: bool = True, ): super().__init__() resnets = [] norms = [] attentions = [] for _ in range(num_layers): resnets.append(MochiResnetBlock3D(in_channels=in_channels)) if add_attention: norms.append(MochiChunkedGroupNorm3D(num_channels=in_channels)) attentions.append( Attention( query_dim=in_channels, heads=in_channels // 32, dim_head=32, qk_norm="l2", is_causal=True, processor=MochiVaeAttnProcessor2_0(), ) ) else: norms.append(None) attentions.append(None) self.resnets = nn.ModuleList(resnets) self.norms = nn.ModuleList(norms) self.attentions = nn.ModuleList(attentions) self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, conv_cache: Optional[Dict[str, torch.Tensor]] = None, ) -> torch.Tensor: r"""Forward method of the `MochiMidBlock3D` class.""" new_conv_cache = {} conv_cache = conv_cache or {} for i, (resnet, norm, attn) in enumerate(zip(self.resnets, self.norms, self.attentions)): conv_cache_key = f"resnet_{i}" if torch.is_grad_enabled() and self.gradient_checkpointing: hidden_states, new_conv_cache[conv_cache_key] = self._gradient_checkpointing_func( resnet, hidden_states, conv_cache.get(conv_cache_key) ) else: hidden_states, new_conv_cache[conv_cache_key] = resnet( hidden_states, conv_cache=conv_cache.get(conv_cache_key) ) if attn is not None: residual = hidden_states hidden_states = norm(hidden_states) batch_size, num_channels, num_frames, height, width = hidden_states.shape hidden_states = hidden_states.permute(0, 3, 4, 2, 1).flatten(0, 2).contiguous() hidden_states = attn(hidden_states) hidden_states = hidden_states.unflatten(0, (batch_size, height, width)).permute(0, 4, 3, 1, 2) hidden_states = residual + hidden_states return hidden_states, new_conv_cache class MochiUpBlock3D(nn.Module): r""" An upsampling block used in the Mochi model. Args: in_channels (`int`): Number of input channels. out_channels (`int`, *optional*): Number of output channels. If None, defaults to `in_channels`. num_layers (`int`, defaults to `1`): Number of resnet blocks in the block. temporal_expansion (`int`, defaults to `2`): Temporal expansion factor. spatial_expansion (`int`, defaults to `2`): Spatial expansion factor. 
""" def __init__( self, in_channels: int, out_channels: int, num_layers: int = 1, temporal_expansion: int = 2, spatial_expansion: int = 2, ): super().__init__() self.temporal_expansion = temporal_expansion self.spatial_expansion = spatial_expansion resnets = [] for _ in range(num_layers): resnets.append(MochiResnetBlock3D(in_channels=in_channels)) self.resnets = nn.ModuleList(resnets) self.proj = nn.Linear(in_channels, out_channels * temporal_expansion * spatial_expansion**2) self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, conv_cache: Optional[Dict[str, torch.Tensor]] = None, ) -> torch.Tensor: r"""Forward method of the `MochiUpBlock3D` class.""" new_conv_cache = {} conv_cache = conv_cache or {} for i, resnet in enumerate(self.resnets): conv_cache_key = f"resnet_{i}" if torch.is_grad_enabled() and self.gradient_checkpointing: hidden_states, new_conv_cache[conv_cache_key] = self._gradient_checkpointing_func( resnet, hidden_states, conv_cache.get(conv_cache_key), ) else: hidden_states, new_conv_cache[conv_cache_key] = resnet( hidden_states, conv_cache=conv_cache.get(conv_cache_key) ) hidden_states = hidden_states.permute(0, 2, 3, 4, 1) hidden_states = self.proj(hidden_states) hidden_states = hidden_states.permute(0, 4, 1, 2, 3) batch_size, num_channels, num_frames, height, width = hidden_states.shape st = self.temporal_expansion sh = self.spatial_expansion sw = self.spatial_expansion # Reshape and unpatchify hidden_states = hidden_states.view(batch_size, -1, st, sh, sw, num_frames, height, width) hidden_states = hidden_states.permute(0, 1, 5, 2, 6, 3, 7, 4).contiguous() hidden_states = hidden_states.view(batch_size, -1, num_frames * st, height * sh, width * sw) return hidden_states, new_conv_cache class FourierFeatures(nn.Module): def __init__(self, start: int = 6, stop: int = 8, step: int = 1): super().__init__() self.start = start self.stop = stop self.step = step def forward(self, inputs: torch.Tensor) -> torch.Tensor: r"""Forward method of the `FourierFeatures` class.""" original_dtype = inputs.dtype inputs = inputs.to(torch.float32) num_channels = inputs.shape[1] num_freqs = (self.stop - self.start) // self.step freqs = torch.arange(self.start, self.stop, self.step, dtype=inputs.dtype, device=inputs.device) w = torch.pow(2.0, freqs) * (2 * torch.pi) # [num_freqs] w = w.repeat(num_channels)[None, :, None, None, None] # [1, num_channels * num_freqs, 1, 1, 1] # Interleaved repeat of input channels to match w h = inputs.repeat_interleave( num_freqs, dim=1, output_size=inputs.shape[1] * num_freqs ) # [B, C * num_freqs, T, H, W] # Scale channels by frequency. h = w * h return torch.cat([inputs, torch.sin(h), torch.cos(h)], dim=1).to(original_dtype) class MochiEncoder3D(nn.Module): r""" The `MochiEncoder3D` layer of a variational autoencoder that encodes input video samples to its latent representation. Args: in_channels (`int`, *optional*): The number of input channels. out_channels (`int`, *optional*): The number of output channels. block_out_channels (`Tuple[int, ...]`, *optional*, defaults to `(128, 256, 512, 768)`): The number of output channels for each block. layers_per_block (`Tuple[int, ...]`, *optional*, defaults to `(3, 3, 4, 6, 3)`): The number of resnet blocks for each block. temporal_expansions (`Tuple[int, ...]`, *optional*, defaults to `(1, 2, 3)`): The temporal expansion factor for each of the up blocks. spatial_expansions (`Tuple[int, ...]`, *optional*, defaults to `(2, 2, 2)`): The spatial expansion factor for each of the up blocks. 
non_linearity (`str`, *optional*, defaults to `"swish"`): The non-linearity to use in the decoder. """ def __init__( self, in_channels: int, out_channels: int, block_out_channels: Tuple[int, ...] = (128, 256, 512, 768), layers_per_block: Tuple[int, ...] = (3, 3, 4, 6, 3), temporal_expansions: Tuple[int, ...] = (1, 2, 3), spatial_expansions: Tuple[int, ...] = (2, 2, 2), add_attention_block: Tuple[bool, ...] = (False, True, True, True, True), act_fn: str = "swish", ): super().__init__() self.nonlinearity = get_activation(act_fn) self.fourier_features = FourierFeatures() self.proj_in = nn.Linear(in_channels, block_out_channels[0]) self.block_in = MochiMidBlock3D( in_channels=block_out_channels[0], num_layers=layers_per_block[0], add_attention=add_attention_block[0] ) down_blocks = [] for i in range(len(block_out_channels) - 1): down_block = MochiDownBlock3D( in_channels=block_out_channels[i], out_channels=block_out_channels[i + 1], num_layers=layers_per_block[i + 1], temporal_expansion=temporal_expansions[i], spatial_expansion=spatial_expansions[i], add_attention=add_attention_block[i + 1], ) down_blocks.append(down_block) self.down_blocks = nn.ModuleList(down_blocks) self.block_out = MochiMidBlock3D( in_channels=block_out_channels[-1], num_layers=layers_per_block[-1], add_attention=add_attention_block[-1] ) self.norm_out = MochiChunkedGroupNorm3D(block_out_channels[-1]) self.proj_out = nn.Linear(block_out_channels[-1], 2 * out_channels, bias=False) self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, conv_cache: Optional[Dict[str, torch.Tensor]] = None ) -> torch.Tensor: r"""Forward method of the `MochiEncoder3D` class.""" new_conv_cache = {} conv_cache = conv_cache or {} hidden_states = self.fourier_features(hidden_states) hidden_states = hidden_states.permute(0, 2, 3, 4, 1) hidden_states = self.proj_in(hidden_states) hidden_states = hidden_states.permute(0, 4, 1, 2, 3) if torch.is_grad_enabled() and self.gradient_checkpointing: hidden_states, new_conv_cache["block_in"] = self._gradient_checkpointing_func( self.block_in, hidden_states, conv_cache.get("block_in") ) for i, down_block in enumerate(self.down_blocks): conv_cache_key = f"down_block_{i}" hidden_states, new_conv_cache[conv_cache_key] = self._gradient_checkpointing_func( down_block, hidden_states, conv_cache.get(conv_cache_key) ) else: hidden_states, new_conv_cache["block_in"] = self.block_in( hidden_states, conv_cache=conv_cache.get("block_in") ) for i, down_block in enumerate(self.down_blocks): conv_cache_key = f"down_block_{i}" hidden_states, new_conv_cache[conv_cache_key] = down_block( hidden_states, conv_cache=conv_cache.get(conv_cache_key) ) hidden_states, new_conv_cache["block_out"] = self.block_out( hidden_states, conv_cache=conv_cache.get("block_out") ) hidden_states = self.norm_out(hidden_states) hidden_states = self.nonlinearity(hidden_states) hidden_states = hidden_states.permute(0, 2, 3, 4, 1) hidden_states = self.proj_out(hidden_states) hidden_states = hidden_states.permute(0, 4, 1, 2, 3) return hidden_states, new_conv_cache class MochiDecoder3D(nn.Module): r""" The `MochiDecoder3D` layer of a variational autoencoder that decodes its latent representation into an output sample. Args: in_channels (`int`, *optional*): The number of input channels. out_channels (`int`, *optional*): The number of output channels. block_out_channels (`Tuple[int, ...]`, *optional*, defaults to `(128, 256, 512, 768)`): The number of output channels for each block. 
layers_per_block (`Tuple[int, ...]`, *optional*, defaults to `(3, 3, 4, 6, 3)`): The number of resnet blocks for each block. temporal_expansions (`Tuple[int, ...]`, *optional*, defaults to `(1, 2, 3)`): The temporal expansion factor for each of the up blocks. spatial_expansions (`Tuple[int, ...]`, *optional*, defaults to `(2, 2, 2)`): The spatial expansion factor for each of the up blocks. non_linearity (`str`, *optional*, defaults to `"swish"`): The non-linearity to use in the decoder. """ def __init__( self, in_channels: int, # 12 out_channels: int, # 3 block_out_channels: Tuple[int, ...] = (128, 256, 512, 768), layers_per_block: Tuple[int, ...] = (3, 3, 4, 6, 3), temporal_expansions: Tuple[int, ...] = (1, 2, 3), spatial_expansions: Tuple[int, ...] = (2, 2, 2), act_fn: str = "swish", ): super().__init__() self.nonlinearity = get_activation(act_fn) self.conv_in = nn.Conv3d(in_channels, block_out_channels[-1], kernel_size=(1, 1, 1)) self.block_in = MochiMidBlock3D( in_channels=block_out_channels[-1], num_layers=layers_per_block[-1], add_attention=False, ) up_blocks = [] for i in range(len(block_out_channels) - 1): up_block = MochiUpBlock3D( in_channels=block_out_channels[-i - 1], out_channels=block_out_channels[-i - 2], num_layers=layers_per_block[-i - 2], temporal_expansion=temporal_expansions[-i - 1], spatial_expansion=spatial_expansions[-i - 1], ) up_blocks.append(up_block) self.up_blocks = nn.ModuleList(up_blocks) self.block_out = MochiMidBlock3D( in_channels=block_out_channels[0], num_layers=layers_per_block[0], add_attention=False, ) self.proj_out = nn.Linear(block_out_channels[0], out_channels) self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, conv_cache: Optional[Dict[str, torch.Tensor]] = None ) -> torch.Tensor: r"""Forward method of the `MochiDecoder3D` class.""" new_conv_cache = {} conv_cache = conv_cache or {} hidden_states = self.conv_in(hidden_states) # 1. Mid if torch.is_grad_enabled() and self.gradient_checkpointing: hidden_states, new_conv_cache["block_in"] = self._gradient_checkpointing_func( self.block_in, hidden_states, conv_cache.get("block_in") ) for i, up_block in enumerate(self.up_blocks): conv_cache_key = f"up_block_{i}" hidden_states, new_conv_cache[conv_cache_key] = self._gradient_checkpointing_func( up_block, hidden_states, conv_cache.get(conv_cache_key) ) else: hidden_states, new_conv_cache["block_in"] = self.block_in( hidden_states, conv_cache=conv_cache.get("block_in") ) for i, up_block in enumerate(self.up_blocks): conv_cache_key = f"up_block_{i}" hidden_states, new_conv_cache[conv_cache_key] = up_block( hidden_states, conv_cache=conv_cache.get(conv_cache_key) ) hidden_states, new_conv_cache["block_out"] = self.block_out( hidden_states, conv_cache=conv_cache.get("block_out") ) hidden_states = self.nonlinearity(hidden_states) hidden_states = hidden_states.permute(0, 2, 3, 4, 1) hidden_states = self.proj_out(hidden_states) hidden_states = hidden_states.permute(0, 4, 1, 2, 3) return hidden_states, new_conv_cache class AutoencoderKLMochi(ModelMixin, ConfigMixin): r""" A VAE model with KL loss for encoding images into latents and decoding latent representations into images. Used in [Mochi 1 preview](https://github.com/genmoai/models). This model inherits from [`ModelMixin`]. Check the superclass documentation for it's generic methods implemented for all models (such as downloading or saving). Parameters: in_channels (int, *optional*, defaults to 3): Number of channels in the input image. 
out_channels (int, *optional*, defaults to 3): Number of channels in the output. block_out_channels (`Tuple[int]`, *optional*, defaults to `(64,)`): Tuple of block output channels. act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use. scaling_factor (`float`, *optional*, defaults to `1.15258426`): The component-wise standard deviation of the trained latent space computed using the first batch of the training set. This is used to scale the latent space to have unit variance when training the diffusion model. The latents are scaled with the formula `z = z * scaling_factor` before being passed to the diffusion model. When decoding, the latents are scaled back to the original scale with the formula: `z = 1 / scaling_factor * z`. For more details, refer to sections 4.3.2 and D.1 of the [High-Resolution Image Synthesis with Latent Diffusion Models](https://huggingface.co/papers/2112.10752) paper. """ _supports_gradient_checkpointing = True _no_split_modules = ["MochiResnetBlock3D"] @register_to_config def __init__( self, in_channels: int = 15, out_channels: int = 3, encoder_block_out_channels: Tuple[int] = (64, 128, 256, 384), decoder_block_out_channels: Tuple[int] = (128, 256, 512, 768), latent_channels: int = 12, layers_per_block: Tuple[int, ...] = (3, 3, 4, 6, 3), act_fn: str = "silu", temporal_expansions: Tuple[int, ...] = (1, 2, 3), spatial_expansions: Tuple[int, ...] = (2, 2, 2), add_attention_block: Tuple[bool, ...] = (False, True, True, True, True), latents_mean: Tuple[float, ...] = ( -0.06730895953510081, -0.038011381506090416, -0.07477820912866141, -0.05565264470995561, 0.012767231469026969, -0.04703542746246419, 0.043896967884726704, -0.09346305707025976, -0.09918314763016893, -0.008729793427399178, -0.011931556316503654, -0.0321993391887285, ), latents_std: Tuple[float, ...] = ( 0.9263795028493863, 0.9248894543193766, 0.9393059390890617, 0.959253732819592, 0.8244560132752793, 0.917259975397747, 0.9294154431013696, 1.3720942357788521, 0.881393668867029, 0.9168315692124348, 0.9185249279345552, 0.9274757570805041, ), scaling_factor: float = 1.0, ): super().__init__() self.encoder = MochiEncoder3D( in_channels=in_channels, out_channels=latent_channels, block_out_channels=encoder_block_out_channels, layers_per_block=layers_per_block, temporal_expansions=temporal_expansions, spatial_expansions=spatial_expansions, add_attention_block=add_attention_block, act_fn=act_fn, ) self.decoder = MochiDecoder3D( in_channels=latent_channels, out_channels=out_channels, block_out_channels=decoder_block_out_channels, layers_per_block=layers_per_block, temporal_expansions=temporal_expansions, spatial_expansions=spatial_expansions, act_fn=act_fn, ) self.spatial_compression_ratio = functools.reduce(lambda x, y: x * y, spatial_expansions, 1) self.temporal_compression_ratio = functools.reduce(lambda x, y: x * y, temporal_expansions, 1) # When decoding a batch of video latents at a time, one can save memory by slicing across the batch dimension # to perform decoding of a single video latent at a time. self.use_slicing = False # When decoding spatially large video latents, the memory requirement is very high. By breaking the video latent # frames spatially into smaller tiles and performing multiple forward passes for decoding, and then blending the # intermediate tiles together, the memory requirement can be lowered. self.use_tiling = False # When decoding temporally long video latents, the memory requirement is very high. 
By decoding latent frames # at a fixed frame batch size (based on `self.num_latent_frames_batch_size`), the memory requirement can be lowered. self.use_framewise_encoding = False self.use_framewise_decoding = False # This can be used to control the number of output frames in the final decoded video. To maintain consistency with # the original implementation, this defaults to `True`. # - Original implementation (drop_last_temporal_frames=True): # Output frames = (latent_frames - 1) * temporal_compression_ratio + 1 # - Without dropping additional temporally upscaled frames (drop_last_temporal_frames=False): # Output frames = latent_frames * temporal_compression_ratio # The latter case is useful for frame packing and some training/finetuning scenarios where the additional temporally upscaled frames are needed. self.drop_last_temporal_frames = True # This can be configured based on the amount of GPU memory available. # `12` for sample frames and `2` for latent frames are sensible defaults for consumer GPUs. # Setting it to higher values results in higher memory usage. self.num_sample_frames_batch_size = 12 self.num_latent_frames_batch_size = 2 # The minimal tile height and width for spatial tiling to be used self.tile_sample_min_height = 256 self.tile_sample_min_width = 256 # The minimal distance between two spatial tiles self.tile_sample_stride_height = 192 self.tile_sample_stride_width = 192 def enable_tiling( self, tile_sample_min_height: Optional[int] = None, tile_sample_min_width: Optional[int] = None, tile_sample_stride_height: Optional[float] = None, tile_sample_stride_width: Optional[float] = None, ) -> None: r""" Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow processing larger images. Args: tile_sample_min_height (`int`, *optional*): The minimum height required for a sample to be separated into tiles across the height dimension. tile_sample_min_width (`int`, *optional*): The minimum width required for a sample to be separated into tiles across the width dimension. tile_sample_stride_height (`int`, *optional*): The stride between two consecutive vertical tiles. This is to ensure that there are no tiling artifacts produced across the height dimension. tile_sample_stride_width (`int`, *optional*): The stride between two consecutive horizontal tiles. This is to ensure that there are no tiling artifacts produced across the width dimension. """ self.use_tiling = True self.tile_sample_min_height = tile_sample_min_height or self.tile_sample_min_height self.tile_sample_min_width = tile_sample_min_width or self.tile_sample_min_width self.tile_sample_stride_height = tile_sample_stride_height or self.tile_sample_stride_height self.tile_sample_stride_width = tile_sample_stride_width or self.tile_sample_stride_width def disable_tiling(self) -> None: r""" Disable tiled VAE decoding. If `enable_tiling` was previously enabled, this method will go back to computing decoding in one step. """ self.use_tiling = False def enable_slicing(self) -> None: r""" Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. """ self.use_slicing = True def disable_slicing(self) -> None: r""" Disable sliced VAE decoding.
If `enable_slicing` was previously enabled, this method will go back to computing decoding in one step. """ self.use_slicing = False def _enable_framewise_encoding(self): r""" Enables the framewise VAE encoding implementation with past latent padding. By default, Diffusers uses the oneshot encoding implementation without current latent replicate padding. Warning: Framewise encoding may not work as expected due to the causal attention layers. If you enable framewise encoding, encode a video, and try to decode it, there will be noticeable jittering effect. """ self.use_framewise_encoding = True for name, module in self.named_modules(): if isinstance(module, CogVideoXCausalConv3d): module.pad_mode = "constant" def _enable_framewise_decoding(self): r""" Enables the framewise VAE decoding implementation with past latent padding. By default, Diffusers uses the oneshot decoding implementation without current latent replicate padding. """ self.use_framewise_decoding = True for name, module in self.named_modules(): if isinstance(module, CogVideoXCausalConv3d): module.pad_mode = "constant" def _encode(self, x: torch.Tensor) -> torch.Tensor: batch_size, num_channels, num_frames, height, width = x.shape if self.use_tiling and (width > self.tile_sample_min_width or height > self.tile_sample_min_height): return self.tiled_encode(x) if self.use_framewise_encoding: raise NotImplementedError( "Frame-wise encoding does not work with the Mochi VAE Encoder due to the presence of attention layers. " "As intermediate frames are not independent from each other, they cannot be encoded frame-wise." ) else: enc, _ = self.encoder(x) return enc @apply_forward_hook def encode( self, x: torch.Tensor, return_dict: bool = True ) -> Union[AutoencoderKLOutput, Tuple[DiagonalGaussianDistribution]]: """ Encode a batch of images into latents. Args: x (`torch.Tensor`): Input batch of images. return_dict (`bool`, *optional*, defaults to `True`): Whether to return a [`~models.autoencoder_kl.AutoencoderKLOutput`] instead of a plain tuple. Returns: The latent representations of the encoded videos. If `return_dict` is True, a [`~models.autoencoder_kl.AutoencoderKLOutput`] is returned, otherwise a plain `tuple` is returned. 
""" if self.use_slicing and x.shape[0] > 1: encoded_slices = [self._encode(x_slice) for x_slice in x.split(1)] h = torch.cat(encoded_slices) else: h = self._encode(x) posterior = DiagonalGaussianDistribution(h) if not return_dict: return (posterior,) return AutoencoderKLOutput(latent_dist=posterior) def _decode(self, z: torch.Tensor, return_dict: bool = True) -> Union[DecoderOutput, torch.Tensor]: batch_size, num_channels, num_frames, height, width = z.shape tile_latent_min_height = self.tile_sample_min_height // self.spatial_compression_ratio tile_latent_min_width = self.tile_sample_min_width // self.spatial_compression_ratio if self.use_tiling and (width > tile_latent_min_width or height > tile_latent_min_height): return self.tiled_decode(z, return_dict=return_dict) if self.use_framewise_decoding: conv_cache = None dec = [] for i in range(0, num_frames, self.num_latent_frames_batch_size): z_intermediate = z[:, :, i : i + self.num_latent_frames_batch_size] z_intermediate, conv_cache = self.decoder(z_intermediate, conv_cache=conv_cache) dec.append(z_intermediate) dec = torch.cat(dec, dim=2) else: dec, _ = self.decoder(z) if self.drop_last_temporal_frames and dec.size(2) >= self.temporal_compression_ratio: dec = dec[:, :, self.temporal_compression_ratio - 1 :] if not return_dict: return (dec,) return DecoderOutput(sample=dec) @apply_forward_hook def decode(self, z: torch.Tensor, return_dict: bool = True) -> Union[DecoderOutput, torch.Tensor]: """ Decode a batch of images. Args: z (`torch.Tensor`): Input batch of latent vectors. return_dict (`bool`, *optional*, defaults to `True`): Whether to return a [`~models.vae.DecoderOutput`] instead of a plain tuple. Returns: [`~models.vae.DecoderOutput`] or `tuple`: If return_dict is True, a [`~models.vae.DecoderOutput`] is returned, otherwise a plain `tuple` is returned. """ if self.use_slicing and z.shape[0] > 1: decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)] decoded = torch.cat(decoded_slices) else: decoded = self._decode(z).sample if not return_dict: return (decoded,) return DecoderOutput(sample=decoded) def blend_v(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor: blend_extent = min(a.shape[3], b.shape[3], blend_extent) for y in range(blend_extent): b[:, :, :, y, :] = a[:, :, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, :, y, :] * ( y / blend_extent ) return b def blend_h(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor: blend_extent = min(a.shape[4], b.shape[4], blend_extent) for x in range(blend_extent): b[:, :, :, :, x] = a[:, :, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, :, x] * ( x / blend_extent ) return b def tiled_encode(self, x: torch.Tensor) -> torch.Tensor: r"""Encode a batch of images using a tiled encoder. Args: x (`torch.Tensor`): Input batch of videos. Returns: `torch.Tensor`: The latent representation of the encoded videos. 
""" batch_size, num_channels, num_frames, height, width = x.shape latent_height = height // self.spatial_compression_ratio latent_width = width // self.spatial_compression_ratio tile_latent_min_height = self.tile_sample_min_height // self.spatial_compression_ratio tile_latent_min_width = self.tile_sample_min_width // self.spatial_compression_ratio tile_latent_stride_height = self.tile_sample_stride_height // self.spatial_compression_ratio tile_latent_stride_width = self.tile_sample_stride_width // self.spatial_compression_ratio blend_height = tile_latent_min_height - tile_latent_stride_height blend_width = tile_latent_min_width - tile_latent_stride_width # Split x into overlapping tiles and encode them separately. # The tiles have an overlap to avoid seams between tiles. rows = [] for i in range(0, height, self.tile_sample_stride_height): row = [] for j in range(0, width, self.tile_sample_stride_width): if self.use_framewise_encoding: raise NotImplementedError( "Frame-wise encoding does not work with the Mochi VAE Encoder due to the presence of attention layers. " "As intermediate frames are not independent from each other, they cannot be encoded frame-wise." ) else: time, _ = self.encoder( x[:, :, :, i : i + self.tile_sample_min_height, j : j + self.tile_sample_min_width] ) row.append(time) rows.append(row) result_rows = [] for i, row in enumerate(rows): result_row = [] for j, tile in enumerate(row): # blend the above tile and the left tile # to the current tile and add the current tile to the result row if i > 0: tile = self.blend_v(rows[i - 1][j], tile, blend_height) if j > 0: tile = self.blend_h(row[j - 1], tile, blend_width) result_row.append(tile[:, :, :, :tile_latent_stride_height, :tile_latent_stride_width]) result_rows.append(torch.cat(result_row, dim=4)) enc = torch.cat(result_rows, dim=3)[:, :, :, :latent_height, :latent_width] return enc def tiled_decode(self, z: torch.Tensor, return_dict: bool = True) -> Union[DecoderOutput, torch.Tensor]: r""" Decode a batch of images using a tiled decoder. Args: z (`torch.Tensor`): Input batch of latent vectors. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~models.vae.DecoderOutput`] instead of a plain tuple. Returns: [`~models.vae.DecoderOutput`] or `tuple`: If return_dict is True, a [`~models.vae.DecoderOutput`] is returned, otherwise a plain `tuple` is returned. """ batch_size, num_channels, num_frames, height, width = z.shape sample_height = height * self.spatial_compression_ratio sample_width = width * self.spatial_compression_ratio tile_latent_min_height = self.tile_sample_min_height // self.spatial_compression_ratio tile_latent_min_width = self.tile_sample_min_width // self.spatial_compression_ratio tile_latent_stride_height = self.tile_sample_stride_height // self.spatial_compression_ratio tile_latent_stride_width = self.tile_sample_stride_width // self.spatial_compression_ratio blend_height = self.tile_sample_min_height - self.tile_sample_stride_height blend_width = self.tile_sample_min_width - self.tile_sample_stride_width # Split z into overlapping tiles and decode them separately. # The tiles have an overlap to avoid seams between tiles. 
rows = [] for i in range(0, height, tile_latent_stride_height): row = [] for j in range(0, width, tile_latent_stride_width): if self.use_framewise_decoding: time = [] conv_cache = None for k in range(0, num_frames, self.num_latent_frames_batch_size): tile = z[ :, :, k : k + self.num_latent_frames_batch_size, i : i + tile_latent_min_height, j : j + tile_latent_min_width, ] tile, conv_cache = self.decoder(tile, conv_cache=conv_cache) time.append(tile) time = torch.cat(time, dim=2) else: time, _ = self.decoder(z[:, :, :, i : i + tile_latent_min_height, j : j + tile_latent_min_width]) if self.drop_last_temporal_frames and time.size(2) >= self.temporal_compression_ratio: time = time[:, :, self.temporal_compression_ratio - 1 :] row.append(time) rows.append(row) result_rows = [] for i, row in enumerate(rows): result_row = [] for j, tile in enumerate(row): # blend the above tile and the left tile # to the current tile and add the current tile to the result row if i > 0: tile = self.blend_v(rows[i - 1][j], tile, blend_height) if j > 0: tile = self.blend_h(row[j - 1], tile, blend_width) result_row.append(tile[:, :, :, : self.tile_sample_stride_height, : self.tile_sample_stride_width]) result_rows.append(torch.cat(result_row, dim=4)) dec = torch.cat(result_rows, dim=3)[:, :, :, :sample_height, :sample_width] if not return_dict: return (dec,) return DecoderOutput(sample=dec) def forward( self, sample: torch.Tensor, sample_posterior: bool = False, return_dict: bool = True, generator: Optional[torch.Generator] = None, ) -> Union[torch.Tensor, torch.Tensor]: x = sample posterior = self.encode(x).latent_dist if sample_posterior: z = posterior.sample(generator=generator) else: z = posterior.mode() dec = self.decode(z) if not return_dict: return (dec,) return dec
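# A minimal, self-contained sketch (using the default config values shown above; the
# frame counts are illustrative, not canonical) of how the compression ratios and the
# `drop_last_temporal_frames` output-frame formulas documented in `__init__` work out.
import functools

_temporal_expansions = (1, 2, 3)
_spatial_expansions = (2, 2, 2)

_spatial_compression_ratio = functools.reduce(lambda x, y: x * y, _spatial_expansions, 1)  # 8
_temporal_compression_ratio = functools.reduce(lambda x, y: x * y, _temporal_expansions, 1)  # 6


def _expected_output_frames(latent_frames: int, drop_last_temporal_frames: bool = True) -> int:
    # Mirrors the comment in `__init__`: the original behaviour drops the extra
    # temporally upscaled frames, keeping (latent_frames - 1) * ratio + 1 frames.
    if drop_last_temporal_frames:
        return (latent_frames - 1) * _temporal_compression_ratio + 1
    return latent_frames * _temporal_compression_ratio


# A 28-frame latent therefore decodes to 163 frames by default (168 without dropping
# the extra frames), alongside the 8x spatial upscaling factor.
assert _expected_output_frames(28) == 163
assert _expected_output_frames(28, drop_last_temporal_frames=False) == 168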
diffusers/src/diffusers/models/autoencoders/autoencoder_kl_mochi.py/0
{ "file_path": "diffusers/src/diffusers/models/autoencoders/autoencoder_kl_mochi.py", "repo_id": "diffusers", "token_count": 21491 }
162
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Optional, Tuple, Union import flax import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict from ...configuration_utils import ConfigMixin, flax_register_to_config from ...utils import BaseOutput from ..embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps from ..modeling_flax_utils import FlaxModelMixin from ..unets.unet_2d_blocks_flax import ( FlaxCrossAttnDownBlock2D, FlaxDownBlock2D, FlaxUNetMidBlock2DCrossAttn, ) @flax.struct.dataclass class FlaxControlNetOutput(BaseOutput): """ The output of [`FlaxControlNetModel`]. Args: down_block_res_samples (`jnp.ndarray`): mid_block_res_sample (`jnp.ndarray`): """ down_block_res_samples: jnp.ndarray mid_block_res_sample: jnp.ndarray class FlaxControlNetConditioningEmbedding(nn.Module): conditioning_embedding_channels: int block_out_channels: Tuple[int, ...] = (16, 32, 96, 256) dtype: jnp.dtype = jnp.float32 def setup(self) -> None: self.conv_in = nn.Conv( self.block_out_channels[0], kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype, ) blocks = [] for i in range(len(self.block_out_channels) - 1): channel_in = self.block_out_channels[i] channel_out = self.block_out_channels[i + 1] conv1 = nn.Conv( channel_in, kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype, ) blocks.append(conv1) conv2 = nn.Conv( channel_out, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype, ) blocks.append(conv2) self.blocks = blocks self.conv_out = nn.Conv( self.conditioning_embedding_channels, kernel_size=(3, 3), padding=((1, 1), (1, 1)), kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, ) def __call__(self, conditioning: jnp.ndarray) -> jnp.ndarray: embedding = self.conv_in(conditioning) embedding = nn.silu(embedding) for block in self.blocks: embedding = block(embedding) embedding = nn.silu(embedding) embedding = self.conv_out(embedding) return embedding @flax_register_to_config class FlaxControlNetModel(nn.Module, FlaxModelMixin, ConfigMixin): r""" A ControlNet model. This model inherits from [`FlaxModelMixin`]. Check the superclass documentation for it’s generic methods implemented for all models (such as downloading or saving). This model is also a Flax Linen [`flax.linen.Module`](https://flax.readthedocs.io/en/latest/flax.linen.html#module) subclass. Use it as a regular Flax Linen module and refer to the Flax documentation for all matters related to its general usage and behavior. 
Inherent JAX features such as the following are supported: - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit) - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation) - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap) - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap) Parameters: sample_size (`int`, *optional*): The size of the input sample. in_channels (`int`, *optional*, defaults to 4): The number of channels in the input sample. down_block_types (`Tuple[str]`, *optional*, defaults to `("FlaxCrossAttnDownBlock2D", "FlaxCrossAttnDownBlock2D", "FlaxCrossAttnDownBlock2D", "FlaxDownBlock2D")`): The tuple of downsample blocks to use. block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`): The tuple of output channels for each block. layers_per_block (`int`, *optional*, defaults to 2): The number of layers per block. attention_head_dim (`int` or `Tuple[int]`, *optional*, defaults to 8): The dimension of the attention heads. num_attention_heads (`int` or `Tuple[int]`, *optional*): The number of attention heads. cross_attention_dim (`int`, *optional*, defaults to 768): The dimension of the cross attention features. dropout (`float`, *optional*, defaults to 0): Dropout probability for down, up and bottleneck blocks. flip_sin_to_cos (`bool`, *optional*, defaults to `True`): Whether to flip the sin to cos in the time embedding. freq_shift (`int`, *optional*, defaults to 0): The frequency shift to apply to the time embedding. controlnet_conditioning_channel_order (`str`, *optional*, defaults to `rgb`): The channel order of conditional image. Will convert to `rgb` if it's `bgr`. conditioning_embedding_out_channels (`tuple`, *optional*, defaults to `(16, 32, 96, 256)`): The tuple of output channel for each block in the `conditioning_embedding` layer. """ sample_size: int = 32 in_channels: int = 4 down_block_types: Tuple[str, ...] = ( "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D", ) only_cross_attention: Union[bool, Tuple[bool, ...]] = False block_out_channels: Tuple[int, ...] = (320, 640, 1280, 1280) layers_per_block: int = 2 attention_head_dim: Union[int, Tuple[int, ...]] = 8 num_attention_heads: Optional[Union[int, Tuple[int, ...]]] = None cross_attention_dim: int = 1280 dropout: float = 0.0 use_linear_projection: bool = False dtype: jnp.dtype = jnp.float32 flip_sin_to_cos: bool = True freq_shift: int = 0 controlnet_conditioning_channel_order: str = "rgb" conditioning_embedding_out_channels: Tuple[int, ...] 
= (16, 32, 96, 256) def init_weights(self, rng: jax.Array) -> FrozenDict: # init input tensors sample_shape = (1, self.in_channels, self.sample_size, self.sample_size) sample = jnp.zeros(sample_shape, dtype=jnp.float32) timesteps = jnp.ones((1,), dtype=jnp.int32) encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32) controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8) controlnet_cond = jnp.zeros(controlnet_cond_shape, dtype=jnp.float32) params_rng, dropout_rng = jax.random.split(rng) rngs = {"params": params_rng, "dropout": dropout_rng} return self.init(rngs, sample, timesteps, encoder_hidden_states, controlnet_cond)["params"] def setup(self) -> None: block_out_channels = self.block_out_channels time_embed_dim = block_out_channels[0] * 4 # If `num_attention_heads` is not defined (which is the case for most models) # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. # The reason for this behavior is to correct for incorrectly named variables that were introduced # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking # which is why we correct for the naming here. num_attention_heads = self.num_attention_heads or self.attention_head_dim # input self.conv_in = nn.Conv( block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, ) # time self.time_proj = FlaxTimesteps( block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift ) self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype) self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding( conditioning_embedding_channels=block_out_channels[0], block_out_channels=self.conditioning_embedding_out_channels, ) only_cross_attention = self.only_cross_attention if isinstance(only_cross_attention, bool): only_cross_attention = (only_cross_attention,) * len(self.down_block_types) if isinstance(num_attention_heads, int): num_attention_heads = (num_attention_heads,) * len(self.down_block_types) # down down_blocks = [] controlnet_down_blocks = [] output_channel = block_out_channels[0] controlnet_block = nn.Conv( output_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, ) controlnet_down_blocks.append(controlnet_block) for i, down_block_type in enumerate(self.down_block_types): input_channel = output_channel output_channel = block_out_channels[i] is_final_block = i == len(block_out_channels) - 1 if down_block_type == "CrossAttnDownBlock2D": down_block = FlaxCrossAttnDownBlock2D( in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i], add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], dtype=self.dtype, ) else: down_block = FlaxDownBlock2D( in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype, ) down_blocks.append(down_block) for _ in range(self.layers_per_block): controlnet_block = nn.Conv( output_channel, kernel_size=(1, 1), padding="VALID", 
kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, ) controlnet_down_blocks.append(controlnet_block) if not is_final_block: controlnet_block = nn.Conv( output_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, ) controlnet_down_blocks.append(controlnet_block) self.down_blocks = down_blocks self.controlnet_down_blocks = controlnet_down_blocks # mid mid_block_channel = block_out_channels[-1] self.mid_block = FlaxUNetMidBlock2DCrossAttn( in_channels=mid_block_channel, dropout=self.dropout, num_attention_heads=num_attention_heads[-1], use_linear_projection=self.use_linear_projection, dtype=self.dtype, ) self.controlnet_mid_block = nn.Conv( mid_block_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, ) def __call__( self, sample: jnp.ndarray, timesteps: Union[jnp.ndarray, float, int], encoder_hidden_states: jnp.ndarray, controlnet_cond: jnp.ndarray, conditioning_scale: float = 1.0, return_dict: bool = True, train: bool = False, ) -> Union[FlaxControlNetOutput, Tuple[Tuple[jnp.ndarray, ...], jnp.ndarray]]: r""" Args: sample (`jnp.ndarray`): (batch, channel, height, width) noisy inputs tensor timestep (`jnp.ndarray` or `float` or `int`): timesteps encoder_hidden_states (`jnp.ndarray`): (batch_size, sequence_length, hidden_size) encoder hidden states controlnet_cond (`jnp.ndarray`): (batch, channel, height, width) the conditional input tensor conditioning_scale (`float`, *optional*, defaults to `1.0`): the scale factor for controlnet outputs return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`models.unets.unet_2d_condition_flax.FlaxUNet2DConditionOutput`] instead of a plain tuple. train (`bool`, *optional*, defaults to `False`): Use deterministic functions and disable dropout when not training. Returns: [`~models.unets.unet_2d_condition_flax.FlaxUNet2DConditionOutput`] or `tuple`: [`~models.unets.unet_2d_condition_flax.FlaxUNet2DConditionOutput`] if `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is the sample tensor. """ channel_order = self.controlnet_conditioning_channel_order if channel_order == "bgr": controlnet_cond = jnp.flip(controlnet_cond, axis=1) # 1. time if not isinstance(timesteps, jnp.ndarray): timesteps = jnp.array([timesteps], dtype=jnp.int32) elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0: timesteps = timesteps.astype(dtype=jnp.float32) timesteps = jnp.expand_dims(timesteps, 0) t_emb = self.time_proj(timesteps) t_emb = self.time_embedding(t_emb) # 2. pre-process sample = jnp.transpose(sample, (0, 2, 3, 1)) sample = self.conv_in(sample) controlnet_cond = jnp.transpose(controlnet_cond, (0, 2, 3, 1)) controlnet_cond = self.controlnet_cond_embedding(controlnet_cond) sample += controlnet_cond # 3. down down_block_res_samples = (sample,) for down_block in self.down_blocks: if isinstance(down_block, FlaxCrossAttnDownBlock2D): sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train) else: sample, res_samples = down_block(sample, t_emb, deterministic=not train) down_block_res_samples += res_samples # 4. mid sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train) # 5. 
controlnet blocks controlnet_down_block_res_samples = () for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks): down_block_res_sample = controlnet_block(down_block_res_sample) controlnet_down_block_res_samples += (down_block_res_sample,) down_block_res_samples = controlnet_down_block_res_samples mid_block_res_sample = self.controlnet_mid_block(sample) # 6. scaling down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples] mid_block_res_sample *= conditioning_scale if not return_dict: return (down_block_res_samples, mid_block_res_sample) return FlaxControlNetOutput( down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample )
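# A minimal usage sketch for the module above. The hyperparameters and dummy shapes
# are illustrative assumptions that mirror those used by `init_weights`; this is not
# a canonical configuration. It initialises the parameters and runs one forward pass
# to obtain the down-block and mid-block residuals.
import jax
import jax.numpy as jnp

_model = FlaxControlNetModel(sample_size=32, cross_attention_dim=768)
_params = _model.init_weights(jax.random.PRNGKey(0))

_sample = jnp.zeros((1, _model.in_channels, _model.sample_size, _model.sample_size), dtype=jnp.float32)
_timesteps = jnp.ones((1,), dtype=jnp.int32)
_encoder_hidden_states = jnp.zeros((1, 77, _model.cross_attention_dim), dtype=jnp.float32)
_controlnet_cond = jnp.zeros((1, 3, _model.sample_size * 8, _model.sample_size * 8), dtype=jnp.float32)

_out = _model.apply(
    {"params": _params},
    _sample,
    _timesteps,
    _encoder_hidden_states,
    _controlnet_cond,
)
# `_out.down_block_res_samples` holds one residual per down-block output and
# `_out.mid_block_res_sample` the mid-block residual; both are already scaled by
# `conditioning_scale` (1.0 here) and are consumed by the UNet as extra residuals.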
diffusers/src/diffusers/models/controlnets/controlnet_flax.py/0
{ "file_path": "diffusers/src/diffusers/models/controlnets/controlnet_flax.py", "repo_id": "diffusers", "token_count": 7635 }
163
# coding=utf-8 # Copyright 2025 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch - Flax general utilities.""" import re import jax.numpy as jnp from flax.traverse_util import flatten_dict, unflatten_dict from jax.random import PRNGKey from ..utils import logging logger = logging.get_logger(__name__) def rename_key(key): regex = r"\w+[.]\d+" pats = re.findall(regex, key) for pat in pats: key = key.replace(pat, "_".join(pat.split("."))) return key ##################### # PyTorch => Flax # ##################### # Adapted from https://github.com/huggingface/transformers/blob/c603c80f46881ae18b2ca50770ef65fa4033eacd/src/transformers/modeling_flax_pytorch_utils.py#L69 # and https://github.com/patil-suraj/stable-diffusion-jax/blob/main/stable_diffusion_jax/convert_diffusers_to_jax.py def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict): """Rename PT weight names to corresponding Flax weight names and reshape tensor if necessary""" # conv norm or layer norm renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",) # rename attention layers if len(pt_tuple_key) > 1: for rename_from, rename_to in ( ("to_out_0", "proj_attn"), ("to_k", "key"), ("to_v", "value"), ("to_q", "query"), ): if pt_tuple_key[-2] == rename_from: weight_name = pt_tuple_key[-1] weight_name = "kernel" if weight_name == "weight" else weight_name renamed_pt_tuple_key = pt_tuple_key[:-2] + (rename_to, weight_name) if renamed_pt_tuple_key in random_flax_state_dict: assert random_flax_state_dict[renamed_pt_tuple_key].shape == pt_tensor.T.shape return renamed_pt_tuple_key, pt_tensor.T if ( any("norm" in str_ for str_ in pt_tuple_key) and (pt_tuple_key[-1] == "bias") and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict) and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict) ): renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",) return renamed_pt_tuple_key, pt_tensor elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict: renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",) return renamed_pt_tuple_key, pt_tensor # embedding if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict: pt_tuple_key = pt_tuple_key[:-1] + ("embedding",) return renamed_pt_tuple_key, pt_tensor # conv layer renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",) if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4: pt_tensor = pt_tensor.transpose(2, 3, 1, 0) return renamed_pt_tuple_key, pt_tensor # linear layer renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",) if pt_tuple_key[-1] == "weight": pt_tensor = pt_tensor.T return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm weight renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",) if pt_tuple_key[-1] == "gamma": return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm bias renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",) if pt_tuple_key[-1] == "beta": return renamed_pt_tuple_key, pt_tensor return pt_tuple_key, pt_tensor def 
convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42): # Step 1: Convert pytorch tensor to numpy pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()} # Step 2: Since the model is stateless, get random Flax params random_flax_params = flax_model.init_weights(PRNGKey(init_key)) random_flax_state_dict = flatten_dict(random_flax_params) flax_state_dict = {} # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): renamed_pt_key = rename_key(pt_key) pt_tuple_key = tuple(renamed_pt_key.split(".")) # Correctly rename weight parameters flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict) if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape " f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}." ) # also add unexpected weight so that warning is thrown flax_state_dict[flax_key] = jnp.asarray(flax_tensor) return unflatten_dict(flax_state_dict)
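# A small standalone illustration (placeholder arrays only) of what the helpers above
# do to typical PyTorch parameter names and weight layouts.
import numpy as np

# `rename_key` rewrites "name.<digit>" segments as "name_<digit>":
assert rename_key("down_blocks.0.resnets.1.conv1.weight") == "down_blocks_0.resnets_1.conv1.weight"

# A 4D conv weight in PyTorch's (out, in, kH, kW) layout becomes a Flax `kernel`
# in (kH, kW, in, out) layout:
_pt_conv_weight = np.zeros((8, 3, 3, 3), dtype=np.float32)
_flax_conv_kernel = _pt_conv_weight.transpose(2, 3, 1, 0)

# A 2D linear weight in (out, in) layout is transposed into a Flax `kernel` of shape (in, out):
_pt_linear_weight = np.zeros((16, 32), dtype=np.float32)
_flax_linear_kernel = _pt_linear_weight.T

assert _flax_conv_kernel.shape == (3, 3, 3, 8) and _flax_linear_kernel.shape == (32, 16)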
diffusers/src/diffusers/models/modeling_flax_pytorch_utils.py/0
{ "file_path": "diffusers/src/diffusers/models/modeling_flax_pytorch_utils.py", "repo_id": "diffusers", "token_count": 2325 }
164
# Copyright 2025 Alpha-VLLM Authors and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Any, Dict, Optional import torch import torch.nn as nn from ...configuration_utils import ConfigMixin, register_to_config from ...utils import logging from ..attention import LuminaFeedForward from ..attention_processor import Attention, LuminaAttnProcessor2_0 from ..embeddings import ( LuminaCombinedTimestepCaptionEmbedding, LuminaPatchEmbed, ) from ..modeling_outputs import Transformer2DModelOutput from ..modeling_utils import ModelMixin from ..normalization import LuminaLayerNormContinuous, LuminaRMSNormZero, RMSNorm logger = logging.get_logger(__name__) # pylint: disable=invalid-name class LuminaNextDiTBlock(nn.Module): """ A LuminaNextDiTBlock for LuminaNextDiT2DModel. Parameters: dim (`int`): Embedding dimension of the input features. num_attention_heads (`int`): Number of attention heads. num_kv_heads (`int`): Number of key/value attention heads (when using GQA); set to None to use the same number as the query heads. multiple_of (`int`): The value that the hidden dimension of the feed-forward layer is rounded up to a multiple of. ffn_dim_multiplier (`float`): A multiplier applied to the hidden dimension of the feed-forward layer. norm_eps (`float`): The epsilon used by the normalization layers. qk_norm (`bool`): Whether to normalize the query and key projections. cross_attention_dim (`int`): Cross attention embedding dimension of the input text prompt hidden_states.
norm_elementwise_affine (`bool`, *optional*, defaults to True), """ def __init__( self, dim: int, num_attention_heads: int, num_kv_heads: int, multiple_of: int, ffn_dim_multiplier: float, norm_eps: float, qk_norm: bool, cross_attention_dim: int, norm_elementwise_affine: bool = True, ) -> None: super().__init__() self.head_dim = dim // num_attention_heads self.gate = nn.Parameter(torch.zeros([num_attention_heads])) # Self-attention self.attn1 = Attention( query_dim=dim, cross_attention_dim=None, dim_head=dim // num_attention_heads, qk_norm="layer_norm_across_heads" if qk_norm else None, heads=num_attention_heads, kv_heads=num_kv_heads, eps=1e-5, bias=False, out_bias=False, processor=LuminaAttnProcessor2_0(), ) self.attn1.to_out = nn.Identity() # Cross-attention self.attn2 = Attention( query_dim=dim, cross_attention_dim=cross_attention_dim, dim_head=dim // num_attention_heads, qk_norm="layer_norm_across_heads" if qk_norm else None, heads=num_attention_heads, kv_heads=num_kv_heads, eps=1e-5, bias=False, out_bias=False, processor=LuminaAttnProcessor2_0(), ) self.feed_forward = LuminaFeedForward( dim=dim, inner_dim=int(4 * 2 * dim / 3), multiple_of=multiple_of, ffn_dim_multiplier=ffn_dim_multiplier, ) self.norm1 = LuminaRMSNormZero( embedding_dim=dim, norm_eps=norm_eps, norm_elementwise_affine=norm_elementwise_affine, ) self.ffn_norm1 = RMSNorm(dim, eps=norm_eps, elementwise_affine=norm_elementwise_affine) self.norm2 = RMSNorm(dim, eps=norm_eps, elementwise_affine=norm_elementwise_affine) self.ffn_norm2 = RMSNorm(dim, eps=norm_eps, elementwise_affine=norm_elementwise_affine) self.norm1_context = RMSNorm(cross_attention_dim, eps=norm_eps, elementwise_affine=norm_elementwise_affine) def forward( self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, image_rotary_emb: torch.Tensor, encoder_hidden_states: torch.Tensor, encoder_mask: torch.Tensor, temb: torch.Tensor, cross_attention_kwargs: Optional[Dict[str, Any]] = None, ): """ Perform a forward pass through the LuminaNextDiTBlock. Parameters: hidden_states (`torch.Tensor`): The input of hidden_states for LuminaNextDiTBlock. attention_mask (`torch.Tensor): The input of hidden_states corresponse attention mask. image_rotary_emb (`torch.Tensor`): Precomputed cosine and sine frequencies. encoder_hidden_states: (`torch.Tensor`): The hidden_states of text prompt are processed by Gemma encoder. encoder_mask (`torch.Tensor`): The hidden_states of text prompt attention mask. temb (`torch.Tensor`): Timestep embedding with text prompt embedding. cross_attention_kwargs (`Dict[str, Any]`): kwargs for cross attention. 
""" residual = hidden_states # Self-attention norm_hidden_states, gate_msa, scale_mlp, gate_mlp = self.norm1(hidden_states, temb) self_attn_output = self.attn1( hidden_states=norm_hidden_states, encoder_hidden_states=norm_hidden_states, attention_mask=attention_mask, query_rotary_emb=image_rotary_emb, key_rotary_emb=image_rotary_emb, **cross_attention_kwargs, ) # Cross-attention norm_encoder_hidden_states = self.norm1_context(encoder_hidden_states) cross_attn_output = self.attn2( hidden_states=norm_hidden_states, encoder_hidden_states=norm_encoder_hidden_states, attention_mask=encoder_mask, query_rotary_emb=image_rotary_emb, key_rotary_emb=None, **cross_attention_kwargs, ) cross_attn_output = cross_attn_output * self.gate.tanh().view(1, 1, -1, 1) mixed_attn_output = self_attn_output + cross_attn_output mixed_attn_output = mixed_attn_output.flatten(-2) # linear proj hidden_states = self.attn2.to_out[0](mixed_attn_output) hidden_states = residual + gate_msa.unsqueeze(1).tanh() * self.norm2(hidden_states) mlp_output = self.feed_forward(self.ffn_norm1(hidden_states) * (1 + scale_mlp.unsqueeze(1))) hidden_states = hidden_states + gate_mlp.unsqueeze(1).tanh() * self.ffn_norm2(mlp_output) return hidden_states class LuminaNextDiT2DModel(ModelMixin, ConfigMixin): """ LuminaNextDiT: Diffusion model with a Transformer backbone. Inherit ModelMixin and ConfigMixin to be compatible with the sampler StableDiffusionPipeline of diffusers. Parameters: sample_size (`int`): The width of the latent images. This is fixed during training since it is used to learn a number of position embeddings. patch_size (`int`, *optional*, (`int`, *optional*, defaults to 2): The size of each patch in the image. This parameter defines the resolution of patches fed into the model. in_channels (`int`, *optional*, defaults to 4): The number of input channels for the model. Typically, this matches the number of channels in the input images. hidden_size (`int`, *optional*, defaults to 4096): The dimensionality of the hidden layers in the model. This parameter determines the width of the model's hidden representations. num_layers (`int`, *optional*, default to 32): The number of layers in the model. This defines the depth of the neural network. num_attention_heads (`int`, *optional*, defaults to 32): The number of attention heads in each attention layer. This parameter specifies how many separate attention mechanisms are used. num_kv_heads (`int`, *optional*, defaults to 8): The number of key-value heads in the attention mechanism, if different from the number of attention heads. If None, it defaults to num_attention_heads. multiple_of (`int`, *optional*, defaults to 256): A factor that the hidden size should be a multiple of. This can help optimize certain hardware configurations. ffn_dim_multiplier (`float`, *optional*): A multiplier for the dimensionality of the feed-forward network. If None, it uses a default value based on the model configuration. norm_eps (`float`, *optional*, defaults to 1e-5): A small value added to the denominator for numerical stability in normalization layers. learn_sigma (`bool`, *optional*, defaults to True): Whether the model should learn the sigma parameter, which might be related to uncertainty or variance in predictions. qk_norm (`bool`, *optional*, defaults to True): Indicates if the queries and keys in the attention mechanism should be normalized. cross_attention_dim (`int`, *optional*, defaults to 2048): The dimensionality of the text embeddings. 
This parameter defines the size of the text representations used in the model. scaling_factor (`float`, *optional*, defaults to 1.0): A scaling factor applied to certain parameters or layers in the model. This can be used for adjusting the overall scale of the model's operations. """ _skip_layerwise_casting_patterns = ["patch_embedder", "norm", "ffn_norm"] @register_to_config def __init__( self, sample_size: int = 128, patch_size: Optional[int] = 2, in_channels: Optional[int] = 4, hidden_size: Optional[int] = 2304, num_layers: Optional[int] = 32, num_attention_heads: Optional[int] = 32, num_kv_heads: Optional[int] = None, multiple_of: Optional[int] = 256, ffn_dim_multiplier: Optional[float] = None, norm_eps: Optional[float] = 1e-5, learn_sigma: Optional[bool] = True, qk_norm: Optional[bool] = True, cross_attention_dim: Optional[int] = 2048, scaling_factor: Optional[float] = 1.0, ) -> None: super().__init__() self.sample_size = sample_size self.patch_size = patch_size self.in_channels = in_channels self.out_channels = in_channels * 2 if learn_sigma else in_channels self.hidden_size = hidden_size self.num_attention_heads = num_attention_heads self.head_dim = hidden_size // num_attention_heads self.scaling_factor = scaling_factor self.patch_embedder = LuminaPatchEmbed( patch_size=patch_size, in_channels=in_channels, embed_dim=hidden_size, bias=True ) self.pad_token = nn.Parameter(torch.empty(hidden_size)) self.time_caption_embed = LuminaCombinedTimestepCaptionEmbedding( hidden_size=min(hidden_size, 1024), cross_attention_dim=cross_attention_dim ) self.layers = nn.ModuleList( [ LuminaNextDiTBlock( hidden_size, num_attention_heads, num_kv_heads, multiple_of, ffn_dim_multiplier, norm_eps, qk_norm, cross_attention_dim, ) for _ in range(num_layers) ] ) self.norm_out = LuminaLayerNormContinuous( embedding_dim=hidden_size, conditioning_embedding_dim=min(hidden_size, 1024), elementwise_affine=False, eps=1e-6, bias=True, out_dim=patch_size * patch_size * self.out_channels, ) # self.final_layer = LuminaFinalLayer(hidden_size, patch_size, self.out_channels) assert (hidden_size // num_attention_heads) % 4 == 0, "2d rope needs head dim to be divisible by 4" def forward( self, hidden_states: torch.Tensor, timestep: torch.Tensor, encoder_hidden_states: torch.Tensor, encoder_mask: torch.Tensor, image_rotary_emb: torch.Tensor, cross_attention_kwargs: Dict[str, Any] = None, return_dict=True, ) -> torch.Tensor: """ Forward pass of LuminaNextDiT. Parameters: hidden_states (torch.Tensor): Input tensor of shape (N, C, H, W). timestep (torch.Tensor): Tensor of diffusion timesteps of shape (N,). encoder_hidden_states (torch.Tensor): Tensor of caption features of shape (N, D). encoder_mask (torch.Tensor): Tensor of caption masks of shape (N, L). 
""" hidden_states, mask, img_size, image_rotary_emb = self.patch_embedder(hidden_states, image_rotary_emb) image_rotary_emb = image_rotary_emb.to(hidden_states.device) temb = self.time_caption_embed(timestep, encoder_hidden_states, encoder_mask) encoder_mask = encoder_mask.bool() for layer in self.layers: hidden_states = layer( hidden_states, mask, image_rotary_emb, encoder_hidden_states, encoder_mask, temb=temb, cross_attention_kwargs=cross_attention_kwargs, ) hidden_states = self.norm_out(hidden_states, temb) # unpatchify height_tokens = width_tokens = self.patch_size height, width = img_size[0] batch_size = hidden_states.size(0) sequence_length = (height // height_tokens) * (width // width_tokens) hidden_states = hidden_states[:, :sequence_length].view( batch_size, height // height_tokens, width // width_tokens, height_tokens, width_tokens, self.out_channels ) output = hidden_states.permute(0, 5, 1, 3, 2, 4).flatten(4, 5).flatten(2, 3) if not return_dict: return (output,) return Transformer2DModelOutput(sample=output)
diffusers/src/diffusers/models/transformers/lumina_nextdit2d.py/0
{ "file_path": "diffusers/src/diffusers/models/transformers/lumina_nextdit2d.py", "repo_id": "diffusers", "token_count": 6311 }
165
# Copyright 2025 The Hunyuan Team and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Any, Dict, List, Optional, Tuple, Union import torch import torch.nn as nn import torch.nn.functional as F from diffusers.loaders import FromOriginalModelMixin from ...configuration_utils import ConfigMixin, register_to_config from ...loaders import PeftAdapterMixin from ...utils import USE_PEFT_BACKEND, logging, scale_lora_layers, unscale_lora_layers from ..attention import FeedForward from ..attention_processor import Attention, AttentionProcessor from ..cache_utils import CacheMixin from ..embeddings import ( CombinedTimestepTextProjEmbeddings, PixArtAlphaTextProjection, TimestepEmbedding, Timesteps, get_1d_rotary_pos_embed, ) from ..modeling_outputs import Transformer2DModelOutput from ..modeling_utils import ModelMixin from ..normalization import AdaLayerNormContinuous, AdaLayerNormZero, AdaLayerNormZeroSingle, FP32LayerNorm logger = logging.get_logger(__name__) # pylint: disable=invalid-name class HunyuanVideoAttnProcessor2_0: def __init__(self): if not hasattr(F, "scaled_dot_product_attention"): raise ImportError( "HunyuanVideoAttnProcessor2_0 requires PyTorch 2.0. To use it, please upgrade PyTorch to 2.0." ) def __call__( self, attn: Attention, hidden_states: torch.Tensor, encoder_hidden_states: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, image_rotary_emb: Optional[torch.Tensor] = None, ) -> torch.Tensor: if attn.add_q_proj is None and encoder_hidden_states is not None: hidden_states = torch.cat([hidden_states, encoder_hidden_states], dim=1) # 1. QKV projections query = attn.to_q(hidden_states) key = attn.to_k(hidden_states) value = attn.to_v(hidden_states) query = query.unflatten(2, (attn.heads, -1)).transpose(1, 2) key = key.unflatten(2, (attn.heads, -1)).transpose(1, 2) value = value.unflatten(2, (attn.heads, -1)).transpose(1, 2) # 2. QK normalization if attn.norm_q is not None: query = attn.norm_q(query) if attn.norm_k is not None: key = attn.norm_k(key) # 3. Rotational positional embeddings applied to latent stream if image_rotary_emb is not None: from ..embeddings import apply_rotary_emb if attn.add_q_proj is None and encoder_hidden_states is not None: query = torch.cat( [ apply_rotary_emb(query[:, :, : -encoder_hidden_states.shape[1]], image_rotary_emb), query[:, :, -encoder_hidden_states.shape[1] :], ], dim=2, ) key = torch.cat( [ apply_rotary_emb(key[:, :, : -encoder_hidden_states.shape[1]], image_rotary_emb), key[:, :, -encoder_hidden_states.shape[1] :], ], dim=2, ) else: query = apply_rotary_emb(query, image_rotary_emb) key = apply_rotary_emb(key, image_rotary_emb) # 4. 
Encoder condition QKV projection and normalization if attn.add_q_proj is not None and encoder_hidden_states is not None: encoder_query = attn.add_q_proj(encoder_hidden_states) encoder_key = attn.add_k_proj(encoder_hidden_states) encoder_value = attn.add_v_proj(encoder_hidden_states) encoder_query = encoder_query.unflatten(2, (attn.heads, -1)).transpose(1, 2) encoder_key = encoder_key.unflatten(2, (attn.heads, -1)).transpose(1, 2) encoder_value = encoder_value.unflatten(2, (attn.heads, -1)).transpose(1, 2) if attn.norm_added_q is not None: encoder_query = attn.norm_added_q(encoder_query) if attn.norm_added_k is not None: encoder_key = attn.norm_added_k(encoder_key) query = torch.cat([query, encoder_query], dim=2) key = torch.cat([key, encoder_key], dim=2) value = torch.cat([value, encoder_value], dim=2) # 5. Attention hidden_states = F.scaled_dot_product_attention( query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False ) hidden_states = hidden_states.transpose(1, 2).flatten(2, 3) hidden_states = hidden_states.to(query.dtype) # 6. Output projection if encoder_hidden_states is not None: hidden_states, encoder_hidden_states = ( hidden_states[:, : -encoder_hidden_states.shape[1]], hidden_states[:, -encoder_hidden_states.shape[1] :], ) if getattr(attn, "to_out", None) is not None: hidden_states = attn.to_out[0](hidden_states) hidden_states = attn.to_out[1](hidden_states) if getattr(attn, "to_add_out", None) is not None: encoder_hidden_states = attn.to_add_out(encoder_hidden_states) return hidden_states, encoder_hidden_states class HunyuanVideoPatchEmbed(nn.Module): def __init__( self, patch_size: Union[int, Tuple[int, int, int]] = 16, in_chans: int = 3, embed_dim: int = 768, ) -> None: super().__init__() patch_size = (patch_size, patch_size, patch_size) if isinstance(patch_size, int) else patch_size self.proj = nn.Conv3d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.proj(hidden_states) hidden_states = hidden_states.flatten(2).transpose(1, 2) # BCFHW -> BNC return hidden_states class HunyuanVideoAdaNorm(nn.Module): def __init__(self, in_features: int, out_features: Optional[int] = None) -> None: super().__init__() out_features = out_features or 2 * in_features self.linear = nn.Linear(in_features, out_features) self.nonlinearity = nn.SiLU() def forward( self, temb: torch.Tensor ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: temb = self.linear(self.nonlinearity(temb)) gate_msa, gate_mlp = temb.chunk(2, dim=1) gate_msa, gate_mlp = gate_msa.unsqueeze(1), gate_mlp.unsqueeze(1) return gate_msa, gate_mlp class HunyuanVideoTokenReplaceAdaLayerNormZero(nn.Module): def __init__(self, embedding_dim: int, norm_type: str = "layer_norm", bias: bool = True): super().__init__() self.silu = nn.SiLU() self.linear = nn.Linear(embedding_dim, 6 * embedding_dim, bias=bias) if norm_type == "layer_norm": self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False, eps=1e-6) elif norm_type == "fp32_layer_norm": self.norm = FP32LayerNorm(embedding_dim, elementwise_affine=False, bias=False) else: raise ValueError( f"Unsupported `norm_type` ({norm_type}) provided. Supported ones are: 'layer_norm', 'fp32_layer_norm'." 
) def forward( self, hidden_states: torch.Tensor, emb: torch.Tensor, token_replace_emb: torch.Tensor, first_frame_num_tokens: int, ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: emb = self.linear(self.silu(emb)) token_replace_emb = self.linear(self.silu(token_replace_emb)) shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=1) tr_shift_msa, tr_scale_msa, tr_gate_msa, tr_shift_mlp, tr_scale_mlp, tr_gate_mlp = token_replace_emb.chunk( 6, dim=1 ) norm_hidden_states = self.norm(hidden_states) hidden_states_zero = ( norm_hidden_states[:, :first_frame_num_tokens] * (1 + tr_scale_msa[:, None]) + tr_shift_msa[:, None] ) hidden_states_orig = ( norm_hidden_states[:, first_frame_num_tokens:] * (1 + scale_msa[:, None]) + shift_msa[:, None] ) hidden_states = torch.cat([hidden_states_zero, hidden_states_orig], dim=1) return ( hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp, tr_gate_msa, tr_shift_mlp, tr_scale_mlp, tr_gate_mlp, ) class HunyuanVideoTokenReplaceAdaLayerNormZeroSingle(nn.Module): def __init__(self, embedding_dim: int, norm_type: str = "layer_norm", bias: bool = True): super().__init__() self.silu = nn.SiLU() self.linear = nn.Linear(embedding_dim, 3 * embedding_dim, bias=bias) if norm_type == "layer_norm": self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False, eps=1e-6) else: raise ValueError( f"Unsupported `norm_type` ({norm_type}) provided. Supported ones are: 'layer_norm', 'fp32_layer_norm'." ) def forward( self, hidden_states: torch.Tensor, emb: torch.Tensor, token_replace_emb: torch.Tensor, first_frame_num_tokens: int, ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: emb = self.linear(self.silu(emb)) token_replace_emb = self.linear(self.silu(token_replace_emb)) shift_msa, scale_msa, gate_msa = emb.chunk(3, dim=1) tr_shift_msa, tr_scale_msa, tr_gate_msa = token_replace_emb.chunk(3, dim=1) norm_hidden_states = self.norm(hidden_states) hidden_states_zero = ( norm_hidden_states[:, :first_frame_num_tokens] * (1 + tr_scale_msa[:, None]) + tr_shift_msa[:, None] ) hidden_states_orig = ( norm_hidden_states[:, first_frame_num_tokens:] * (1 + scale_msa[:, None]) + shift_msa[:, None] ) hidden_states = torch.cat([hidden_states_zero, hidden_states_orig], dim=1) return hidden_states, gate_msa, tr_gate_msa class HunyuanVideoConditionEmbedding(nn.Module): def __init__( self, embedding_dim: int, pooled_projection_dim: int, guidance_embeds: bool, image_condition_type: Optional[str] = None, ): super().__init__() self.image_condition_type = image_condition_type self.time_proj = Timesteps(num_channels=256, flip_sin_to_cos=True, downscale_freq_shift=0) self.timestep_embedder = TimestepEmbedding(in_channels=256, time_embed_dim=embedding_dim) self.text_embedder = PixArtAlphaTextProjection(pooled_projection_dim, embedding_dim, act_fn="silu") self.guidance_embedder = None if guidance_embeds: self.guidance_embedder = TimestepEmbedding(in_channels=256, time_embed_dim=embedding_dim) def forward( self, timestep: torch.Tensor, pooled_projection: torch.Tensor, guidance: Optional[torch.Tensor] = None ) -> Tuple[torch.Tensor, torch.Tensor]: timesteps_proj = self.time_proj(timestep) timesteps_emb = self.timestep_embedder(timesteps_proj.to(dtype=pooled_projection.dtype)) # (N, D) pooled_projections = self.text_embedder(pooled_projection) conditioning = timesteps_emb + pooled_projections token_replace_emb = None if self.image_condition_type == "token_replace": token_replace_timestep = 
torch.zeros_like(timestep) token_replace_proj = self.time_proj(token_replace_timestep) token_replace_emb = self.timestep_embedder(token_replace_proj.to(dtype=pooled_projection.dtype)) token_replace_emb = token_replace_emb + pooled_projections if self.guidance_embedder is not None: guidance_proj = self.time_proj(guidance) guidance_emb = self.guidance_embedder(guidance_proj.to(dtype=pooled_projection.dtype)) conditioning = conditioning + guidance_emb return conditioning, token_replace_emb class HunyuanVideoIndividualTokenRefinerBlock(nn.Module): def __init__( self, num_attention_heads: int, attention_head_dim: int, mlp_width_ratio: str = 4.0, mlp_drop_rate: float = 0.0, attention_bias: bool = True, ) -> None: super().__init__() hidden_size = num_attention_heads * attention_head_dim self.norm1 = nn.LayerNorm(hidden_size, elementwise_affine=True, eps=1e-6) self.attn = Attention( query_dim=hidden_size, cross_attention_dim=None, heads=num_attention_heads, dim_head=attention_head_dim, bias=attention_bias, ) self.norm2 = nn.LayerNorm(hidden_size, elementwise_affine=True, eps=1e-6) self.ff = FeedForward(hidden_size, mult=mlp_width_ratio, activation_fn="linear-silu", dropout=mlp_drop_rate) self.norm_out = HunyuanVideoAdaNorm(hidden_size, 2 * hidden_size) def forward( self, hidden_states: torch.Tensor, temb: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, ) -> torch.Tensor: norm_hidden_states = self.norm1(hidden_states) attn_output = self.attn( hidden_states=norm_hidden_states, encoder_hidden_states=None, attention_mask=attention_mask, ) gate_msa, gate_mlp = self.norm_out(temb) hidden_states = hidden_states + attn_output * gate_msa ff_output = self.ff(self.norm2(hidden_states)) hidden_states = hidden_states + ff_output * gate_mlp return hidden_states class HunyuanVideoIndividualTokenRefiner(nn.Module): def __init__( self, num_attention_heads: int, attention_head_dim: int, num_layers: int, mlp_width_ratio: float = 4.0, mlp_drop_rate: float = 0.0, attention_bias: bool = True, ) -> None: super().__init__() self.refiner_blocks = nn.ModuleList( [ HunyuanVideoIndividualTokenRefinerBlock( num_attention_heads=num_attention_heads, attention_head_dim=attention_head_dim, mlp_width_ratio=mlp_width_ratio, mlp_drop_rate=mlp_drop_rate, attention_bias=attention_bias, ) for _ in range(num_layers) ] ) def forward( self, hidden_states: torch.Tensor, temb: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, ) -> None: self_attn_mask = None if attention_mask is not None: batch_size = attention_mask.shape[0] seq_len = attention_mask.shape[1] attention_mask = attention_mask.to(hidden_states.device).bool() self_attn_mask_1 = attention_mask.view(batch_size, 1, 1, seq_len).repeat(1, 1, seq_len, 1) self_attn_mask_2 = self_attn_mask_1.transpose(2, 3) self_attn_mask = (self_attn_mask_1 & self_attn_mask_2).bool() self_attn_mask[:, :, :, 0] = True for block in self.refiner_blocks: hidden_states = block(hidden_states, temb, self_attn_mask) return hidden_states class HunyuanVideoTokenRefiner(nn.Module): def __init__( self, in_channels: int, num_attention_heads: int, attention_head_dim: int, num_layers: int, mlp_ratio: float = 4.0, mlp_drop_rate: float = 0.0, attention_bias: bool = True, ) -> None: super().__init__() hidden_size = num_attention_heads * attention_head_dim self.time_text_embed = CombinedTimestepTextProjEmbeddings( embedding_dim=hidden_size, pooled_projection_dim=in_channels ) self.proj_in = nn.Linear(in_channels, hidden_size, bias=True) self.token_refiner = HunyuanVideoIndividualTokenRefiner( 
num_attention_heads=num_attention_heads, attention_head_dim=attention_head_dim, num_layers=num_layers, mlp_width_ratio=mlp_ratio, mlp_drop_rate=mlp_drop_rate, attention_bias=attention_bias, ) def forward( self, hidden_states: torch.Tensor, timestep: torch.LongTensor, attention_mask: Optional[torch.LongTensor] = None, ) -> torch.Tensor: if attention_mask is None: pooled_projections = hidden_states.mean(dim=1) else: original_dtype = hidden_states.dtype mask_float = attention_mask.float().unsqueeze(-1) pooled_projections = (hidden_states * mask_float).sum(dim=1) / mask_float.sum(dim=1) pooled_projections = pooled_projections.to(original_dtype) temb = self.time_text_embed(timestep, pooled_projections) hidden_states = self.proj_in(hidden_states) hidden_states = self.token_refiner(hidden_states, temb, attention_mask) return hidden_states class HunyuanVideoRotaryPosEmbed(nn.Module): def __init__(self, patch_size: int, patch_size_t: int, rope_dim: List[int], theta: float = 256.0) -> None: super().__init__() self.patch_size = patch_size self.patch_size_t = patch_size_t self.rope_dim = rope_dim self.theta = theta def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: batch_size, num_channels, num_frames, height, width = hidden_states.shape rope_sizes = [num_frames // self.patch_size_t, height // self.patch_size, width // self.patch_size] axes_grids = [] for i in range(3): # Note: The following line diverges from original behaviour. We create the grid on the device, whereas # original implementation creates it on CPU and then moves it to device. This results in numerical # differences in layerwise debugging outputs, but visually it is the same. grid = torch.arange(0, rope_sizes[i], device=hidden_states.device, dtype=torch.float32) axes_grids.append(grid) grid = torch.meshgrid(*axes_grids, indexing="ij") # [W, H, T] grid = torch.stack(grid, dim=0) # [3, W, H, T] freqs = [] for i in range(3): freq = get_1d_rotary_pos_embed(self.rope_dim[i], grid[i].reshape(-1), self.theta, use_real=True) freqs.append(freq) freqs_cos = torch.cat([f[0] for f in freqs], dim=1) # (W * H * T, D / 2) freqs_sin = torch.cat([f[1] for f in freqs], dim=1) # (W * H * T, D / 2) return freqs_cos, freqs_sin class HunyuanVideoSingleTransformerBlock(nn.Module): def __init__( self, num_attention_heads: int, attention_head_dim: int, mlp_ratio: float = 4.0, qk_norm: str = "rms_norm", ) -> None: super().__init__() hidden_size = num_attention_heads * attention_head_dim mlp_dim = int(hidden_size * mlp_ratio) self.attn = Attention( query_dim=hidden_size, cross_attention_dim=None, dim_head=attention_head_dim, heads=num_attention_heads, out_dim=hidden_size, bias=True, processor=HunyuanVideoAttnProcessor2_0(), qk_norm=qk_norm, eps=1e-6, pre_only=True, ) self.norm = AdaLayerNormZeroSingle(hidden_size, norm_type="layer_norm") self.proj_mlp = nn.Linear(hidden_size, mlp_dim) self.act_mlp = nn.GELU(approximate="tanh") self.proj_out = nn.Linear(hidden_size + mlp_dim, hidden_size) def forward( self, hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor, temb: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, image_rotary_emb: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, *args, **kwargs, ) -> torch.Tensor: text_seq_length = encoder_hidden_states.shape[1] hidden_states = torch.cat([hidden_states, encoder_hidden_states], dim=1) residual = hidden_states # 1. 
Input normalization norm_hidden_states, gate = self.norm(hidden_states, emb=temb) mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) norm_hidden_states, norm_encoder_hidden_states = ( norm_hidden_states[:, :-text_seq_length, :], norm_hidden_states[:, -text_seq_length:, :], ) # 2. Attention attn_output, context_attn_output = self.attn( hidden_states=norm_hidden_states, encoder_hidden_states=norm_encoder_hidden_states, attention_mask=attention_mask, image_rotary_emb=image_rotary_emb, ) attn_output = torch.cat([attn_output, context_attn_output], dim=1) # 3. Modulation and residual connection hidden_states = torch.cat([attn_output, mlp_hidden_states], dim=2) hidden_states = gate.unsqueeze(1) * self.proj_out(hidden_states) hidden_states = hidden_states + residual hidden_states, encoder_hidden_states = ( hidden_states[:, :-text_seq_length, :], hidden_states[:, -text_seq_length:, :], ) return hidden_states, encoder_hidden_states class HunyuanVideoTransformerBlock(nn.Module): def __init__( self, num_attention_heads: int, attention_head_dim: int, mlp_ratio: float, qk_norm: str = "rms_norm", ) -> None: super().__init__() hidden_size = num_attention_heads * attention_head_dim self.norm1 = AdaLayerNormZero(hidden_size, norm_type="layer_norm") self.norm1_context = AdaLayerNormZero(hidden_size, norm_type="layer_norm") self.attn = Attention( query_dim=hidden_size, cross_attention_dim=None, added_kv_proj_dim=hidden_size, dim_head=attention_head_dim, heads=num_attention_heads, out_dim=hidden_size, context_pre_only=False, bias=True, processor=HunyuanVideoAttnProcessor2_0(), qk_norm=qk_norm, eps=1e-6, ) self.norm2 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6) self.ff = FeedForward(hidden_size, mult=mlp_ratio, activation_fn="gelu-approximate") self.norm2_context = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6) self.ff_context = FeedForward(hidden_size, mult=mlp_ratio, activation_fn="gelu-approximate") def forward( self, hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor, temb: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, freqs_cis: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, *args, **kwargs, ) -> Tuple[torch.Tensor, torch.Tensor]: # 1. Input normalization norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( encoder_hidden_states, emb=temb ) # 2. Joint attention attn_output, context_attn_output = self.attn( hidden_states=norm_hidden_states, encoder_hidden_states=norm_encoder_hidden_states, attention_mask=attention_mask, image_rotary_emb=freqs_cis, ) # 3. Modulation and residual connection hidden_states = hidden_states + attn_output * gate_msa.unsqueeze(1) encoder_hidden_states = encoder_hidden_states + context_attn_output * c_gate_msa.unsqueeze(1) norm_hidden_states = self.norm2(hidden_states) norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None] norm_encoder_hidden_states = norm_encoder_hidden_states * (1 + c_scale_mlp[:, None]) + c_shift_mlp[:, None] # 4. 
Feed-forward ff_output = self.ff(norm_hidden_states) context_ff_output = self.ff_context(norm_encoder_hidden_states) hidden_states = hidden_states + gate_mlp.unsqueeze(1) * ff_output encoder_hidden_states = encoder_hidden_states + c_gate_mlp.unsqueeze(1) * context_ff_output return hidden_states, encoder_hidden_states class HunyuanVideoTokenReplaceSingleTransformerBlock(nn.Module): def __init__( self, num_attention_heads: int, attention_head_dim: int, mlp_ratio: float = 4.0, qk_norm: str = "rms_norm", ) -> None: super().__init__() hidden_size = num_attention_heads * attention_head_dim mlp_dim = int(hidden_size * mlp_ratio) self.attn = Attention( query_dim=hidden_size, cross_attention_dim=None, dim_head=attention_head_dim, heads=num_attention_heads, out_dim=hidden_size, bias=True, processor=HunyuanVideoAttnProcessor2_0(), qk_norm=qk_norm, eps=1e-6, pre_only=True, ) self.norm = HunyuanVideoTokenReplaceAdaLayerNormZeroSingle(hidden_size, norm_type="layer_norm") self.proj_mlp = nn.Linear(hidden_size, mlp_dim) self.act_mlp = nn.GELU(approximate="tanh") self.proj_out = nn.Linear(hidden_size + mlp_dim, hidden_size) def forward( self, hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor, temb: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, image_rotary_emb: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, token_replace_emb: torch.Tensor = None, num_tokens: int = None, ) -> torch.Tensor: text_seq_length = encoder_hidden_states.shape[1] hidden_states = torch.cat([hidden_states, encoder_hidden_states], dim=1) residual = hidden_states # 1. Input normalization norm_hidden_states, gate, tr_gate = self.norm(hidden_states, temb, token_replace_emb, num_tokens) mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) norm_hidden_states, norm_encoder_hidden_states = ( norm_hidden_states[:, :-text_seq_length, :], norm_hidden_states[:, -text_seq_length:, :], ) # 2. Attention attn_output, context_attn_output = self.attn( hidden_states=norm_hidden_states, encoder_hidden_states=norm_encoder_hidden_states, attention_mask=attention_mask, image_rotary_emb=image_rotary_emb, ) attn_output = torch.cat([attn_output, context_attn_output], dim=1) # 3. 
Modulation and residual connection hidden_states = torch.cat([attn_output, mlp_hidden_states], dim=2) proj_output = self.proj_out(hidden_states) hidden_states_zero = proj_output[:, :num_tokens] * tr_gate.unsqueeze(1) hidden_states_orig = proj_output[:, num_tokens:] * gate.unsqueeze(1) hidden_states = torch.cat([hidden_states_zero, hidden_states_orig], dim=1) hidden_states = hidden_states + residual hidden_states, encoder_hidden_states = ( hidden_states[:, :-text_seq_length, :], hidden_states[:, -text_seq_length:, :], ) return hidden_states, encoder_hidden_states class HunyuanVideoTokenReplaceTransformerBlock(nn.Module): def __init__( self, num_attention_heads: int, attention_head_dim: int, mlp_ratio: float, qk_norm: str = "rms_norm", ) -> None: super().__init__() hidden_size = num_attention_heads * attention_head_dim self.norm1 = HunyuanVideoTokenReplaceAdaLayerNormZero(hidden_size, norm_type="layer_norm") self.norm1_context = AdaLayerNormZero(hidden_size, norm_type="layer_norm") self.attn = Attention( query_dim=hidden_size, cross_attention_dim=None, added_kv_proj_dim=hidden_size, dim_head=attention_head_dim, heads=num_attention_heads, out_dim=hidden_size, context_pre_only=False, bias=True, processor=HunyuanVideoAttnProcessor2_0(), qk_norm=qk_norm, eps=1e-6, ) self.norm2 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6) self.ff = FeedForward(hidden_size, mult=mlp_ratio, activation_fn="gelu-approximate") self.norm2_context = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6) self.ff_context = FeedForward(hidden_size, mult=mlp_ratio, activation_fn="gelu-approximate") def forward( self, hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor, temb: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, freqs_cis: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, token_replace_emb: torch.Tensor = None, num_tokens: int = None, ) -> Tuple[torch.Tensor, torch.Tensor]: # 1. Input normalization ( norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp, tr_gate_msa, tr_shift_mlp, tr_scale_mlp, tr_gate_mlp, ) = self.norm1(hidden_states, temb, token_replace_emb, num_tokens) norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( encoder_hidden_states, emb=temb ) # 2. Joint attention attn_output, context_attn_output = self.attn( hidden_states=norm_hidden_states, encoder_hidden_states=norm_encoder_hidden_states, attention_mask=attention_mask, image_rotary_emb=freqs_cis, ) # 3. Modulation and residual connection hidden_states_zero = hidden_states[:, :num_tokens] + attn_output[:, :num_tokens] * tr_gate_msa.unsqueeze(1) hidden_states_orig = hidden_states[:, num_tokens:] + attn_output[:, num_tokens:] * gate_msa.unsqueeze(1) hidden_states = torch.cat([hidden_states_zero, hidden_states_orig], dim=1) encoder_hidden_states = encoder_hidden_states + context_attn_output * c_gate_msa.unsqueeze(1) norm_hidden_states = self.norm2(hidden_states) norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) hidden_states_zero = norm_hidden_states[:, :num_tokens] * (1 + tr_scale_mlp[:, None]) + tr_shift_mlp[:, None] hidden_states_orig = norm_hidden_states[:, num_tokens:] * (1 + scale_mlp[:, None]) + shift_mlp[:, None] norm_hidden_states = torch.cat([hidden_states_zero, hidden_states_orig], dim=1) norm_encoder_hidden_states = norm_encoder_hidden_states * (1 + c_scale_mlp[:, None]) + c_shift_mlp[:, None] # 4. 
Feed-forward ff_output = self.ff(norm_hidden_states) context_ff_output = self.ff_context(norm_encoder_hidden_states) hidden_states_zero = hidden_states[:, :num_tokens] + ff_output[:, :num_tokens] * tr_gate_mlp.unsqueeze(1) hidden_states_orig = hidden_states[:, num_tokens:] + ff_output[:, num_tokens:] * gate_mlp.unsqueeze(1) hidden_states = torch.cat([hidden_states_zero, hidden_states_orig], dim=1) encoder_hidden_states = encoder_hidden_states + c_gate_mlp.unsqueeze(1) * context_ff_output return hidden_states, encoder_hidden_states class HunyuanVideoTransformer3DModel(ModelMixin, ConfigMixin, PeftAdapterMixin, FromOriginalModelMixin, CacheMixin): r""" A Transformer model for video-like data used in [HunyuanVideo](https://huggingface.co/tencent/HunyuanVideo). Args: in_channels (`int`, defaults to `16`): The number of channels in the input. out_channels (`int`, defaults to `16`): The number of channels in the output. num_attention_heads (`int`, defaults to `24`): The number of heads to use for multi-head attention. attention_head_dim (`int`, defaults to `128`): The number of channels in each head. num_layers (`int`, defaults to `20`): The number of layers of dual-stream blocks to use. num_single_layers (`int`, defaults to `40`): The number of layers of single-stream blocks to use. num_refiner_layers (`int`, defaults to `2`): The number of layers of refiner blocks to use. mlp_ratio (`float`, defaults to `4.0`): The ratio of the hidden layer size to the input size in the feedforward network. patch_size (`int`, defaults to `2`): The size of the spatial patches to use in the patch embedding layer. patch_size_t (`int`, defaults to `1`): The size of the temporal patches to use in the patch embedding layer. qk_norm (`str`, defaults to `rms_norm`): The normalization to use for the query and key projections in the attention layers. guidance_embeds (`bool`, defaults to `True`): Whether to use guidance embeddings in the model. text_embed_dim (`int`, defaults to `4096`): Input dimension of text embeddings from the text encoder. pooled_projection_dim (`int`, defaults to `768`): The dimension of the pooled projection of the text embeddings. rope_theta (`float`, defaults to `256.0`): The value of theta to use in the RoPE layer. rope_axes_dim (`Tuple[int]`, defaults to `(16, 56, 56)`): The dimensions of the axes to use in the RoPE layer. image_condition_type (`str`, *optional*, defaults to `None`): The type of image conditioning to use. If `None`, no image conditioning is used. If `latent_concat`, the image is concatenated to the latent stream. If `token_replace`, the image is used to replace first-frame tokens in the latent stream and apply conditioning.
""" _supports_gradient_checkpointing = True _skip_layerwise_casting_patterns = ["x_embedder", "context_embedder", "norm"] _no_split_modules = [ "HunyuanVideoTransformerBlock", "HunyuanVideoSingleTransformerBlock", "HunyuanVideoPatchEmbed", "HunyuanVideoTokenRefiner", ] _repeated_blocks = [ "HunyuanVideoTransformerBlock", "HunyuanVideoSingleTransformerBlock", "HunyuanVideoPatchEmbed", "HunyuanVideoTokenRefiner", ] @register_to_config def __init__( self, in_channels: int = 16, out_channels: int = 16, num_attention_heads: int = 24, attention_head_dim: int = 128, num_layers: int = 20, num_single_layers: int = 40, num_refiner_layers: int = 2, mlp_ratio: float = 4.0, patch_size: int = 2, patch_size_t: int = 1, qk_norm: str = "rms_norm", guidance_embeds: bool = True, text_embed_dim: int = 4096, pooled_projection_dim: int = 768, rope_theta: float = 256.0, rope_axes_dim: Tuple[int] = (16, 56, 56), image_condition_type: Optional[str] = None, ) -> None: super().__init__() supported_image_condition_types = ["latent_concat", "token_replace"] if image_condition_type is not None and image_condition_type not in supported_image_condition_types: raise ValueError( f"Invalid `image_condition_type` ({image_condition_type}). Supported ones are: {supported_image_condition_types}" ) inner_dim = num_attention_heads * attention_head_dim out_channels = out_channels or in_channels # 1. Latent and condition embedders self.x_embedder = HunyuanVideoPatchEmbed((patch_size_t, patch_size, patch_size), in_channels, inner_dim) self.context_embedder = HunyuanVideoTokenRefiner( text_embed_dim, num_attention_heads, attention_head_dim, num_layers=num_refiner_layers ) self.time_text_embed = HunyuanVideoConditionEmbedding( inner_dim, pooled_projection_dim, guidance_embeds, image_condition_type ) # 2. RoPE self.rope = HunyuanVideoRotaryPosEmbed(patch_size, patch_size_t, rope_axes_dim, rope_theta) # 3. Dual stream transformer blocks if image_condition_type == "token_replace": self.transformer_blocks = nn.ModuleList( [ HunyuanVideoTokenReplaceTransformerBlock( num_attention_heads, attention_head_dim, mlp_ratio=mlp_ratio, qk_norm=qk_norm ) for _ in range(num_layers) ] ) else: self.transformer_blocks = nn.ModuleList( [ HunyuanVideoTransformerBlock( num_attention_heads, attention_head_dim, mlp_ratio=mlp_ratio, qk_norm=qk_norm ) for _ in range(num_layers) ] ) # 4. Single stream transformer blocks if image_condition_type == "token_replace": self.single_transformer_blocks = nn.ModuleList( [ HunyuanVideoTokenReplaceSingleTransformerBlock( num_attention_heads, attention_head_dim, mlp_ratio=mlp_ratio, qk_norm=qk_norm ) for _ in range(num_single_layers) ] ) else: self.single_transformer_blocks = nn.ModuleList( [ HunyuanVideoSingleTransformerBlock( num_attention_heads, attention_head_dim, mlp_ratio=mlp_ratio, qk_norm=qk_norm ) for _ in range(num_single_layers) ] ) # 5. Output projection self.norm_out = AdaLayerNormContinuous(inner_dim, inner_dim, elementwise_affine=False, eps=1e-6) self.proj_out = nn.Linear(inner_dim, patch_size_t * patch_size * patch_size * out_channels) self.gradient_checkpointing = False @property # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors def attn_processors(self) -> Dict[str, AttentionProcessor]: r""" Returns: `dict` of attention processors: A dictionary containing all attention processors used in the model with indexed by its weight name. 
""" # set recursively processors = {} def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): if hasattr(module, "get_processor"): processors[f"{name}.processor"] = module.get_processor() for sub_name, child in module.named_children(): fn_recursive_add_processors(f"{name}.{sub_name}", child, processors) return processors for name, module in self.named_children(): fn_recursive_add_processors(name, module, processors) return processors # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): r""" Sets the attention processor to use to compute attention. Parameters: processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`): The instantiated processor class or a dictionary of processor classes that will be set as the processor for **all** `Attention` layers. If `processor` is a dict, the key needs to define the path to the corresponding cross attention processor. This is strongly recommended when setting trainable attention processors. """ count = len(self.attn_processors.keys()) if isinstance(processor, dict) and len(processor) != count: raise ValueError( f"A dict of processors was passed, but the number of processors {len(processor)} does not match the" f" number of attention layers: {count}. Please make sure to pass {count} processor classes." ) def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): if hasattr(module, "set_processor"): if not isinstance(processor, dict): module.set_processor(processor) else: module.set_processor(processor.pop(f"{name}.processor")) for sub_name, child in module.named_children(): fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor) for name, module in self.named_children(): fn_recursive_attn_processor(name, module, processor) def forward( self, hidden_states: torch.Tensor, timestep: torch.LongTensor, encoder_hidden_states: torch.Tensor, encoder_attention_mask: torch.Tensor, pooled_projections: torch.Tensor, guidance: torch.Tensor = None, attention_kwargs: Optional[Dict[str, Any]] = None, return_dict: bool = True, ) -> Union[torch.Tensor, Dict[str, torch.Tensor]]: if attention_kwargs is not None: attention_kwargs = attention_kwargs.copy() lora_scale = attention_kwargs.pop("scale", 1.0) else: lora_scale = 1.0 if USE_PEFT_BACKEND: # weight the lora layers by setting `lora_scale` for each PEFT layer scale_lora_layers(self, lora_scale) else: if attention_kwargs is not None and attention_kwargs.get("scale", None) is not None: logger.warning( "Passing `scale` via `attention_kwargs` when not using the PEFT backend is ineffective." ) batch_size, num_channels, num_frames, height, width = hidden_states.shape p, p_t = self.config.patch_size, self.config.patch_size_t post_patch_num_frames = num_frames // p_t post_patch_height = height // p post_patch_width = width // p first_frame_num_tokens = 1 * post_patch_height * post_patch_width # 1. RoPE image_rotary_emb = self.rope(hidden_states) # 2. Conditional embeddings temb, token_replace_emb = self.time_text_embed(timestep, pooled_projections, guidance) hidden_states = self.x_embedder(hidden_states) encoder_hidden_states = self.context_embedder(encoder_hidden_states, timestep, encoder_attention_mask) # 3. 
Attention mask preparation latent_sequence_length = hidden_states.shape[1] condition_sequence_length = encoder_hidden_states.shape[1] sequence_length = latent_sequence_length + condition_sequence_length attention_mask = torch.ones( batch_size, sequence_length, device=hidden_states.device, dtype=torch.bool ) # [B, N] effective_condition_sequence_length = encoder_attention_mask.sum(dim=1, dtype=torch.int) # [B,] effective_sequence_length = latent_sequence_length + effective_condition_sequence_length indices = torch.arange(sequence_length, device=hidden_states.device).unsqueeze(0) # [1, N] mask_indices = indices >= effective_sequence_length.unsqueeze(1) # [B, N] attention_mask = attention_mask.masked_fill(mask_indices, False) attention_mask = attention_mask.unsqueeze(1).unsqueeze(1) # [B, 1, 1, N] # 4. Transformer blocks if torch.is_grad_enabled() and self.gradient_checkpointing: for block in self.transformer_blocks: hidden_states, encoder_hidden_states = self._gradient_checkpointing_func( block, hidden_states, encoder_hidden_states, temb, attention_mask, image_rotary_emb, token_replace_emb, first_frame_num_tokens, ) for block in self.single_transformer_blocks: hidden_states, encoder_hidden_states = self._gradient_checkpointing_func( block, hidden_states, encoder_hidden_states, temb, attention_mask, image_rotary_emb, token_replace_emb, first_frame_num_tokens, ) else: for block in self.transformer_blocks: hidden_states, encoder_hidden_states = block( hidden_states, encoder_hidden_states, temb, attention_mask, image_rotary_emb, token_replace_emb, first_frame_num_tokens, ) for block in self.single_transformer_blocks: hidden_states, encoder_hidden_states = block( hidden_states, encoder_hidden_states, temb, attention_mask, image_rotary_emb, token_replace_emb, first_frame_num_tokens, ) # 5. Output projection hidden_states = self.norm_out(hidden_states, temb) hidden_states = self.proj_out(hidden_states) hidden_states = hidden_states.reshape( batch_size, post_patch_num_frames, post_patch_height, post_patch_width, -1, p_t, p, p ) hidden_states = hidden_states.permute(0, 4, 1, 5, 2, 6, 3, 7) hidden_states = hidden_states.flatten(6, 7).flatten(4, 5).flatten(2, 3) if USE_PEFT_BACKEND: # remove `lora_scale` from each PEFT layer unscale_lora_layers(self, lora_scale) if not return_dict: return (hidden_states,) return Transformer2DModelOutput(sample=hidden_states)
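# Minimal usage sketch for the model defined above. The configuration values below are
# illustrative assumptions chosen only so that the shapes line up (in particular,
# sum(rope_axes_dim) must equal attention_head_dim); they do not correspond to any released
# HunyuanVideo checkpoint and are meant purely as a small smoke test of the forward pass.
if __name__ == "__main__":
    tiny_model = HunyuanVideoTransformer3DModel(
        in_channels=4,
        out_channels=4,
        num_attention_heads=2,
        attention_head_dim=8,
        num_layers=1,
        num_single_layers=1,
        num_refiner_layers=1,
        patch_size=2,
        patch_size_t=1,
        guidance_embeds=False,
        text_embed_dim=16,
        pooled_projection_dim=8,
        rope_axes_dim=(2, 2, 4),
    )
    latents = torch.randn(1, 4, 1, 8, 8)  # [batch, channels, frames, height, width]
    timestep = torch.tensor([500.0])
    encoder_hidden_states = torch.randn(1, 6, 16)  # [batch, text tokens, text_embed_dim]
    encoder_attention_mask = torch.ones(1, 6, dtype=torch.bool)
    pooled_projections = torch.randn(1, 8)  # [batch, pooled_projection_dim]
    with torch.no_grad():
        sample = tiny_model(
            hidden_states=latents,
            timestep=timestep,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            pooled_projections=pooled_projections,
            return_dict=False,
        )[0]
    # The output is unpatchified back to the latent video shape, i.e. [1, 4, 1, 8, 8] here.
    print(sample.shape)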
# Source: diffusers/src/diffusers/models/transformers/transformer_hunyuan_video.py
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Any, Dict, Optional, Tuple, Union import numpy as np import torch import torch.nn.functional as F from torch import nn from ...utils import deprecate, logging from ...utils.torch_utils import apply_freeu from ..activations import get_activation from ..attention_processor import Attention, AttnAddedKVProcessor, AttnAddedKVProcessor2_0 from ..normalization import AdaGroupNorm from ..resnet import ( Downsample2D, FirDownsample2D, FirUpsample2D, KDownsample2D, KUpsample2D, ResnetBlock2D, ResnetBlockCondNorm2D, Upsample2D, ) from ..transformers.dual_transformer_2d import DualTransformer2DModel from ..transformers.transformer_2d import Transformer2DModel logger = logging.get_logger(__name__) # pylint: disable=invalid-name def get_down_block( down_block_type: str, num_layers: int, in_channels: int, out_channels: int, temb_channels: int, add_downsample: bool, resnet_eps: float, resnet_act_fn: str, transformer_layers_per_block: int = 1, num_attention_heads: Optional[int] = None, resnet_groups: Optional[int] = None, cross_attention_dim: Optional[int] = None, downsample_padding: Optional[int] = None, dual_cross_attention: bool = False, use_linear_projection: bool = False, only_cross_attention: bool = False, upcast_attention: bool = False, resnet_time_scale_shift: str = "default", attention_type: str = "default", resnet_skip_time_act: bool = False, resnet_out_scale_factor: float = 1.0, cross_attention_norm: Optional[str] = None, attention_head_dim: Optional[int] = None, downsample_type: Optional[str] = None, dropout: float = 0.0, ): # If attn head dim is not defined, we default it to the number of heads if attention_head_dim is None: logger.warning( f"It is recommended to provide `attention_head_dim` when calling `get_down_block`. Defaulting `attention_head_dim` to {num_attention_heads}." 
) attention_head_dim = num_attention_heads down_block_type = down_block_type[7:] if down_block_type.startswith("UNetRes") else down_block_type if down_block_type == "DownBlock2D": return DownBlock2D( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, dropout=dropout, add_downsample=add_downsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, downsample_padding=downsample_padding, resnet_time_scale_shift=resnet_time_scale_shift, ) elif down_block_type == "ResnetDownsampleBlock2D": return ResnetDownsampleBlock2D( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, dropout=dropout, add_downsample=add_downsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, resnet_time_scale_shift=resnet_time_scale_shift, skip_time_act=resnet_skip_time_act, output_scale_factor=resnet_out_scale_factor, ) elif down_block_type == "AttnDownBlock2D": if add_downsample is False: downsample_type = None else: downsample_type = downsample_type or "conv" # default to 'conv' return AttnDownBlock2D( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, dropout=dropout, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, downsample_padding=downsample_padding, attention_head_dim=attention_head_dim, resnet_time_scale_shift=resnet_time_scale_shift, downsample_type=downsample_type, ) elif down_block_type == "CrossAttnDownBlock2D": if cross_attention_dim is None: raise ValueError("cross_attention_dim must be specified for CrossAttnDownBlock2D") return CrossAttnDownBlock2D( num_layers=num_layers, transformer_layers_per_block=transformer_layers_per_block, in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, dropout=dropout, add_downsample=add_downsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, downsample_padding=downsample_padding, cross_attention_dim=cross_attention_dim, num_attention_heads=num_attention_heads, dual_cross_attention=dual_cross_attention, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention, upcast_attention=upcast_attention, resnet_time_scale_shift=resnet_time_scale_shift, attention_type=attention_type, ) elif down_block_type == "SimpleCrossAttnDownBlock2D": if cross_attention_dim is None: raise ValueError("cross_attention_dim must be specified for SimpleCrossAttnDownBlock2D") return SimpleCrossAttnDownBlock2D( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, dropout=dropout, add_downsample=add_downsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, cross_attention_dim=cross_attention_dim, attention_head_dim=attention_head_dim, resnet_time_scale_shift=resnet_time_scale_shift, skip_time_act=resnet_skip_time_act, output_scale_factor=resnet_out_scale_factor, only_cross_attention=only_cross_attention, cross_attention_norm=cross_attention_norm, ) elif down_block_type == "SkipDownBlock2D": return SkipDownBlock2D( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, dropout=dropout, add_downsample=add_downsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, downsample_padding=downsample_padding, resnet_time_scale_shift=resnet_time_scale_shift, ) elif down_block_type == "AttnSkipDownBlock2D": return AttnSkipDownBlock2D( 
num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, dropout=dropout, add_downsample=add_downsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, attention_head_dim=attention_head_dim, resnet_time_scale_shift=resnet_time_scale_shift, ) elif down_block_type == "DownEncoderBlock2D": return DownEncoderBlock2D( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, dropout=dropout, add_downsample=add_downsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, downsample_padding=downsample_padding, resnet_time_scale_shift=resnet_time_scale_shift, ) elif down_block_type == "AttnDownEncoderBlock2D": return AttnDownEncoderBlock2D( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, dropout=dropout, add_downsample=add_downsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, downsample_padding=downsample_padding, attention_head_dim=attention_head_dim, resnet_time_scale_shift=resnet_time_scale_shift, ) elif down_block_type == "KDownBlock2D": return KDownBlock2D( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, dropout=dropout, add_downsample=add_downsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, ) elif down_block_type == "KCrossAttnDownBlock2D": return KCrossAttnDownBlock2D( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, dropout=dropout, add_downsample=add_downsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, cross_attention_dim=cross_attention_dim, attention_head_dim=attention_head_dim, add_self_attention=True if not add_downsample else False, ) raise ValueError(f"{down_block_type} does not exist.") def get_mid_block( mid_block_type: str, temb_channels: int, in_channels: int, resnet_eps: float, resnet_act_fn: str, resnet_groups: int, output_scale_factor: float = 1.0, transformer_layers_per_block: int = 1, num_attention_heads: Optional[int] = None, cross_attention_dim: Optional[int] = None, dual_cross_attention: bool = False, use_linear_projection: bool = False, mid_block_only_cross_attention: bool = False, upcast_attention: bool = False, resnet_time_scale_shift: str = "default", attention_type: str = "default", resnet_skip_time_act: bool = False, cross_attention_norm: Optional[str] = None, attention_head_dim: Optional[int] = 1, dropout: float = 0.0, ): if mid_block_type == "UNetMidBlock2DCrossAttn": return UNetMidBlock2DCrossAttn( transformer_layers_per_block=transformer_layers_per_block, in_channels=in_channels, temb_channels=temb_channels, dropout=dropout, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, output_scale_factor=output_scale_factor, resnet_time_scale_shift=resnet_time_scale_shift, cross_attention_dim=cross_attention_dim, num_attention_heads=num_attention_heads, resnet_groups=resnet_groups, dual_cross_attention=dual_cross_attention, use_linear_projection=use_linear_projection, upcast_attention=upcast_attention, attention_type=attention_type, ) elif mid_block_type == "UNetMidBlock2DSimpleCrossAttn": return UNetMidBlock2DSimpleCrossAttn( in_channels=in_channels, temb_channels=temb_channels, dropout=dropout, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, output_scale_factor=output_scale_factor, cross_attention_dim=cross_attention_dim, attention_head_dim=attention_head_dim, resnet_groups=resnet_groups, resnet_time_scale_shift=resnet_time_scale_shift, 
skip_time_act=resnet_skip_time_act, only_cross_attention=mid_block_only_cross_attention, cross_attention_norm=cross_attention_norm, ) elif mid_block_type == "UNetMidBlock2D": return UNetMidBlock2D( in_channels=in_channels, temb_channels=temb_channels, dropout=dropout, num_layers=0, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, output_scale_factor=output_scale_factor, resnet_groups=resnet_groups, resnet_time_scale_shift=resnet_time_scale_shift, add_attention=False, ) elif mid_block_type is None: return None else: raise ValueError(f"unknown mid_block_type : {mid_block_type}") def get_up_block( up_block_type: str, num_layers: int, in_channels: int, out_channels: int, prev_output_channel: int, temb_channels: int, add_upsample: bool, resnet_eps: float, resnet_act_fn: str, resolution_idx: Optional[int] = None, transformer_layers_per_block: int = 1, num_attention_heads: Optional[int] = None, resnet_groups: Optional[int] = None, cross_attention_dim: Optional[int] = None, dual_cross_attention: bool = False, use_linear_projection: bool = False, only_cross_attention: bool = False, upcast_attention: bool = False, resnet_time_scale_shift: str = "default", attention_type: str = "default", resnet_skip_time_act: bool = False, resnet_out_scale_factor: float = 1.0, cross_attention_norm: Optional[str] = None, attention_head_dim: Optional[int] = None, upsample_type: Optional[str] = None, dropout: float = 0.0, ) -> nn.Module: # If attn head dim is not defined, we default it to the number of heads if attention_head_dim is None: logger.warning( f"It is recommended to provide `attention_head_dim` when calling `get_up_block`. Defaulting `attention_head_dim` to {num_attention_heads}." ) attention_head_dim = num_attention_heads up_block_type = up_block_type[7:] if up_block_type.startswith("UNetRes") else up_block_type if up_block_type == "UpBlock2D": return UpBlock2D( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, prev_output_channel=prev_output_channel, temb_channels=temb_channels, resolution_idx=resolution_idx, dropout=dropout, add_upsample=add_upsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, resnet_time_scale_shift=resnet_time_scale_shift, ) elif up_block_type == "ResnetUpsampleBlock2D": return ResnetUpsampleBlock2D( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, prev_output_channel=prev_output_channel, temb_channels=temb_channels, resolution_idx=resolution_idx, dropout=dropout, add_upsample=add_upsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, resnet_time_scale_shift=resnet_time_scale_shift, skip_time_act=resnet_skip_time_act, output_scale_factor=resnet_out_scale_factor, ) elif up_block_type == "CrossAttnUpBlock2D": if cross_attention_dim is None: raise ValueError("cross_attention_dim must be specified for CrossAttnUpBlock2D") return CrossAttnUpBlock2D( num_layers=num_layers, transformer_layers_per_block=transformer_layers_per_block, in_channels=in_channels, out_channels=out_channels, prev_output_channel=prev_output_channel, temb_channels=temb_channels, resolution_idx=resolution_idx, dropout=dropout, add_upsample=add_upsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, cross_attention_dim=cross_attention_dim, num_attention_heads=num_attention_heads, dual_cross_attention=dual_cross_attention, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention, upcast_attention=upcast_attention, 
resnet_time_scale_shift=resnet_time_scale_shift, attention_type=attention_type, ) elif up_block_type == "SimpleCrossAttnUpBlock2D": if cross_attention_dim is None: raise ValueError("cross_attention_dim must be specified for SimpleCrossAttnUpBlock2D") return SimpleCrossAttnUpBlock2D( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, prev_output_channel=prev_output_channel, temb_channels=temb_channels, resolution_idx=resolution_idx, dropout=dropout, add_upsample=add_upsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, cross_attention_dim=cross_attention_dim, attention_head_dim=attention_head_dim, resnet_time_scale_shift=resnet_time_scale_shift, skip_time_act=resnet_skip_time_act, output_scale_factor=resnet_out_scale_factor, only_cross_attention=only_cross_attention, cross_attention_norm=cross_attention_norm, ) elif up_block_type == "AttnUpBlock2D": if add_upsample is False: upsample_type = None else: upsample_type = upsample_type or "conv" # default to 'conv' return AttnUpBlock2D( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, prev_output_channel=prev_output_channel, temb_channels=temb_channels, resolution_idx=resolution_idx, dropout=dropout, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, attention_head_dim=attention_head_dim, resnet_time_scale_shift=resnet_time_scale_shift, upsample_type=upsample_type, ) elif up_block_type == "SkipUpBlock2D": return SkipUpBlock2D( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, prev_output_channel=prev_output_channel, temb_channels=temb_channels, resolution_idx=resolution_idx, dropout=dropout, add_upsample=add_upsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_time_scale_shift=resnet_time_scale_shift, ) elif up_block_type == "AttnSkipUpBlock2D": return AttnSkipUpBlock2D( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, prev_output_channel=prev_output_channel, temb_channels=temb_channels, resolution_idx=resolution_idx, dropout=dropout, add_upsample=add_upsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, attention_head_dim=attention_head_dim, resnet_time_scale_shift=resnet_time_scale_shift, ) elif up_block_type == "UpDecoderBlock2D": return UpDecoderBlock2D( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, resolution_idx=resolution_idx, dropout=dropout, add_upsample=add_upsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, resnet_time_scale_shift=resnet_time_scale_shift, temb_channels=temb_channels, ) elif up_block_type == "AttnUpDecoderBlock2D": return AttnUpDecoderBlock2D( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, resolution_idx=resolution_idx, dropout=dropout, add_upsample=add_upsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, attention_head_dim=attention_head_dim, resnet_time_scale_shift=resnet_time_scale_shift, temb_channels=temb_channels, ) elif up_block_type == "KUpBlock2D": return KUpBlock2D( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, resolution_idx=resolution_idx, dropout=dropout, add_upsample=add_upsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, ) elif up_block_type == "KCrossAttnUpBlock2D": return KCrossAttnUpBlock2D( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, 
resolution_idx=resolution_idx, dropout=dropout, add_upsample=add_upsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, cross_attention_dim=cross_attention_dim, attention_head_dim=attention_head_dim, ) raise ValueError(f"{up_block_type} does not exist.") class AutoencoderTinyBlock(nn.Module): """ Tiny Autoencoder block used in [`AutoencoderTiny`]. It is a mini residual module consisting of plain conv + ReLU blocks. Args: in_channels (`int`): The number of input channels. out_channels (`int`): The number of output channels. act_fn (`str`): ` The activation function to use. Supported values are `"swish"`, `"mish"`, `"gelu"`, and `"relu"`. Returns: `torch.Tensor`: A tensor with the same shape as the input tensor, but with the number of channels equal to `out_channels`. """ def __init__(self, in_channels: int, out_channels: int, act_fn: str): super().__init__() act_fn = get_activation(act_fn) self.conv = nn.Sequential( nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1), act_fn, nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1), act_fn, nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1), ) self.skip = ( nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False) if in_channels != out_channels else nn.Identity() ) self.fuse = nn.ReLU() def forward(self, x: torch.Tensor) -> torch.Tensor: return self.fuse(self.conv(x) + self.skip(x)) class UNetMidBlock2D(nn.Module): """ A 2D UNet mid-block [`UNetMidBlock2D`] with multiple residual blocks and optional attention blocks. Args: in_channels (`int`): The number of input channels. temb_channels (`int`): The number of temporal embedding channels. dropout (`float`, *optional*, defaults to 0.0): The dropout rate. num_layers (`int`, *optional*, defaults to 1): The number of residual blocks. resnet_eps (`float`, *optional*, 1e-6 ): The epsilon value for the resnet blocks. resnet_time_scale_shift (`str`, *optional*, defaults to `default`): The type of normalization to apply to the time embeddings. This can help to improve the performance of the model on tasks with long-range temporal dependencies. resnet_act_fn (`str`, *optional*, defaults to `swish`): The activation function for the resnet blocks. resnet_groups (`int`, *optional*, defaults to 32): The number of groups to use in the group normalization layers of the resnet blocks. attn_groups (`Optional[int]`, *optional*, defaults to None): The number of groups for the attention blocks. resnet_pre_norm (`bool`, *optional*, defaults to `True`): Whether to use pre-normalization for the resnet blocks. add_attention (`bool`, *optional*, defaults to `True`): Whether to add attention blocks. attention_head_dim (`int`, *optional*, defaults to 1): Dimension of a single attention head. The number of attention heads is determined based on this value and the number of input channels. output_scale_factor (`float`, *optional*, defaults to 1.0): The output scale factor. Returns: `torch.Tensor`: The output of the last residual block, which is a tensor of shape `(batch_size, in_channels, height, width)`. 
""" def __init__( self, in_channels: int, temb_channels: int, dropout: float = 0.0, num_layers: int = 1, resnet_eps: float = 1e-6, resnet_time_scale_shift: str = "default", # default, spatial resnet_act_fn: str = "swish", resnet_groups: int = 32, attn_groups: Optional[int] = None, resnet_pre_norm: bool = True, add_attention: bool = True, attention_head_dim: int = 1, output_scale_factor: float = 1.0, ): super().__init__() resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32) self.add_attention = add_attention if attn_groups is None: attn_groups = resnet_groups if resnet_time_scale_shift == "default" else None # there is always at least one resnet if resnet_time_scale_shift == "spatial": resnets = [ ResnetBlockCondNorm2D( in_channels=in_channels, out_channels=in_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm="spatial", non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, ) ] else: resnets = [ ResnetBlock2D( in_channels=in_channels, out_channels=in_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, ) ] attentions = [] if attention_head_dim is None: logger.warning( f"It is not recommend to pass `attention_head_dim=None`. Defaulting `attention_head_dim` to `in_channels`: {in_channels}." ) attention_head_dim = in_channels for _ in range(num_layers): if self.add_attention: attentions.append( Attention( in_channels, heads=in_channels // attention_head_dim, dim_head=attention_head_dim, rescale_output_factor=output_scale_factor, eps=resnet_eps, norm_num_groups=attn_groups, spatial_norm_dim=temb_channels if resnet_time_scale_shift == "spatial" else None, residual_connection=True, bias=True, upcast_softmax=True, _from_deprecated_attn_block=True, ) ) else: attentions.append(None) if resnet_time_scale_shift == "spatial": resnets.append( ResnetBlockCondNorm2D( in_channels=in_channels, out_channels=in_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm="spatial", non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, ) ) else: resnets.append( ResnetBlock2D( in_channels=in_channels, out_channels=in_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, ) ) self.attentions = nn.ModuleList(attentions) self.resnets = nn.ModuleList(resnets) self.gradient_checkpointing = False def forward(self, hidden_states: torch.Tensor, temb: Optional[torch.Tensor] = None) -> torch.Tensor: hidden_states = self.resnets[0](hidden_states, temb) for attn, resnet in zip(self.attentions, self.resnets[1:]): if torch.is_grad_enabled() and self.gradient_checkpointing: if attn is not None: hidden_states = attn(hidden_states, temb=temb) hidden_states = self._gradient_checkpointing_func(resnet, hidden_states, temb) else: if attn is not None: hidden_states = attn(hidden_states, temb=temb) hidden_states = resnet(hidden_states, temb) return hidden_states class UNetMidBlock2DCrossAttn(nn.Module): def __init__( self, in_channels: int, temb_channels: int, out_channels: Optional[int] = None, dropout: float = 0.0, num_layers: int = 1, transformer_layers_per_block: Union[int, Tuple[int]] = 1, 
resnet_eps: float = 1e-6, resnet_time_scale_shift: str = "default", resnet_act_fn: str = "swish", resnet_groups: int = 32, resnet_groups_out: Optional[int] = None, resnet_pre_norm: bool = True, num_attention_heads: int = 1, output_scale_factor: float = 1.0, cross_attention_dim: int = 1280, dual_cross_attention: bool = False, use_linear_projection: bool = False, upcast_attention: bool = False, attention_type: str = "default", ): super().__init__() out_channels = out_channels or in_channels self.in_channels = in_channels self.out_channels = out_channels self.has_cross_attention = True self.num_attention_heads = num_attention_heads resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32) # support for variable transformer layers per block if isinstance(transformer_layers_per_block, int): transformer_layers_per_block = [transformer_layers_per_block] * num_layers resnet_groups_out = resnet_groups_out or resnet_groups # there is always at least one resnet resnets = [ ResnetBlock2D( in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, groups_out=resnet_groups_out, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, ) ] attentions = [] for i in range(num_layers): if not dual_cross_attention: attentions.append( Transformer2DModel( num_attention_heads, out_channels // num_attention_heads, in_channels=out_channels, num_layers=transformer_layers_per_block[i], cross_attention_dim=cross_attention_dim, norm_num_groups=resnet_groups_out, use_linear_projection=use_linear_projection, upcast_attention=upcast_attention, attention_type=attention_type, ) ) else: attentions.append( DualTransformer2DModel( num_attention_heads, out_channels // num_attention_heads, in_channels=out_channels, num_layers=1, cross_attention_dim=cross_attention_dim, norm_num_groups=resnet_groups, ) ) resnets.append( ResnetBlock2D( in_channels=out_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups_out, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, ) ) self.attentions = nn.ModuleList(attentions) self.resnets = nn.ModuleList(resnets) self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, temb: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, cross_attention_kwargs: Optional[Dict[str, Any]] = None, encoder_attention_mask: Optional[torch.Tensor] = None, ) -> torch.Tensor: if cross_attention_kwargs is not None: if cross_attention_kwargs.get("scale", None) is not None: logger.warning("Passing `scale` to `cross_attention_kwargs` is deprecated. 
`scale` will be ignored.") hidden_states = self.resnets[0](hidden_states, temb) for attn, resnet in zip(self.attentions, self.resnets[1:]): if torch.is_grad_enabled() and self.gradient_checkpointing: hidden_states = attn( hidden_states, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs, attention_mask=attention_mask, encoder_attention_mask=encoder_attention_mask, return_dict=False, )[0] hidden_states = self._gradient_checkpointing_func(resnet, hidden_states, temb) else: hidden_states = attn( hidden_states, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs, attention_mask=attention_mask, encoder_attention_mask=encoder_attention_mask, return_dict=False, )[0] hidden_states = resnet(hidden_states, temb) return hidden_states class UNetMidBlock2DSimpleCrossAttn(nn.Module): def __init__( self, in_channels: int, temb_channels: int, dropout: float = 0.0, num_layers: int = 1, resnet_eps: float = 1e-6, resnet_time_scale_shift: str = "default", resnet_act_fn: str = "swish", resnet_groups: int = 32, resnet_pre_norm: bool = True, attention_head_dim: int = 1, output_scale_factor: float = 1.0, cross_attention_dim: int = 1280, skip_time_act: bool = False, only_cross_attention: bool = False, cross_attention_norm: Optional[str] = None, ): super().__init__() self.has_cross_attention = True self.attention_head_dim = attention_head_dim resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32) self.num_heads = in_channels // self.attention_head_dim # there is always at least one resnet resnets = [ ResnetBlock2D( in_channels=in_channels, out_channels=in_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, skip_time_act=skip_time_act, ) ] attentions = [] for _ in range(num_layers): processor = ( AttnAddedKVProcessor2_0() if hasattr(F, "scaled_dot_product_attention") else AttnAddedKVProcessor() ) attentions.append( Attention( query_dim=in_channels, cross_attention_dim=in_channels, heads=self.num_heads, dim_head=self.attention_head_dim, added_kv_proj_dim=cross_attention_dim, norm_num_groups=resnet_groups, bias=True, upcast_softmax=True, only_cross_attention=only_cross_attention, cross_attention_norm=cross_attention_norm, processor=processor, ) ) resnets.append( ResnetBlock2D( in_channels=in_channels, out_channels=in_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, skip_time_act=skip_time_act, ) ) self.attentions = nn.ModuleList(attentions) self.resnets = nn.ModuleList(resnets) def forward( self, hidden_states: torch.Tensor, temb: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, cross_attention_kwargs: Optional[Dict[str, Any]] = None, encoder_attention_mask: Optional[torch.Tensor] = None, ) -> torch.Tensor: cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {} if cross_attention_kwargs.get("scale", None) is not None: logger.warning("Passing `scale` to `cross_attention_kwargs` is deprecated. 
`scale` will be ignored.") if attention_mask is None: # if encoder_hidden_states is defined: we are doing cross-attn, so we should use cross-attn mask. mask = None if encoder_hidden_states is None else encoder_attention_mask else: # when attention_mask is defined: we don't even check for encoder_attention_mask. # this is to maintain compatibility with UnCLIP, which uses 'attention_mask' param for cross-attn masks. # TODO: UnCLIP should express cross-attn mask via encoder_attention_mask param instead of via attention_mask. # then we can simplify this whole if/else block to: # mask = attention_mask if encoder_hidden_states is None else encoder_attention_mask mask = attention_mask hidden_states = self.resnets[0](hidden_states, temb) for attn, resnet in zip(self.attentions, self.resnets[1:]): # attn hidden_states = attn( hidden_states, encoder_hidden_states=encoder_hidden_states, attention_mask=mask, **cross_attention_kwargs, ) # resnet hidden_states = resnet(hidden_states, temb) return hidden_states class AttnDownBlock2D(nn.Module): def __init__( self, in_channels: int, out_channels: int, temb_channels: int, dropout: float = 0.0, num_layers: int = 1, resnet_eps: float = 1e-6, resnet_time_scale_shift: str = "default", resnet_act_fn: str = "swish", resnet_groups: int = 32, resnet_pre_norm: bool = True, attention_head_dim: int = 1, output_scale_factor: float = 1.0, downsample_padding: int = 1, downsample_type: str = "conv", ): super().__init__() resnets = [] attentions = [] self.downsample_type = downsample_type if attention_head_dim is None: logger.warning( f"It is not recommend to pass `attention_head_dim=None`. Defaulting `attention_head_dim` to `in_channels`: {out_channels}." ) attention_head_dim = out_channels for i in range(num_layers): in_channels = in_channels if i == 0 else out_channels resnets.append( ResnetBlock2D( in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, ) ) attentions.append( Attention( out_channels, heads=out_channels // attention_head_dim, dim_head=attention_head_dim, rescale_output_factor=output_scale_factor, eps=resnet_eps, norm_num_groups=resnet_groups, residual_connection=True, bias=True, upcast_softmax=True, _from_deprecated_attn_block=True, ) ) self.attentions = nn.ModuleList(attentions) self.resnets = nn.ModuleList(resnets) if downsample_type == "conv": self.downsamplers = nn.ModuleList( [ Downsample2D( out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name="op" ) ] ) elif downsample_type == "resnet": self.downsamplers = nn.ModuleList( [ ResnetBlock2D( in_channels=out_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, down=True, ) ] ) else: self.downsamplers = None self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, temb: Optional[torch.Tensor] = None, upsample_size: Optional[int] = None, cross_attention_kwargs: Optional[Dict[str, Any]] = None, ) -> Tuple[torch.Tensor, Tuple[torch.Tensor, ...]]: cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {} if cross_attention_kwargs.get("scale", None) is not None: logger.warning("Passing 
`scale` to `cross_attention_kwargs` is deprecated. `scale` will be ignored.") output_states = () for resnet, attn in zip(self.resnets, self.attentions): if torch.is_grad_enabled() and self.gradient_checkpointing: hidden_states = self._gradient_checkpointing_func(resnet, hidden_states, temb) hidden_states = attn(hidden_states, **cross_attention_kwargs) output_states = output_states + (hidden_states,) else: hidden_states = resnet(hidden_states, temb) hidden_states = attn(hidden_states, **cross_attention_kwargs) output_states = output_states + (hidden_states,) if self.downsamplers is not None: for downsampler in self.downsamplers: if self.downsample_type == "resnet": hidden_states = downsampler(hidden_states, temb=temb) else: hidden_states = downsampler(hidden_states) output_states += (hidden_states,) return hidden_states, output_states class CrossAttnDownBlock2D(nn.Module): def __init__( self, in_channels: int, out_channels: int, temb_channels: int, dropout: float = 0.0, num_layers: int = 1, transformer_layers_per_block: Union[int, Tuple[int]] = 1, resnet_eps: float = 1e-6, resnet_time_scale_shift: str = "default", resnet_act_fn: str = "swish", resnet_groups: int = 32, resnet_pre_norm: bool = True, num_attention_heads: int = 1, cross_attention_dim: int = 1280, output_scale_factor: float = 1.0, downsample_padding: int = 1, add_downsample: bool = True, dual_cross_attention: bool = False, use_linear_projection: bool = False, only_cross_attention: bool = False, upcast_attention: bool = False, attention_type: str = "default", ): super().__init__() resnets = [] attentions = [] self.has_cross_attention = True self.num_attention_heads = num_attention_heads if isinstance(transformer_layers_per_block, int): transformer_layers_per_block = [transformer_layers_per_block] * num_layers for i in range(num_layers): in_channels = in_channels if i == 0 else out_channels resnets.append( ResnetBlock2D( in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, ) ) if not dual_cross_attention: attentions.append( Transformer2DModel( num_attention_heads, out_channels // num_attention_heads, in_channels=out_channels, num_layers=transformer_layers_per_block[i], cross_attention_dim=cross_attention_dim, norm_num_groups=resnet_groups, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention, upcast_attention=upcast_attention, attention_type=attention_type, ) ) else: attentions.append( DualTransformer2DModel( num_attention_heads, out_channels // num_attention_heads, in_channels=out_channels, num_layers=1, cross_attention_dim=cross_attention_dim, norm_num_groups=resnet_groups, ) ) self.attentions = nn.ModuleList(attentions) self.resnets = nn.ModuleList(resnets) if add_downsample: self.downsamplers = nn.ModuleList( [ Downsample2D( out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name="op" ) ] ) else: self.downsamplers = None self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, temb: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, cross_attention_kwargs: Optional[Dict[str, Any]] = None, encoder_attention_mask: Optional[torch.Tensor] = None, additional_residuals: Optional[torch.Tensor] = None, ) -> Tuple[torch.Tensor, 
Tuple[torch.Tensor, ...]]: if cross_attention_kwargs is not None: if cross_attention_kwargs.get("scale", None) is not None: logger.warning("Passing `scale` to `cross_attention_kwargs` is deprecated. `scale` will be ignored.") output_states = () blocks = list(zip(self.resnets, self.attentions)) for i, (resnet, attn) in enumerate(blocks): if torch.is_grad_enabled() and self.gradient_checkpointing: hidden_states = self._gradient_checkpointing_func(resnet, hidden_states, temb) hidden_states = attn( hidden_states, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs, attention_mask=attention_mask, encoder_attention_mask=encoder_attention_mask, return_dict=False, )[0] else: hidden_states = resnet(hidden_states, temb) hidden_states = attn( hidden_states, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs, attention_mask=attention_mask, encoder_attention_mask=encoder_attention_mask, return_dict=False, )[0] # apply additional residuals to the output of the last pair of resnet and attention blocks if i == len(blocks) - 1 and additional_residuals is not None: hidden_states = hidden_states + additional_residuals output_states = output_states + (hidden_states,) if self.downsamplers is not None: for downsampler in self.downsamplers: hidden_states = downsampler(hidden_states) output_states = output_states + (hidden_states,) return hidden_states, output_states class DownBlock2D(nn.Module): def __init__( self, in_channels: int, out_channels: int, temb_channels: int, dropout: float = 0.0, num_layers: int = 1, resnet_eps: float = 1e-6, resnet_time_scale_shift: str = "default", resnet_act_fn: str = "swish", resnet_groups: int = 32, resnet_pre_norm: bool = True, output_scale_factor: float = 1.0, add_downsample: bool = True, downsample_padding: int = 1, ): super().__init__() resnets = [] for i in range(num_layers): in_channels = in_channels if i == 0 else out_channels resnets.append( ResnetBlock2D( in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, ) ) self.resnets = nn.ModuleList(resnets) if add_downsample: self.downsamplers = nn.ModuleList( [ Downsample2D( out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name="op" ) ] ) else: self.downsamplers = None self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, temb: Optional[torch.Tensor] = None, *args, **kwargs ) -> Tuple[torch.Tensor, Tuple[torch.Tensor, ...]]: if len(args) > 0 or kwargs.get("scale", None) is not None: deprecation_message = "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`." 
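            # NOTE (editor's comment): as the deprecation message above states, a `scale` passed directly to
            # this block is ignored; route it through `cross_attention_kwargs` on the pipeline call instead.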
deprecate("scale", "1.0.0", deprecation_message) output_states = () for resnet in self.resnets: if torch.is_grad_enabled() and self.gradient_checkpointing: hidden_states = self._gradient_checkpointing_func(resnet, hidden_states, temb) else: hidden_states = resnet(hidden_states, temb) output_states = output_states + (hidden_states,) if self.downsamplers is not None: for downsampler in self.downsamplers: hidden_states = downsampler(hidden_states) output_states = output_states + (hidden_states,) return hidden_states, output_states class DownEncoderBlock2D(nn.Module): def __init__( self, in_channels: int, out_channels: int, dropout: float = 0.0, num_layers: int = 1, resnet_eps: float = 1e-6, resnet_time_scale_shift: str = "default", resnet_act_fn: str = "swish", resnet_groups: int = 32, resnet_pre_norm: bool = True, output_scale_factor: float = 1.0, add_downsample: bool = True, downsample_padding: int = 1, ): super().__init__() resnets = [] for i in range(num_layers): in_channels = in_channels if i == 0 else out_channels if resnet_time_scale_shift == "spatial": resnets.append( ResnetBlockCondNorm2D( in_channels=in_channels, out_channels=out_channels, temb_channels=None, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm="spatial", non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, ) ) else: resnets.append( ResnetBlock2D( in_channels=in_channels, out_channels=out_channels, temb_channels=None, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, ) ) self.resnets = nn.ModuleList(resnets) if add_downsample: self.downsamplers = nn.ModuleList( [ Downsample2D( out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name="op" ) ] ) else: self.downsamplers = None def forward(self, hidden_states: torch.Tensor, *args, **kwargs) -> torch.Tensor: if len(args) > 0 or kwargs.get("scale", None) is not None: deprecation_message = "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`." deprecate("scale", "1.0.0", deprecation_message) for resnet in self.resnets: hidden_states = resnet(hidden_states, temb=None) if self.downsamplers is not None: for downsampler in self.downsamplers: hidden_states = downsampler(hidden_states) return hidden_states class AttnDownEncoderBlock2D(nn.Module): def __init__( self, in_channels: int, out_channels: int, dropout: float = 0.0, num_layers: int = 1, resnet_eps: float = 1e-6, resnet_time_scale_shift: str = "default", resnet_act_fn: str = "swish", resnet_groups: int = 32, resnet_pre_norm: bool = True, attention_head_dim: int = 1, output_scale_factor: float = 1.0, add_downsample: bool = True, downsample_padding: int = 1, ): super().__init__() resnets = [] attentions = [] if attention_head_dim is None: logger.warning( f"It is not recommend to pass `attention_head_dim=None`. Defaulting `attention_head_dim` to `in_channels`: {out_channels}." 
) attention_head_dim = out_channels for i in range(num_layers): in_channels = in_channels if i == 0 else out_channels if resnet_time_scale_shift == "spatial": resnets.append( ResnetBlockCondNorm2D( in_channels=in_channels, out_channels=out_channels, temb_channels=None, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm="spatial", non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, ) ) else: resnets.append( ResnetBlock2D( in_channels=in_channels, out_channels=out_channels, temb_channels=None, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, ) ) attentions.append( Attention( out_channels, heads=out_channels // attention_head_dim, dim_head=attention_head_dim, rescale_output_factor=output_scale_factor, eps=resnet_eps, norm_num_groups=resnet_groups, residual_connection=True, bias=True, upcast_softmax=True, _from_deprecated_attn_block=True, ) ) self.attentions = nn.ModuleList(attentions) self.resnets = nn.ModuleList(resnets) if add_downsample: self.downsamplers = nn.ModuleList( [ Downsample2D( out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name="op" ) ] ) else: self.downsamplers = None def forward(self, hidden_states: torch.Tensor, *args, **kwargs) -> torch.Tensor: if len(args) > 0 or kwargs.get("scale", None) is not None: deprecation_message = "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`." deprecate("scale", "1.0.0", deprecation_message) for resnet, attn in zip(self.resnets, self.attentions): hidden_states = resnet(hidden_states, temb=None) hidden_states = attn(hidden_states) if self.downsamplers is not None: for downsampler in self.downsamplers: hidden_states = downsampler(hidden_states) return hidden_states class AttnSkipDownBlock2D(nn.Module): def __init__( self, in_channels: int, out_channels: int, temb_channels: int, dropout: float = 0.0, num_layers: int = 1, resnet_eps: float = 1e-6, resnet_time_scale_shift: str = "default", resnet_act_fn: str = "swish", resnet_pre_norm: bool = True, attention_head_dim: int = 1, output_scale_factor: float = np.sqrt(2.0), add_downsample: bool = True, ): super().__init__() self.attentions = nn.ModuleList([]) self.resnets = nn.ModuleList([]) if attention_head_dim is None: logger.warning( f"It is not recommend to pass `attention_head_dim=None`. Defaulting `attention_head_dim` to `in_channels`: {out_channels}." 
) attention_head_dim = out_channels for i in range(num_layers): in_channels = in_channels if i == 0 else out_channels self.resnets.append( ResnetBlock2D( in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=min(in_channels // 4, 32), groups_out=min(out_channels // 4, 32), dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, ) ) self.attentions.append( Attention( out_channels, heads=out_channels // attention_head_dim, dim_head=attention_head_dim, rescale_output_factor=output_scale_factor, eps=resnet_eps, norm_num_groups=32, residual_connection=True, bias=True, upcast_softmax=True, _from_deprecated_attn_block=True, ) ) if add_downsample: self.resnet_down = ResnetBlock2D( in_channels=out_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=min(out_channels // 4, 32), dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, use_in_shortcut=True, down=True, kernel="fir", ) self.downsamplers = nn.ModuleList([FirDownsample2D(out_channels, out_channels=out_channels)]) self.skip_conv = nn.Conv2d(3, out_channels, kernel_size=(1, 1), stride=(1, 1)) else: self.resnet_down = None self.downsamplers = None self.skip_conv = None def forward( self, hidden_states: torch.Tensor, temb: Optional[torch.Tensor] = None, skip_sample: Optional[torch.Tensor] = None, *args, **kwargs, ) -> Tuple[torch.Tensor, Tuple[torch.Tensor, ...], torch.Tensor]: if len(args) > 0 or kwargs.get("scale", None) is not None: deprecation_message = "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`." 
deprecate("scale", "1.0.0", deprecation_message) output_states = () for resnet, attn in zip(self.resnets, self.attentions): hidden_states = resnet(hidden_states, temb) hidden_states = attn(hidden_states) output_states += (hidden_states,) if self.downsamplers is not None: hidden_states = self.resnet_down(hidden_states, temb) for downsampler in self.downsamplers: skip_sample = downsampler(skip_sample) hidden_states = self.skip_conv(skip_sample) + hidden_states output_states += (hidden_states,) return hidden_states, output_states, skip_sample class SkipDownBlock2D(nn.Module): def __init__( self, in_channels: int, out_channels: int, temb_channels: int, dropout: float = 0.0, num_layers: int = 1, resnet_eps: float = 1e-6, resnet_time_scale_shift: str = "default", resnet_act_fn: str = "swish", resnet_pre_norm: bool = True, output_scale_factor: float = np.sqrt(2.0), add_downsample: bool = True, downsample_padding: int = 1, ): super().__init__() self.resnets = nn.ModuleList([]) for i in range(num_layers): in_channels = in_channels if i == 0 else out_channels self.resnets.append( ResnetBlock2D( in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=min(in_channels // 4, 32), groups_out=min(out_channels // 4, 32), dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, ) ) if add_downsample: self.resnet_down = ResnetBlock2D( in_channels=out_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=min(out_channels // 4, 32), dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, use_in_shortcut=True, down=True, kernel="fir", ) self.downsamplers = nn.ModuleList([FirDownsample2D(out_channels, out_channels=out_channels)]) self.skip_conv = nn.Conv2d(3, out_channels, kernel_size=(1, 1), stride=(1, 1)) else: self.resnet_down = None self.downsamplers = None self.skip_conv = None def forward( self, hidden_states: torch.Tensor, temb: Optional[torch.Tensor] = None, skip_sample: Optional[torch.Tensor] = None, *args, **kwargs, ) -> Tuple[torch.Tensor, Tuple[torch.Tensor, ...], torch.Tensor]: if len(args) > 0 or kwargs.get("scale", None) is not None: deprecation_message = "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`." 
deprecate("scale", "1.0.0", deprecation_message) output_states = () for resnet in self.resnets: hidden_states = resnet(hidden_states, temb) output_states += (hidden_states,) if self.downsamplers is not None: hidden_states = self.resnet_down(hidden_states, temb) for downsampler in self.downsamplers: skip_sample = downsampler(skip_sample) hidden_states = self.skip_conv(skip_sample) + hidden_states output_states += (hidden_states,) return hidden_states, output_states, skip_sample class ResnetDownsampleBlock2D(nn.Module): def __init__( self, in_channels: int, out_channels: int, temb_channels: int, dropout: float = 0.0, num_layers: int = 1, resnet_eps: float = 1e-6, resnet_time_scale_shift: str = "default", resnet_act_fn: str = "swish", resnet_groups: int = 32, resnet_pre_norm: bool = True, output_scale_factor: float = 1.0, add_downsample: bool = True, skip_time_act: bool = False, ): super().__init__() resnets = [] for i in range(num_layers): in_channels = in_channels if i == 0 else out_channels resnets.append( ResnetBlock2D( in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, skip_time_act=skip_time_act, ) ) self.resnets = nn.ModuleList(resnets) if add_downsample: self.downsamplers = nn.ModuleList( [ ResnetBlock2D( in_channels=out_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, skip_time_act=skip_time_act, down=True, ) ] ) else: self.downsamplers = None self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, temb: Optional[torch.Tensor] = None, *args, **kwargs ) -> Tuple[torch.Tensor, Tuple[torch.Tensor, ...]]: if len(args) > 0 or kwargs.get("scale", None) is not None: deprecation_message = "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`." 
deprecate("scale", "1.0.0", deprecation_message) output_states = () for resnet in self.resnets: if torch.is_grad_enabled() and self.gradient_checkpointing: hidden_states = self._gradient_checkpointing_func(resnet, hidden_states, temb) else: hidden_states = resnet(hidden_states, temb) output_states = output_states + (hidden_states,) if self.downsamplers is not None: for downsampler in self.downsamplers: hidden_states = downsampler(hidden_states, temb) output_states = output_states + (hidden_states,) return hidden_states, output_states class SimpleCrossAttnDownBlock2D(nn.Module): def __init__( self, in_channels: int, out_channels: int, temb_channels: int, dropout: float = 0.0, num_layers: int = 1, resnet_eps: float = 1e-6, resnet_time_scale_shift: str = "default", resnet_act_fn: str = "swish", resnet_groups: int = 32, resnet_pre_norm: bool = True, attention_head_dim: int = 1, cross_attention_dim: int = 1280, output_scale_factor: float = 1.0, add_downsample: bool = True, skip_time_act: bool = False, only_cross_attention: bool = False, cross_attention_norm: Optional[str] = None, ): super().__init__() self.has_cross_attention = True resnets = [] attentions = [] self.attention_head_dim = attention_head_dim self.num_heads = out_channels // self.attention_head_dim for i in range(num_layers): in_channels = in_channels if i == 0 else out_channels resnets.append( ResnetBlock2D( in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, skip_time_act=skip_time_act, ) ) processor = ( AttnAddedKVProcessor2_0() if hasattr(F, "scaled_dot_product_attention") else AttnAddedKVProcessor() ) attentions.append( Attention( query_dim=out_channels, cross_attention_dim=out_channels, heads=self.num_heads, dim_head=attention_head_dim, added_kv_proj_dim=cross_attention_dim, norm_num_groups=resnet_groups, bias=True, upcast_softmax=True, only_cross_attention=only_cross_attention, cross_attention_norm=cross_attention_norm, processor=processor, ) ) self.attentions = nn.ModuleList(attentions) self.resnets = nn.ModuleList(resnets) if add_downsample: self.downsamplers = nn.ModuleList( [ ResnetBlock2D( in_channels=out_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, skip_time_act=skip_time_act, down=True, ) ] ) else: self.downsamplers = None self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, temb: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, cross_attention_kwargs: Optional[Dict[str, Any]] = None, encoder_attention_mask: Optional[torch.Tensor] = None, ) -> Tuple[torch.Tensor, Tuple[torch.Tensor, ...]]: cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {} if cross_attention_kwargs.get("scale", None) is not None: logger.warning("Passing `scale` to `cross_attention_kwargs` is deprecated. `scale` will be ignored.") output_states = () if attention_mask is None: # if encoder_hidden_states is defined: we are doing cross-attn, so we should use cross-attn mask. 
mask = None if encoder_hidden_states is None else encoder_attention_mask else: # when attention_mask is defined: we don't even check for encoder_attention_mask. # this is to maintain compatibility with UnCLIP, which uses 'attention_mask' param for cross-attn masks. # TODO: UnCLIP should express cross-attn mask via encoder_attention_mask param instead of via attention_mask. # then we can simplify this whole if/else block to: # mask = attention_mask if encoder_hidden_states is None else encoder_attention_mask mask = attention_mask for resnet, attn in zip(self.resnets, self.attentions): if torch.is_grad_enabled() and self.gradient_checkpointing: hidden_states = self._gradient_checkpointing_func(resnet, hidden_states, temb) hidden_states = attn( hidden_states, encoder_hidden_states=encoder_hidden_states, attention_mask=mask, **cross_attention_kwargs, ) else: hidden_states = resnet(hidden_states, temb) hidden_states = attn( hidden_states, encoder_hidden_states=encoder_hidden_states, attention_mask=mask, **cross_attention_kwargs, ) output_states = output_states + (hidden_states,) if self.downsamplers is not None: for downsampler in self.downsamplers: hidden_states = downsampler(hidden_states, temb) output_states = output_states + (hidden_states,) return hidden_states, output_states class KDownBlock2D(nn.Module): def __init__( self, in_channels: int, out_channels: int, temb_channels: int, dropout: float = 0.0, num_layers: int = 4, resnet_eps: float = 1e-5, resnet_act_fn: str = "gelu", resnet_group_size: int = 32, add_downsample: bool = False, ): super().__init__() resnets = [] for i in range(num_layers): in_channels = in_channels if i == 0 else out_channels groups = in_channels // resnet_group_size groups_out = out_channels // resnet_group_size resnets.append( ResnetBlockCondNorm2D( in_channels=in_channels, out_channels=out_channels, dropout=dropout, temb_channels=temb_channels, groups=groups, groups_out=groups_out, eps=resnet_eps, non_linearity=resnet_act_fn, time_embedding_norm="ada_group", conv_shortcut_bias=False, ) ) self.resnets = nn.ModuleList(resnets) if add_downsample: # YiYi's comments- might be able to use FirDownsample2D, look into details later self.downsamplers = nn.ModuleList([KDownsample2D()]) else: self.downsamplers = None self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, temb: Optional[torch.Tensor] = None, *args, **kwargs ) -> Tuple[torch.Tensor, Tuple[torch.Tensor, ...]]: if len(args) > 0 or kwargs.get("scale", None) is not None: deprecation_message = "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`." 
deprecate("scale", "1.0.0", deprecation_message) output_states = () for resnet in self.resnets: if torch.is_grad_enabled() and self.gradient_checkpointing: hidden_states = self._gradient_checkpointing_func(resnet, hidden_states, temb) else: hidden_states = resnet(hidden_states, temb) output_states += (hidden_states,) if self.downsamplers is not None: for downsampler in self.downsamplers: hidden_states = downsampler(hidden_states) return hidden_states, output_states class KCrossAttnDownBlock2D(nn.Module): def __init__( self, in_channels: int, out_channels: int, temb_channels: int, cross_attention_dim: int, dropout: float = 0.0, num_layers: int = 4, resnet_group_size: int = 32, add_downsample: bool = True, attention_head_dim: int = 64, add_self_attention: bool = False, resnet_eps: float = 1e-5, resnet_act_fn: str = "gelu", ): super().__init__() resnets = [] attentions = [] self.has_cross_attention = True for i in range(num_layers): in_channels = in_channels if i == 0 else out_channels groups = in_channels // resnet_group_size groups_out = out_channels // resnet_group_size resnets.append( ResnetBlockCondNorm2D( in_channels=in_channels, out_channels=out_channels, dropout=dropout, temb_channels=temb_channels, groups=groups, groups_out=groups_out, eps=resnet_eps, non_linearity=resnet_act_fn, time_embedding_norm="ada_group", conv_shortcut_bias=False, ) ) attentions.append( KAttentionBlock( out_channels, out_channels // attention_head_dim, attention_head_dim, cross_attention_dim=cross_attention_dim, temb_channels=temb_channels, attention_bias=True, add_self_attention=add_self_attention, cross_attention_norm="layer_norm", group_size=resnet_group_size, ) ) self.resnets = nn.ModuleList(resnets) self.attentions = nn.ModuleList(attentions) if add_downsample: self.downsamplers = nn.ModuleList([KDownsample2D()]) else: self.downsamplers = None self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, temb: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, cross_attention_kwargs: Optional[Dict[str, Any]] = None, encoder_attention_mask: Optional[torch.Tensor] = None, ) -> Tuple[torch.Tensor, Tuple[torch.Tensor, ...]]: cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {} if cross_attention_kwargs.get("scale", None) is not None: logger.warning("Passing `scale` to `cross_attention_kwargs` is deprecated. 
`scale` will be ignored.") output_states = () for resnet, attn in zip(self.resnets, self.attentions): if torch.is_grad_enabled() and self.gradient_checkpointing: hidden_states = self._gradient_checkpointing_func( resnet, hidden_states, temb, ) hidden_states = attn( hidden_states, encoder_hidden_states=encoder_hidden_states, emb=temb, attention_mask=attention_mask, cross_attention_kwargs=cross_attention_kwargs, encoder_attention_mask=encoder_attention_mask, ) else: hidden_states = resnet(hidden_states, temb) hidden_states = attn( hidden_states, encoder_hidden_states=encoder_hidden_states, emb=temb, attention_mask=attention_mask, cross_attention_kwargs=cross_attention_kwargs, encoder_attention_mask=encoder_attention_mask, ) if self.downsamplers is None: output_states += (None,) else: output_states += (hidden_states,) if self.downsamplers is not None: for downsampler in self.downsamplers: hidden_states = downsampler(hidden_states) return hidden_states, output_states class AttnUpBlock2D(nn.Module): def __init__( self, in_channels: int, prev_output_channel: int, out_channels: int, temb_channels: int, resolution_idx: int = None, dropout: float = 0.0, num_layers: int = 1, resnet_eps: float = 1e-6, resnet_time_scale_shift: str = "default", resnet_act_fn: str = "swish", resnet_groups: int = 32, resnet_pre_norm: bool = True, attention_head_dim: int = 1, output_scale_factor: float = 1.0, upsample_type: str = "conv", ): super().__init__() resnets = [] attentions = [] self.upsample_type = upsample_type if attention_head_dim is None: logger.warning( f"It is not recommend to pass `attention_head_dim=None`. Defaulting `attention_head_dim` to `in_channels`: {out_channels}." ) attention_head_dim = out_channels for i in range(num_layers): res_skip_channels = in_channels if (i == num_layers - 1) else out_channels resnet_in_channels = prev_output_channel if i == 0 else out_channels resnets.append( ResnetBlock2D( in_channels=resnet_in_channels + res_skip_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, ) ) attentions.append( Attention( out_channels, heads=out_channels // attention_head_dim, dim_head=attention_head_dim, rescale_output_factor=output_scale_factor, eps=resnet_eps, norm_num_groups=resnet_groups, residual_connection=True, bias=True, upcast_softmax=True, _from_deprecated_attn_block=True, ) ) self.attentions = nn.ModuleList(attentions) self.resnets = nn.ModuleList(resnets) if upsample_type == "conv": self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)]) elif upsample_type == "resnet": self.upsamplers = nn.ModuleList( [ ResnetBlock2D( in_channels=out_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, up=True, ) ] ) else: self.upsamplers = None self.gradient_checkpointing = False self.resolution_idx = resolution_idx def forward( self, hidden_states: torch.Tensor, res_hidden_states_tuple: Tuple[torch.Tensor, ...], temb: Optional[torch.Tensor] = None, upsample_size: Optional[int] = None, *args, **kwargs, ) -> torch.Tensor: if len(args) > 0 or kwargs.get("scale", None) is not None: deprecation_message = "The `scale` argument is deprecated and 
will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`." deprecate("scale", "1.0.0", deprecation_message) for resnet, attn in zip(self.resnets, self.attentions): # pop res hidden states res_hidden_states = res_hidden_states_tuple[-1] res_hidden_states_tuple = res_hidden_states_tuple[:-1] hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) if torch.is_grad_enabled() and self.gradient_checkpointing: hidden_states = self._gradient_checkpointing_func(resnet, hidden_states, temb) hidden_states = attn(hidden_states) else: hidden_states = resnet(hidden_states, temb) hidden_states = attn(hidden_states) if self.upsamplers is not None: for upsampler in self.upsamplers: if self.upsample_type == "resnet": hidden_states = upsampler(hidden_states, temb=temb) else: hidden_states = upsampler(hidden_states) return hidden_states class CrossAttnUpBlock2D(nn.Module): def __init__( self, in_channels: int, out_channels: int, prev_output_channel: int, temb_channels: int, resolution_idx: Optional[int] = None, dropout: float = 0.0, num_layers: int = 1, transformer_layers_per_block: Union[int, Tuple[int]] = 1, resnet_eps: float = 1e-6, resnet_time_scale_shift: str = "default", resnet_act_fn: str = "swish", resnet_groups: int = 32, resnet_pre_norm: bool = True, num_attention_heads: int = 1, cross_attention_dim: int = 1280, output_scale_factor: float = 1.0, add_upsample: bool = True, dual_cross_attention: bool = False, use_linear_projection: bool = False, only_cross_attention: bool = False, upcast_attention: bool = False, attention_type: str = "default", ): super().__init__() resnets = [] attentions = [] self.has_cross_attention = True self.num_attention_heads = num_attention_heads if isinstance(transformer_layers_per_block, int): transformer_layers_per_block = [transformer_layers_per_block] * num_layers for i in range(num_layers): res_skip_channels = in_channels if (i == num_layers - 1) else out_channels resnet_in_channels = prev_output_channel if i == 0 else out_channels resnets.append( ResnetBlock2D( in_channels=resnet_in_channels + res_skip_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, ) ) if not dual_cross_attention: attentions.append( Transformer2DModel( num_attention_heads, out_channels // num_attention_heads, in_channels=out_channels, num_layers=transformer_layers_per_block[i], cross_attention_dim=cross_attention_dim, norm_num_groups=resnet_groups, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention, upcast_attention=upcast_attention, attention_type=attention_type, ) ) else: attentions.append( DualTransformer2DModel( num_attention_heads, out_channels // num_attention_heads, in_channels=out_channels, num_layers=1, cross_attention_dim=cross_attention_dim, norm_num_groups=resnet_groups, ) ) self.attentions = nn.ModuleList(attentions) self.resnets = nn.ModuleList(resnets) if add_upsample: self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)]) else: self.upsamplers = None self.gradient_checkpointing = False self.resolution_idx = resolution_idx def forward( self, hidden_states: torch.Tensor, res_hidden_states_tuple: Tuple[torch.Tensor, ...], temb: 
Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, cross_attention_kwargs: Optional[Dict[str, Any]] = None, upsample_size: Optional[int] = None, attention_mask: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, ) -> torch.Tensor: if cross_attention_kwargs is not None: if cross_attention_kwargs.get("scale", None) is not None: logger.warning("Passing `scale` to `cross_attention_kwargs` is deprecated. `scale` will be ignored.") is_freeu_enabled = ( getattr(self, "s1", None) and getattr(self, "s2", None) and getattr(self, "b1", None) and getattr(self, "b2", None) ) for resnet, attn in zip(self.resnets, self.attentions): # pop res hidden states res_hidden_states = res_hidden_states_tuple[-1] res_hidden_states_tuple = res_hidden_states_tuple[:-1] # FreeU: Only operate on the first two stages if is_freeu_enabled: hidden_states, res_hidden_states = apply_freeu( self.resolution_idx, hidden_states, res_hidden_states, s1=self.s1, s2=self.s2, b1=self.b1, b2=self.b2, ) hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) if torch.is_grad_enabled() and self.gradient_checkpointing: hidden_states = self._gradient_checkpointing_func(resnet, hidden_states, temb) hidden_states = attn( hidden_states, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs, attention_mask=attention_mask, encoder_attention_mask=encoder_attention_mask, return_dict=False, )[0] else: hidden_states = resnet(hidden_states, temb) hidden_states = attn( hidden_states, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs, attention_mask=attention_mask, encoder_attention_mask=encoder_attention_mask, return_dict=False, )[0] if self.upsamplers is not None: for upsampler in self.upsamplers: hidden_states = upsampler(hidden_states, upsample_size) return hidden_states class UpBlock2D(nn.Module): def __init__( self, in_channels: int, prev_output_channel: int, out_channels: int, temb_channels: int, resolution_idx: Optional[int] = None, dropout: float = 0.0, num_layers: int = 1, resnet_eps: float = 1e-6, resnet_time_scale_shift: str = "default", resnet_act_fn: str = "swish", resnet_groups: int = 32, resnet_pre_norm: bool = True, output_scale_factor: float = 1.0, add_upsample: bool = True, ): super().__init__() resnets = [] for i in range(num_layers): res_skip_channels = in_channels if (i == num_layers - 1) else out_channels resnet_in_channels = prev_output_channel if i == 0 else out_channels resnets.append( ResnetBlock2D( in_channels=resnet_in_channels + res_skip_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, ) ) self.resnets = nn.ModuleList(resnets) if add_upsample: self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)]) else: self.upsamplers = None self.gradient_checkpointing = False self.resolution_idx = resolution_idx def forward( self, hidden_states: torch.Tensor, res_hidden_states_tuple: Tuple[torch.Tensor, ...], temb: Optional[torch.Tensor] = None, upsample_size: Optional[int] = None, *args, **kwargs, ) -> torch.Tensor: if len(args) > 0 or kwargs.get("scale", None) is not None: deprecation_message = "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. 
`scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`." deprecate("scale", "1.0.0", deprecation_message) is_freeu_enabled = ( getattr(self, "s1", None) and getattr(self, "s2", None) and getattr(self, "b1", None) and getattr(self, "b2", None) ) for resnet in self.resnets: # pop res hidden states res_hidden_states = res_hidden_states_tuple[-1] res_hidden_states_tuple = res_hidden_states_tuple[:-1] # FreeU: Only operate on the first two stages if is_freeu_enabled: hidden_states, res_hidden_states = apply_freeu( self.resolution_idx, hidden_states, res_hidden_states, s1=self.s1, s2=self.s2, b1=self.b1, b2=self.b2, ) hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) if torch.is_grad_enabled() and self.gradient_checkpointing: hidden_states = self._gradient_checkpointing_func(resnet, hidden_states, temb) else: hidden_states = resnet(hidden_states, temb) if self.upsamplers is not None: for upsampler in self.upsamplers: hidden_states = upsampler(hidden_states, upsample_size) return hidden_states class UpDecoderBlock2D(nn.Module): def __init__( self, in_channels: int, out_channels: int, resolution_idx: Optional[int] = None, dropout: float = 0.0, num_layers: int = 1, resnet_eps: float = 1e-6, resnet_time_scale_shift: str = "default", # default, spatial resnet_act_fn: str = "swish", resnet_groups: int = 32, resnet_pre_norm: bool = True, output_scale_factor: float = 1.0, add_upsample: bool = True, temb_channels: Optional[int] = None, ): super().__init__() resnets = [] for i in range(num_layers): input_channels = in_channels if i == 0 else out_channels if resnet_time_scale_shift == "spatial": resnets.append( ResnetBlockCondNorm2D( in_channels=input_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm="spatial", non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, ) ) else: resnets.append( ResnetBlock2D( in_channels=input_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, ) ) self.resnets = nn.ModuleList(resnets) if add_upsample: self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)]) else: self.upsamplers = None self.resolution_idx = resolution_idx def forward(self, hidden_states: torch.Tensor, temb: Optional[torch.Tensor] = None) -> torch.Tensor: for resnet in self.resnets: hidden_states = resnet(hidden_states, temb=temb) if self.upsamplers is not None: for upsampler in self.upsamplers: hidden_states = upsampler(hidden_states) return hidden_states class AttnUpDecoderBlock2D(nn.Module): def __init__( self, in_channels: int, out_channels: int, resolution_idx: Optional[int] = None, dropout: float = 0.0, num_layers: int = 1, resnet_eps: float = 1e-6, resnet_time_scale_shift: str = "default", resnet_act_fn: str = "swish", resnet_groups: int = 32, resnet_pre_norm: bool = True, attention_head_dim: int = 1, output_scale_factor: float = 1.0, add_upsample: bool = True, temb_channels: Optional[int] = None, ): super().__init__() resnets = [] attentions = [] if attention_head_dim is None: logger.warning( f"It is not recommend to pass `attention_head_dim=None`. Defaulting `attention_head_dim` to `out_channels`: {out_channels}." 
            )
            attention_head_dim = out_channels

        for i in range(num_layers):
            input_channels = in_channels if i == 0 else out_channels

            if resnet_time_scale_shift == "spatial":
                resnets.append(
                    ResnetBlockCondNorm2D(
                        in_channels=input_channels,
                        out_channels=out_channels,
                        temb_channels=temb_channels,
                        eps=resnet_eps,
                        groups=resnet_groups,
                        dropout=dropout,
                        time_embedding_norm="spatial",
                        non_linearity=resnet_act_fn,
                        output_scale_factor=output_scale_factor,
                    )
                )
            else:
                resnets.append(
                    ResnetBlock2D(
                        in_channels=input_channels,
                        out_channels=out_channels,
                        temb_channels=temb_channels,
                        eps=resnet_eps,
                        groups=resnet_groups,
                        dropout=dropout,
                        time_embedding_norm=resnet_time_scale_shift,
                        non_linearity=resnet_act_fn,
                        output_scale_factor=output_scale_factor,
                        pre_norm=resnet_pre_norm,
                    )
                )

            attentions.append(
                Attention(
                    out_channels,
                    heads=out_channels // attention_head_dim,
                    dim_head=attention_head_dim,
                    rescale_output_factor=output_scale_factor,
                    eps=resnet_eps,
                    norm_num_groups=resnet_groups if resnet_time_scale_shift != "spatial" else None,
                    spatial_norm_dim=temb_channels if resnet_time_scale_shift == "spatial" else None,
                    residual_connection=True,
                    bias=True,
                    upcast_softmax=True,
                    _from_deprecated_attn_block=True,
                )
            )

        self.attentions = nn.ModuleList(attentions)
        self.resnets = nn.ModuleList(resnets)

        if add_upsample:
            self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)])
        else:
            self.upsamplers = None

        self.resolution_idx = resolution_idx

    def forward(self, hidden_states: torch.Tensor, temb: Optional[torch.Tensor] = None) -> torch.Tensor:
        for resnet, attn in zip(self.resnets, self.attentions):
            hidden_states = resnet(hidden_states, temb=temb)
            hidden_states = attn(hidden_states, temb=temb)

        if self.upsamplers is not None:
            for upsampler in self.upsamplers:
                hidden_states = upsampler(hidden_states)

        return hidden_states


class AttnSkipUpBlock2D(nn.Module):
    def __init__(
        self,
        in_channels: int,
        prev_output_channel: int,
        out_channels: int,
        temb_channels: int,
        resolution_idx: Optional[int] = None,
        dropout: float = 0.0,
        num_layers: int = 1,
        resnet_eps: float = 1e-6,
        resnet_time_scale_shift: str = "default",
        resnet_act_fn: str = "swish",
        resnet_pre_norm: bool = True,
        attention_head_dim: int = 1,
        output_scale_factor: float = np.sqrt(2.0),
        add_upsample: bool = True,
    ):
        super().__init__()
        self.attentions = nn.ModuleList([])
        self.resnets = nn.ModuleList([])

        for i in range(num_layers):
            res_skip_channels = in_channels if (i == num_layers - 1) else out_channels
            resnet_in_channels = prev_output_channel if i == 0 else out_channels

            self.resnets.append(
                ResnetBlock2D(
                    in_channels=resnet_in_channels + res_skip_channels,
                    out_channels=out_channels,
                    temb_channels=temb_channels,
                    eps=resnet_eps,
                    # fixed operator precedence: divide the full (skip + input) channel count by 4,
                    # matching the equivalent expression in SkipUpBlock2D below
                    groups=min((resnet_in_channels + res_skip_channels) // 4, 32),
                    groups_out=min(out_channels // 4, 32),
                    dropout=dropout,
                    time_embedding_norm=resnet_time_scale_shift,
                    non_linearity=resnet_act_fn,
                    output_scale_factor=output_scale_factor,
                    pre_norm=resnet_pre_norm,
                )
            )

        if attention_head_dim is None:
            logger.warning(
                f"It is not recommended to pass `attention_head_dim=None`. Defaulting `attention_head_dim` to `out_channels`: {out_channels}."
) attention_head_dim = out_channels self.attentions.append( Attention( out_channels, heads=out_channels // attention_head_dim, dim_head=attention_head_dim, rescale_output_factor=output_scale_factor, eps=resnet_eps, norm_num_groups=32, residual_connection=True, bias=True, upcast_softmax=True, _from_deprecated_attn_block=True, ) ) self.upsampler = FirUpsample2D(in_channels, out_channels=out_channels) if add_upsample: self.resnet_up = ResnetBlock2D( in_channels=out_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=min(out_channels // 4, 32), groups_out=min(out_channels // 4, 32), dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, use_in_shortcut=True, up=True, kernel="fir", ) self.skip_conv = nn.Conv2d(out_channels, 3, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) self.skip_norm = torch.nn.GroupNorm( num_groups=min(out_channels // 4, 32), num_channels=out_channels, eps=resnet_eps, affine=True ) self.act = nn.SiLU() else: self.resnet_up = None self.skip_conv = None self.skip_norm = None self.act = None self.resolution_idx = resolution_idx def forward( self, hidden_states: torch.Tensor, res_hidden_states_tuple: Tuple[torch.Tensor, ...], temb: Optional[torch.Tensor] = None, skip_sample=None, *args, **kwargs, ) -> Tuple[torch.Tensor, torch.Tensor]: if len(args) > 0 or kwargs.get("scale", None) is not None: deprecation_message = "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`." deprecate("scale", "1.0.0", deprecation_message) for resnet in self.resnets: # pop res hidden states res_hidden_states = res_hidden_states_tuple[-1] res_hidden_states_tuple = res_hidden_states_tuple[:-1] hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) hidden_states = resnet(hidden_states, temb) hidden_states = self.attentions[0](hidden_states) if skip_sample is not None: skip_sample = self.upsampler(skip_sample) else: skip_sample = 0 if self.resnet_up is not None: skip_sample_states = self.skip_norm(hidden_states) skip_sample_states = self.act(skip_sample_states) skip_sample_states = self.skip_conv(skip_sample_states) skip_sample = skip_sample + skip_sample_states hidden_states = self.resnet_up(hidden_states, temb) return hidden_states, skip_sample class SkipUpBlock2D(nn.Module): def __init__( self, in_channels: int, prev_output_channel: int, out_channels: int, temb_channels: int, resolution_idx: Optional[int] = None, dropout: float = 0.0, num_layers: int = 1, resnet_eps: float = 1e-6, resnet_time_scale_shift: str = "default", resnet_act_fn: str = "swish", resnet_pre_norm: bool = True, output_scale_factor: float = np.sqrt(2.0), add_upsample: bool = True, upsample_padding: int = 1, ): super().__init__() self.resnets = nn.ModuleList([]) for i in range(num_layers): res_skip_channels = in_channels if (i == num_layers - 1) else out_channels resnet_in_channels = prev_output_channel if i == 0 else out_channels self.resnets.append( ResnetBlock2D( in_channels=resnet_in_channels + res_skip_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=min((resnet_in_channels + res_skip_channels) // 4, 32), groups_out=min(out_channels // 4, 32), dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, 
output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, ) ) self.upsampler = FirUpsample2D(in_channels, out_channels=out_channels) if add_upsample: self.resnet_up = ResnetBlock2D( in_channels=out_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=min(out_channels // 4, 32), groups_out=min(out_channels // 4, 32), dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, use_in_shortcut=True, up=True, kernel="fir", ) self.skip_conv = nn.Conv2d(out_channels, 3, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) self.skip_norm = torch.nn.GroupNorm( num_groups=min(out_channels // 4, 32), num_channels=out_channels, eps=resnet_eps, affine=True ) self.act = nn.SiLU() else: self.resnet_up = None self.skip_conv = None self.skip_norm = None self.act = None self.resolution_idx = resolution_idx def forward( self, hidden_states: torch.Tensor, res_hidden_states_tuple: Tuple[torch.Tensor, ...], temb: Optional[torch.Tensor] = None, skip_sample=None, *args, **kwargs, ) -> Tuple[torch.Tensor, torch.Tensor]: if len(args) > 0 or kwargs.get("scale", None) is not None: deprecation_message = "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`." deprecate("scale", "1.0.0", deprecation_message) for resnet in self.resnets: # pop res hidden states res_hidden_states = res_hidden_states_tuple[-1] res_hidden_states_tuple = res_hidden_states_tuple[:-1] hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) hidden_states = resnet(hidden_states, temb) if skip_sample is not None: skip_sample = self.upsampler(skip_sample) else: skip_sample = 0 if self.resnet_up is not None: skip_sample_states = self.skip_norm(hidden_states) skip_sample_states = self.act(skip_sample_states) skip_sample_states = self.skip_conv(skip_sample_states) skip_sample = skip_sample + skip_sample_states hidden_states = self.resnet_up(hidden_states, temb) return hidden_states, skip_sample class ResnetUpsampleBlock2D(nn.Module): def __init__( self, in_channels: int, prev_output_channel: int, out_channels: int, temb_channels: int, resolution_idx: Optional[int] = None, dropout: float = 0.0, num_layers: int = 1, resnet_eps: float = 1e-6, resnet_time_scale_shift: str = "default", resnet_act_fn: str = "swish", resnet_groups: int = 32, resnet_pre_norm: bool = True, output_scale_factor: float = 1.0, add_upsample: bool = True, skip_time_act: bool = False, ): super().__init__() resnets = [] for i in range(num_layers): res_skip_channels = in_channels if (i == num_layers - 1) else out_channels resnet_in_channels = prev_output_channel if i == 0 else out_channels resnets.append( ResnetBlock2D( in_channels=resnet_in_channels + res_skip_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, skip_time_act=skip_time_act, ) ) self.resnets = nn.ModuleList(resnets) if add_upsample: self.upsamplers = nn.ModuleList( [ ResnetBlock2D( in_channels=out_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, 
non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, skip_time_act=skip_time_act, up=True, ) ] ) else: self.upsamplers = None self.gradient_checkpointing = False self.resolution_idx = resolution_idx def forward( self, hidden_states: torch.Tensor, res_hidden_states_tuple: Tuple[torch.Tensor, ...], temb: Optional[torch.Tensor] = None, upsample_size: Optional[int] = None, *args, **kwargs, ) -> torch.Tensor: if len(args) > 0 or kwargs.get("scale", None) is not None: deprecation_message = "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`." deprecate("scale", "1.0.0", deprecation_message) for resnet in self.resnets: # pop res hidden states res_hidden_states = res_hidden_states_tuple[-1] res_hidden_states_tuple = res_hidden_states_tuple[:-1] hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) if torch.is_grad_enabled() and self.gradient_checkpointing: hidden_states = self._gradient_checkpointing_func(resnet, hidden_states, temb) else: hidden_states = resnet(hidden_states, temb) if self.upsamplers is not None: for upsampler in self.upsamplers: hidden_states = upsampler(hidden_states, temb) return hidden_states class SimpleCrossAttnUpBlock2D(nn.Module): def __init__( self, in_channels: int, out_channels: int, prev_output_channel: int, temb_channels: int, resolution_idx: Optional[int] = None, dropout: float = 0.0, num_layers: int = 1, resnet_eps: float = 1e-6, resnet_time_scale_shift: str = "default", resnet_act_fn: str = "swish", resnet_groups: int = 32, resnet_pre_norm: bool = True, attention_head_dim: int = 1, cross_attention_dim: int = 1280, output_scale_factor: float = 1.0, add_upsample: bool = True, skip_time_act: bool = False, only_cross_attention: bool = False, cross_attention_norm: Optional[str] = None, ): super().__init__() resnets = [] attentions = [] self.has_cross_attention = True self.attention_head_dim = attention_head_dim self.num_heads = out_channels // self.attention_head_dim for i in range(num_layers): res_skip_channels = in_channels if (i == num_layers - 1) else out_channels resnet_in_channels = prev_output_channel if i == 0 else out_channels resnets.append( ResnetBlock2D( in_channels=resnet_in_channels + res_skip_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, skip_time_act=skip_time_act, ) ) processor = ( AttnAddedKVProcessor2_0() if hasattr(F, "scaled_dot_product_attention") else AttnAddedKVProcessor() ) attentions.append( Attention( query_dim=out_channels, cross_attention_dim=out_channels, heads=self.num_heads, dim_head=self.attention_head_dim, added_kv_proj_dim=cross_attention_dim, norm_num_groups=resnet_groups, bias=True, upcast_softmax=True, only_cross_attention=only_cross_attention, cross_attention_norm=cross_attention_norm, processor=processor, ) ) self.attentions = nn.ModuleList(attentions) self.resnets = nn.ModuleList(resnets) if add_upsample: self.upsamplers = nn.ModuleList( [ ResnetBlock2D( in_channels=out_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, 
output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, skip_time_act=skip_time_act, up=True, ) ] ) else: self.upsamplers = None self.gradient_checkpointing = False self.resolution_idx = resolution_idx def forward( self, hidden_states: torch.Tensor, res_hidden_states_tuple: Tuple[torch.Tensor, ...], temb: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, upsample_size: Optional[int] = None, attention_mask: Optional[torch.Tensor] = None, cross_attention_kwargs: Optional[Dict[str, Any]] = None, encoder_attention_mask: Optional[torch.Tensor] = None, ) -> torch.Tensor: cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {} if cross_attention_kwargs.get("scale", None) is not None: logger.warning("Passing `scale` to `cross_attention_kwargs` is deprecated. `scale` will be ignored.") if attention_mask is None: # if encoder_hidden_states is defined: we are doing cross-attn, so we should use cross-attn mask. mask = None if encoder_hidden_states is None else encoder_attention_mask else: # when attention_mask is defined: we don't even check for encoder_attention_mask. # this is to maintain compatibility with UnCLIP, which uses 'attention_mask' param for cross-attn masks. # TODO: UnCLIP should express cross-attn mask via encoder_attention_mask param instead of via attention_mask. # then we can simplify this whole if/else block to: # mask = attention_mask if encoder_hidden_states is None else encoder_attention_mask mask = attention_mask for resnet, attn in zip(self.resnets, self.attentions): # resnet # pop res hidden states res_hidden_states = res_hidden_states_tuple[-1] res_hidden_states_tuple = res_hidden_states_tuple[:-1] hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) if torch.is_grad_enabled() and self.gradient_checkpointing: hidden_states = self._gradient_checkpointing_func(resnet, hidden_states, temb) hidden_states = attn( hidden_states, encoder_hidden_states=encoder_hidden_states, attention_mask=mask, **cross_attention_kwargs, ) else: hidden_states = resnet(hidden_states, temb) hidden_states = attn( hidden_states, encoder_hidden_states=encoder_hidden_states, attention_mask=mask, **cross_attention_kwargs, ) if self.upsamplers is not None: for upsampler in self.upsamplers: hidden_states = upsampler(hidden_states, temb) return hidden_states class KUpBlock2D(nn.Module): def __init__( self, in_channels: int, out_channels: int, temb_channels: int, resolution_idx: int, dropout: float = 0.0, num_layers: int = 5, resnet_eps: float = 1e-5, resnet_act_fn: str = "gelu", resnet_group_size: Optional[int] = 32, add_upsample: bool = True, ): super().__init__() resnets = [] k_in_channels = 2 * out_channels k_out_channels = in_channels num_layers = num_layers - 1 for i in range(num_layers): in_channels = k_in_channels if i == 0 else out_channels groups = in_channels // resnet_group_size groups_out = out_channels // resnet_group_size resnets.append( ResnetBlockCondNorm2D( in_channels=in_channels, out_channels=k_out_channels if (i == num_layers - 1) else out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=groups, groups_out=groups_out, dropout=dropout, non_linearity=resnet_act_fn, time_embedding_norm="ada_group", conv_shortcut_bias=False, ) ) self.resnets = nn.ModuleList(resnets) if add_upsample: self.upsamplers = nn.ModuleList([KUpsample2D()]) else: self.upsamplers = None self.gradient_checkpointing = False self.resolution_idx = resolution_idx def forward( self, hidden_states: torch.Tensor, 
res_hidden_states_tuple: Tuple[torch.Tensor, ...], temb: Optional[torch.Tensor] = None, upsample_size: Optional[int] = None, *args, **kwargs, ) -> torch.Tensor: if len(args) > 0 or kwargs.get("scale", None) is not None: deprecation_message = "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`." deprecate("scale", "1.0.0", deprecation_message) res_hidden_states_tuple = res_hidden_states_tuple[-1] if res_hidden_states_tuple is not None: hidden_states = torch.cat([hidden_states, res_hidden_states_tuple], dim=1) for resnet in self.resnets: if torch.is_grad_enabled() and self.gradient_checkpointing: hidden_states = self._gradient_checkpointing_func(resnet, hidden_states, temb) else: hidden_states = resnet(hidden_states, temb) if self.upsamplers is not None: for upsampler in self.upsamplers: hidden_states = upsampler(hidden_states) return hidden_states class KCrossAttnUpBlock2D(nn.Module): def __init__( self, in_channels: int, out_channels: int, temb_channels: int, resolution_idx: int, dropout: float = 0.0, num_layers: int = 4, resnet_eps: float = 1e-5, resnet_act_fn: str = "gelu", resnet_group_size: int = 32, attention_head_dim: int = 1, # attention dim_head cross_attention_dim: int = 768, add_upsample: bool = True, upcast_attention: bool = False, ): super().__init__() resnets = [] attentions = [] is_first_block = in_channels == out_channels == temb_channels is_middle_block = in_channels != out_channels add_self_attention = True if is_first_block else False self.has_cross_attention = True self.attention_head_dim = attention_head_dim # in_channels, and out_channels for the block (k-unet) k_in_channels = out_channels if is_first_block else 2 * out_channels k_out_channels = in_channels num_layers = num_layers - 1 for i in range(num_layers): in_channels = k_in_channels if i == 0 else out_channels groups = in_channels // resnet_group_size groups_out = out_channels // resnet_group_size if is_middle_block and (i == num_layers - 1): conv_2d_out_channels = k_out_channels else: conv_2d_out_channels = None resnets.append( ResnetBlockCondNorm2D( in_channels=in_channels, out_channels=out_channels, conv_2d_out_channels=conv_2d_out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=groups, groups_out=groups_out, dropout=dropout, non_linearity=resnet_act_fn, time_embedding_norm="ada_group", conv_shortcut_bias=False, ) ) attentions.append( KAttentionBlock( k_out_channels if (i == num_layers - 1) else out_channels, k_out_channels // attention_head_dim if (i == num_layers - 1) else out_channels // attention_head_dim, attention_head_dim, cross_attention_dim=cross_attention_dim, temb_channels=temb_channels, attention_bias=True, add_self_attention=add_self_attention, cross_attention_norm="layer_norm", upcast_attention=upcast_attention, ) ) self.resnets = nn.ModuleList(resnets) self.attentions = nn.ModuleList(attentions) if add_upsample: self.upsamplers = nn.ModuleList([KUpsample2D()]) else: self.upsamplers = None self.gradient_checkpointing = False self.resolution_idx = resolution_idx def forward( self, hidden_states: torch.Tensor, res_hidden_states_tuple: Tuple[torch.Tensor, ...], temb: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, cross_attention_kwargs: Optional[Dict[str, Any]] = None, upsample_size: Optional[int] = None, attention_mask: Optional[torch.Tensor] = None, 
encoder_attention_mask: Optional[torch.Tensor] = None, ) -> torch.Tensor: res_hidden_states_tuple = res_hidden_states_tuple[-1] if res_hidden_states_tuple is not None: hidden_states = torch.cat([hidden_states, res_hidden_states_tuple], dim=1) for resnet, attn in zip(self.resnets, self.attentions): if torch.is_grad_enabled() and self.gradient_checkpointing: hidden_states = self._gradient_checkpointing_func( resnet, hidden_states, temb, ) hidden_states = attn( hidden_states, encoder_hidden_states=encoder_hidden_states, emb=temb, attention_mask=attention_mask, cross_attention_kwargs=cross_attention_kwargs, encoder_attention_mask=encoder_attention_mask, ) else: hidden_states = resnet(hidden_states, temb) hidden_states = attn( hidden_states, encoder_hidden_states=encoder_hidden_states, emb=temb, attention_mask=attention_mask, cross_attention_kwargs=cross_attention_kwargs, encoder_attention_mask=encoder_attention_mask, ) if self.upsamplers is not None: for upsampler in self.upsamplers: hidden_states = upsampler(hidden_states) return hidden_states # can potentially later be renamed to `No-feed-forward` attention class KAttentionBlock(nn.Module): r""" A basic Transformer block. Parameters: dim (`int`): The number of channels in the input and output. num_attention_heads (`int`): The number of heads to use for multi-head attention. attention_head_dim (`int`): The number of channels in each head. dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use. cross_attention_dim (`int`, *optional*): The size of the encoder_hidden_states vector for cross attention. attention_bias (`bool`, *optional*, defaults to `False`): Configure if the attention layers should contain a bias parameter. upcast_attention (`bool`, *optional*, defaults to `False`): Set to `True` to upcast the attention computation to `float32`. temb_channels (`int`, *optional*, defaults to 768): The number of channels in the token embedding. add_self_attention (`bool`, *optional*, defaults to `False`): Set to `True` to add self-attention to the block. cross_attention_norm (`str`, *optional*, defaults to `None`): The type of normalization to use for the cross attention. Can be `None`, `layer_norm`, or `group_norm`. group_size (`int`, *optional*, defaults to 32): The number of groups to separate the channels into for group normalization. """ def __init__( self, dim: int, num_attention_heads: int, attention_head_dim: int, dropout: float = 0.0, cross_attention_dim: Optional[int] = None, attention_bias: bool = False, upcast_attention: bool = False, temb_channels: int = 768, # for ada_group_norm add_self_attention: bool = False, cross_attention_norm: Optional[str] = None, group_size: int = 32, ): super().__init__() self.add_self_attention = add_self_attention # 1. Self-Attn if add_self_attention: self.norm1 = AdaGroupNorm(temb_channels, dim, max(1, dim // group_size)) self.attn1 = Attention( query_dim=dim, heads=num_attention_heads, dim_head=attention_head_dim, dropout=dropout, bias=attention_bias, cross_attention_dim=None, cross_attention_norm=None, ) # 2. 
Cross-Attn self.norm2 = AdaGroupNorm(temb_channels, dim, max(1, dim // group_size)) self.attn2 = Attention( query_dim=dim, cross_attention_dim=cross_attention_dim, heads=num_attention_heads, dim_head=attention_head_dim, dropout=dropout, bias=attention_bias, upcast_attention=upcast_attention, cross_attention_norm=cross_attention_norm, ) def _to_3d(self, hidden_states: torch.Tensor, height: int, weight: int) -> torch.Tensor: return hidden_states.permute(0, 2, 3, 1).reshape(hidden_states.shape[0], height * weight, -1) def _to_4d(self, hidden_states: torch.Tensor, height: int, weight: int) -> torch.Tensor: return hidden_states.permute(0, 2, 1).reshape(hidden_states.shape[0], -1, height, weight) def forward( self, hidden_states: torch.Tensor, encoder_hidden_states: Optional[torch.Tensor] = None, # TODO: mark emb as non-optional (self.norm2 requires it). # requires assessing impact of change to positional param interface. emb: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, cross_attention_kwargs: Optional[Dict[str, Any]] = None, encoder_attention_mask: Optional[torch.Tensor] = None, ) -> torch.Tensor: cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {} if cross_attention_kwargs.get("scale", None) is not None: logger.warning("Passing `scale` to `cross_attention_kwargs` is deprecated. `scale` will be ignored.") # 1. Self-Attention if self.add_self_attention: norm_hidden_states = self.norm1(hidden_states, emb) height, weight = norm_hidden_states.shape[2:] norm_hidden_states = self._to_3d(norm_hidden_states, height, weight) attn_output = self.attn1( norm_hidden_states, encoder_hidden_states=None, attention_mask=attention_mask, **cross_attention_kwargs, ) attn_output = self._to_4d(attn_output, height, weight) hidden_states = attn_output + hidden_states # 2. Cross-Attention/None norm_hidden_states = self.norm2(hidden_states, emb) height, weight = norm_hidden_states.shape[2:] norm_hidden_states = self._to_3d(norm_hidden_states, height, weight) attn_output = self.attn2( norm_hidden_states, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask if encoder_hidden_states is None else encoder_attention_mask, **cross_attention_kwargs, ) attn_output = self._to_4d(attn_output, height, weight) hidden_states = attn_output + hidden_states return hidden_states
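if __name__ == "__main__":
    # Minimal usage sketch for the `KAttentionBlock` defined above, in its default
    # cross-attention-only configuration (add_self_attention=False). All sizes below are
    # illustrative assumptions rather than values used by any particular UNet.
    block = KAttentionBlock(
        dim=64,                  # channels of the 4D feature map
        num_attention_heads=8,
        attention_head_dim=8,
        cross_attention_dim=32,  # width of `encoder_hidden_states`
        temb_channels=128,       # width of `emb` consumed by the AdaGroupNorm layers
    )
    hidden_states = torch.randn(1, 64, 16, 16)      # (batch, channels, height, width)
    encoder_hidden_states = torch.randn(1, 77, 32)  # (batch, seq_len, cross_attention_dim)
    emb = torch.randn(1, 128)                       # conditioning embedding used by norm2
    out = block(hidden_states, encoder_hidden_states=encoder_hidden_states, emb=emb)
    assert out.shape == hidden_states.shape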
diffusers/src/diffusers/models/unets/unet_2d_blocks.py/0
{ "file_path": "diffusers/src/diffusers/models/unets/unet_2d_blocks.py", "repo_id": "diffusers", "token_count": 74343 }
167
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import time from collections import OrderedDict from itertools import combinations from typing import Any, Dict, List, Optional, Union import torch from ..hooks import ModelHook from ..utils import ( is_accelerate_available, logging, ) if is_accelerate_available(): from accelerate.hooks import add_hook_to_module, remove_hook_from_module from accelerate.state import PartialState from accelerate.utils import send_to_device from accelerate.utils.memory import clear_device_cache from accelerate.utils.modeling import convert_file_size_to_int logger = logging.get_logger(__name__) # pylint: disable=invalid-name class CustomOffloadHook(ModelHook): """ A hook that offloads a model on the CPU until its forward pass is called. It ensures the model and its inputs are on the given device. Optionally offloads other models to the CPU before the forward pass is called. Args: execution_device(`str`, `int` or `torch.device`, *optional*): The device on which the model should be executed. Will default to the MPS device if it's available, then GPU 0 if there is a GPU, and finally to the CPU. """ no_grad = False def __init__( self, execution_device: Optional[Union[str, int, torch.device]] = None, other_hooks: Optional[List["UserCustomOffloadHook"]] = None, offload_strategy: Optional["AutoOffloadStrategy"] = None, ): self.execution_device = execution_device if execution_device is not None else PartialState().default_device self.other_hooks = other_hooks self.offload_strategy = offload_strategy self.model_id = None def set_strategy(self, offload_strategy: "AutoOffloadStrategy"): self.offload_strategy = offload_strategy def add_other_hook(self, hook: "UserCustomOffloadHook"): """ Add a hook to the list of hooks to consider for offloading. 
""" if self.other_hooks is None: self.other_hooks = [] self.other_hooks.append(hook) def init_hook(self, module): return module.to("cpu") def pre_forward(self, module, *args, **kwargs): if module.device != self.execution_device: if self.other_hooks is not None: hooks_to_offload = [hook for hook in self.other_hooks if hook.model.device == self.execution_device] # offload all other hooks start_time = time.perf_counter() if self.offload_strategy is not None: hooks_to_offload = self.offload_strategy( hooks=hooks_to_offload, model_id=self.model_id, model=module, execution_device=self.execution_device, ) end_time = time.perf_counter() logger.info( f" time taken to apply offload strategy for {self.model_id}: {(end_time - start_time):.2f} seconds" ) for hook in hooks_to_offload: logger.info( f"moving {self.model_id} to {self.execution_device}, offloading {hook.model_id} to cpu" ) hook.offload() if hooks_to_offload: clear_device_cache() module.to(self.execution_device) return send_to_device(args, self.execution_device), send_to_device(kwargs, self.execution_device) class UserCustomOffloadHook: """ A simple hook grouping a model and a `CustomOffloadHook`, which provides easy APIs for to call the init method of the hook or remove it entirely. """ def __init__(self, model_id, model, hook): self.model_id = model_id self.model = model self.hook = hook def offload(self): self.hook.init_hook(self.model) def attach(self): add_hook_to_module(self.model, self.hook) self.hook.model_id = self.model_id def remove(self): remove_hook_from_module(self.model) self.hook.model_id = None def add_other_hook(self, hook: "UserCustomOffloadHook"): self.hook.add_other_hook(hook) def custom_offload_with_hook( model_id: str, model: torch.nn.Module, execution_device: Union[str, int, torch.device] = None, offload_strategy: Optional["AutoOffloadStrategy"] = None, ): hook = CustomOffloadHook(execution_device=execution_device, offload_strategy=offload_strategy) user_hook = UserCustomOffloadHook(model_id=model_id, model=model, hook=hook) user_hook.attach() return user_hook # this is the class that user can customize to implement their own offload strategy class AutoOffloadStrategy: """ Offload strategy that should be used with `CustomOffloadHook` to automatically offload models to the CPU based on the available memory on the device. """ # YiYi TODO: instead of memory_reserve_margin, we should let user set the maximum_total_models_size to keep on device # the actual memory usage would be higher. 
But it's simpler this way, and can be tested def __init__(self, memory_reserve_margin="3GB"): self.memory_reserve_margin = convert_file_size_to_int(memory_reserve_margin) def __call__(self, hooks, model_id, model, execution_device): if len(hooks) == 0: return [] current_module_size = model.get_memory_footprint() mem_on_device = torch.cuda.mem_get_info(execution_device.index)[0] mem_on_device = mem_on_device - self.memory_reserve_margin if current_module_size < mem_on_device: return [] min_memory_offload = current_module_size - mem_on_device logger.info(f" search for models to offload in order to free up {min_memory_offload / 1024**3:.2f} GB memory") # exclude models that are not currently loaded on the device module_sizes = dict( sorted( {hook.model_id: hook.model.get_memory_footprint() for hook in hooks}.items(), key=lambda x: x[1], reverse=True, ) ) # YiYi/Dhruv TODO: sort smallest to largest, and offload in that order so we would tend to keep the larger models on GPU more often def search_best_candidate(module_sizes, min_memory_offload): """ Search for the optimal combination of models to offload to cpu, given a dictionary of module sizes and a minimum memory offload size. the combination of models should add up to the smallest total size that is larger than `min_memory_offload` """ model_ids = list(module_sizes.keys()) best_candidate = None best_size = float("inf") for r in range(1, len(model_ids) + 1): for candidate_model_ids in combinations(model_ids, r): candidate_size = sum( module_sizes[candidate_model_id] for candidate_model_id in candidate_model_ids ) if candidate_size < min_memory_offload: continue else: if best_candidate is None or candidate_size < best_size: best_candidate = candidate_model_ids best_size = candidate_size return best_candidate best_offload_model_ids = search_best_candidate(module_sizes, min_memory_offload) if best_offload_model_ids is None: # if no combination is found, meaning that we cannot meet the memory requirement, offload all models logger.warning("no combination of models to offload to cpu is found, offloading all models") hooks_to_offload = hooks else: hooks_to_offload = [hook for hook in hooks if hook.model_id in best_offload_model_ids] return hooks_to_offload # utils for displaying component info in a readable format # TODO: move to a different file def summarize_dict_by_value_and_parts(d: Dict[str, Any]) -> Dict[str, Any]: """Summarizes a dictionary by finding common prefixes that share the same value. 
For a dictionary with dot-separated keys like: { 'down_blocks.1.attentions.1.transformer_blocks.0.attn2.processor': [0.6], 'down_blocks.1.attentions.1.transformer_blocks.1.attn2.processor': [0.6], 'up_blocks.1.attentions.0.transformer_blocks.0.attn2.processor': [0.3], } Returns a dictionary where keys are the shortest common prefixes and values are their shared values: { 'down_blocks': [0.6], 'up_blocks': [0.3] } """ # First group by values - convert lists to tuples to make them hashable value_to_keys = {} for key, value in d.items(): value_tuple = tuple(value) if isinstance(value, list) else value if value_tuple not in value_to_keys: value_to_keys[value_tuple] = [] value_to_keys[value_tuple].append(key) def find_common_prefix(keys: List[str]) -> str: """Find the shortest common prefix among a list of dot-separated keys.""" if not keys: return "" if len(keys) == 1: return keys[0] # Split all keys into parts key_parts = [k.split(".") for k in keys] # Find how many initial parts are common common_length = 0 for parts in zip(*key_parts): if len(set(parts)) == 1: # All parts at this position are the same common_length += 1 else: break if common_length == 0: return "" # Return the common prefix return ".".join(key_parts[0][:common_length]) # Create summary by finding common prefixes for each value group summary = {} for value_tuple, keys in value_to_keys.items(): prefix = find_common_prefix(keys) if prefix: # Only add if we found a common prefix # Convert tuple back to list if it was originally a list value = list(value_tuple) if isinstance(d[keys[0]], list) else value_tuple summary[prefix] = value else: summary[""] = value # Use empty string if no common prefix return summary class ComponentsManager: """ A central registry and management system for model components across multiple pipelines. [`ComponentsManager`] provides a unified way to register, track, and reuse model components (like UNet, VAE, text encoders, etc.) across different modular pipelines. It includes features for duplicate detection, memory management, and component organization. <Tip warning={true}> This is an experimental feature and is likely to change in the future. </Tip> Example: ```python from diffusers import ComponentsManager # Create a components manager cm = ComponentsManager() # Add components cm.add("unet", unet_model, collection="sdxl") cm.add("vae", vae_model, collection="sdxl") # Enable auto offloading cm.enable_auto_cpu_offload(device="cuda") # Retrieve components unet = cm.get_one(name="unet", collection="sdxl") ``` """ _available_info_fields = [ "model_id", "added_time", "collection", "class_name", "size_gb", "adapters", "has_hook", "execution_device", "ip_adapter", ] def __init__(self): self.components = OrderedDict() # YiYi TODO: can remove once confirm we don't need this in mellon self.added_time = OrderedDict() # Store when components were added self.collections = OrderedDict() # collection_name -> set of component_names self.model_hooks = None self._auto_offload_enabled = False def _lookup_ids( self, name: Optional[str] = None, collection: Optional[str] = None, load_id: Optional[str] = None, components: Optional[OrderedDict] = None, ): """ Lookup component_ids by name, collection, or load_id. Does not support pattern matching. 
Returns a set of component_ids """ if components is None: components = self.components if name: ids_by_name = set() for component_id, component in components.items(): comp_name = self._id_to_name(component_id) if comp_name == name: ids_by_name.add(component_id) else: ids_by_name = set(components.keys()) if collection: ids_by_collection = set() for component_id, component in components.items(): if component_id in self.collections[collection]: ids_by_collection.add(component_id) else: ids_by_collection = set(components.keys()) if load_id: ids_by_load_id = set() for name, component in components.items(): if hasattr(component, "_diffusers_load_id") and component._diffusers_load_id == load_id: ids_by_load_id.add(name) else: ids_by_load_id = set(components.keys()) ids = ids_by_name.intersection(ids_by_collection).intersection(ids_by_load_id) return ids @staticmethod def _id_to_name(component_id: str): return "_".join(component_id.split("_")[:-1]) def add(self, name: str, component: Any, collection: Optional[str] = None): """ Add a component to the ComponentsManager. Args: name (str): The name of the component component (Any): The component to add collection (Optional[str]): The collection to add the component to Returns: str: The unique component ID, which is generated as "{name}_{id(component)}" where id(component) is Python's built-in unique identifier for the object """ component_id = f"{name}_{id(component)}" is_new_component = True # check for duplicated components for comp_id, comp in self.components.items(): if comp == component: comp_name = self._id_to_name(comp_id) if comp_name == name: logger.warning(f"ComponentsManager: component '{name}' already exists as '{comp_id}'") component_id = comp_id is_new_component = False break else: logger.warning( f"ComponentsManager: adding component '{name}' as '{component_id}', but it is duplicate of '{comp_id}'" f"To remove a duplicate, call `components_manager.remove('<component_id>')`." ) # check for duplicated load_id and warn (we do not delete for you) if hasattr(component, "_diffusers_load_id") and component._diffusers_load_id != "null": components_with_same_load_id = self._lookup_ids(load_id=component._diffusers_load_id) components_with_same_load_id = [id for id in components_with_same_load_id if id != component_id] if components_with_same_load_id: existing = ", ".join(components_with_same_load_id) logger.warning( f"ComponentsManager: adding component '{component_id}', but it has duplicate load_id '{component._diffusers_load_id}' with existing components: {existing}. " f"To remove a duplicate, call `components_manager.remove('<component_id>')`." 
) # add component to components manager self.components[component_id] = component self.added_time[component_id] = time.time() if collection: if collection not in self.collections: self.collections[collection] = set() if component_id not in self.collections[collection]: comp_ids_in_collection = self._lookup_ids(name=name, collection=collection) for comp_id in comp_ids_in_collection: logger.warning( f"ComponentsManager: removing existing {name} from collection '{collection}': {comp_id}" ) # remove existing component from this collection (if it is not in any other collection, will be removed from ComponentsManager) self.remove_from_collection(comp_id, collection) self.collections[collection].add(component_id) logger.info( f"ComponentsManager: added component '{name}' in collection '{collection}': {component_id}" ) else: logger.info(f"ComponentsManager: added component '{name}' as '{component_id}'") if self._auto_offload_enabled and is_new_component: self.enable_auto_cpu_offload(self._auto_offload_device) return component_id def remove_from_collection(self, component_id: str, collection: str): """ Remove a component from a collection. """ if collection not in self.collections: logger.warning(f"Collection '{collection}' not found in ComponentsManager") return if component_id not in self.collections[collection]: logger.warning(f"Component '{component_id}' not found in collection '{collection}'") return # remove from the collection self.collections[collection].remove(component_id) # check if this component is in any other collection comp_colls = [coll for coll, comps in self.collections.items() if component_id in comps] if not comp_colls: # only if no other collection contains this component, remove it logger.warning(f"ComponentsManager: removing component '{component_id}' from ComponentsManager") self.remove(component_id) def remove(self, component_id: str = None): """ Remove a component from the ComponentsManager. Args: component_id (str): The ID of the component to remove """ if component_id not in self.components: logger.warning(f"Component '{component_id}' not found in ComponentsManager") return component = self.components.pop(component_id) self.added_time.pop(component_id) for collection in self.collections: if component_id in self.collections[collection]: self.collections[collection].remove(component_id) if self._auto_offload_enabled: self.enable_auto_cpu_offload(self._auto_offload_device) else: if isinstance(component, torch.nn.Module): component.to("cpu") del component import gc gc.collect() if torch.cuda.is_available(): torch.cuda.empty_cache() # YiYi TODO: rename to search_components for now, may remove this method def search_components( self, names: Optional[str] = None, collection: Optional[str] = None, load_id: Optional[str] = None, return_dict_with_names: bool = True, ): """ Search components by name with simple pattern matching. Optionally filter by collection or load_id. 
Args: names: Component name(s) or pattern(s) Patterns: - "unet" : match any component with base name "unet" (e.g., unet_123abc) - "!unet" : everything except components with base name "unet" - "unet*" : anything with base name starting with "unet" - "!unet*" : anything with base name NOT starting with "unet" - "*unet*" : anything with base name containing "unet" - "!*unet*" : anything with base name NOT containing "unet" - "refiner|vae|unet" : anything with base name exactly matching "refiner", "vae", or "unet" - "!refiner|vae|unet" : anything with base name NOT exactly matching "refiner", "vae", or "unet" - "unet*|vae*" : anything with base name starting with "unet" OR starting with "vae" collection: Optional collection to filter by load_id: Optional load_id to filter by return_dict_with_names: If True, returns a dictionary with component names as keys, throw an error if multiple components with the same name are found If False, returns a dictionary with component IDs as keys Returns: Dictionary mapping component names to components if return_dict_with_names=True, or a dictionary mapping component IDs to components if return_dict_with_names=False """ # select components based on collection and load_id filters selected_ids = self._lookup_ids(collection=collection, load_id=load_id) components = {k: self.components[k] for k in selected_ids} def get_return_dict(components, return_dict_with_names): """ Create a dictionary mapping component names to components if return_dict_with_names=True, or a dictionary mapping component IDs to components if return_dict_with_names=False, throw an error if duplicate component names are found when return_dict_with_names=True """ if return_dict_with_names: dict_to_return = {} for comp_id, comp in components.items(): comp_name = self._id_to_name(comp_id) if comp_name in dict_to_return: raise ValueError( f"Duplicate component names found in the search results: {comp_name}, please set `return_dict_with_names=False` to return a dictionary with component IDs as keys" ) dict_to_return[comp_name] = comp return dict_to_return else: return components # if no names are provided, return the filtered components as it is if names is None: return get_return_dict(components, return_dict_with_names) # if names is not a string, raise an error elif not isinstance(names, str): raise ValueError(f"Invalid type for `names: {type(names)}, only support string") # Create mapping from component_id to base_name for components to be used for pattern matching base_names = {comp_id: self._id_to_name(comp_id) for comp_id in components.keys()} # Helper function to check if a component matches a pattern based on its base name def matches_pattern(component_id, pattern, exact_match=False): """ Helper function to check if a component matches a pattern based on its base name. Args: component_id: The component ID to check pattern: The pattern to match against exact_match: If True, only exact matches to base_name are considered """ base_name = base_names[component_id] # Exact match with base name if exact_match: return pattern == base_name # Prefix match (ends with *) elif pattern.endswith("*"): prefix = pattern[:-1] return base_name.startswith(prefix) # Contains match (starts with *) elif pattern.startswith("*"): search = pattern[1:-1] if pattern.endswith("*") else pattern[1:] return search in base_name # Exact match (no wildcards) else: return pattern == base_name # Check if this is a "not" pattern is_not_pattern = names.startswith("!") if is_not_pattern: names = names[1:] # Remove the ! 
prefix # Handle OR patterns (containing |) if "|" in names: terms = names.split("|") matches = {} for comp_id, comp in components.items(): # For OR patterns with exact names (no wildcards), we do exact matching on base names exact_match = all(not (term.startswith("*") or term.endswith("*")) for term in terms) # Check if any of the terms match this component should_include = any(matches_pattern(comp_id, term, exact_match) for term in terms) # Flip the decision if this is a NOT pattern if is_not_pattern: should_include = not should_include if should_include: matches[comp_id] = comp log_msg = "NOT " if is_not_pattern else "" match_type = "exactly matching" if exact_match else "matching any of patterns" logger.info(f"Getting components {log_msg}{match_type} {terms}: {list(matches.keys())}") # Try exact match with a base name elif any(names == base_name for base_name in base_names.values()): # Find all components with this base name matches = { comp_id: comp for comp_id, comp in components.items() if (base_names[comp_id] == names) != is_not_pattern } if is_not_pattern: logger.info(f"Getting all components except those with base name '{names}': {list(matches.keys())}") else: logger.info(f"Getting components with base name '{names}': {list(matches.keys())}") # Prefix match (ends with *) elif names.endswith("*"): prefix = names[:-1] matches = { comp_id: comp for comp_id, comp in components.items() if base_names[comp_id].startswith(prefix) != is_not_pattern } if is_not_pattern: logger.info(f"Getting components NOT starting with '{prefix}': {list(matches.keys())}") else: logger.info(f"Getting components starting with '{prefix}': {list(matches.keys())}") # Contains match (starts with *) elif names.startswith("*"): search = names[1:-1] if names.endswith("*") else names[1:] matches = { comp_id: comp for comp_id, comp in components.items() if (search in base_names[comp_id]) != is_not_pattern } if is_not_pattern: logger.info(f"Getting components NOT containing '{search}': {list(matches.keys())}") else: logger.info(f"Getting components containing '{search}': {list(matches.keys())}") # Substring match (no wildcards, but not an exact component name) elif any(names in base_name for base_name in base_names.values()): matches = { comp_id: comp for comp_id, comp in components.items() if (names in base_names[comp_id]) != is_not_pattern } if is_not_pattern: logger.info(f"Getting components NOT containing '{names}': {list(matches.keys())}") else: logger.info(f"Getting components containing '{names}': {list(matches.keys())}") else: raise ValueError(f"Component or pattern '{names}' not found in ComponentsManager") if not matches: raise ValueError(f"No components found matching pattern '{names}'") return get_return_dict(matches, return_dict_with_names) def enable_auto_cpu_offload(self, device: Union[str, int, torch.device] = "cuda", memory_reserve_margin="3GB"): """ Enable automatic CPU offloading for all components. The algorithm works as follows: 1. All models start on CPU by default 2. When a model's forward pass is called, it's moved to the execution device 3. If there's insufficient memory, other models on the device are moved back to CPU 4. The system tries to offload the smallest combination of models that frees enough memory 5. Models stay on the execution device until another model needs memory and forces them off Args: device (Union[str, int, torch.device]): The execution device where models are moved for forward passes memory_reserve_margin (str): The memory reserve margin to use, default is 3GB. 
This is the amount of memory to keep free on the device to avoid running out of memory during model execution (e.g., for intermediate activations, gradients, etc.) """ if not is_accelerate_available(): raise ImportError("Make sure to install accelerate to use auto_cpu_offload") for name, component in self.components.items(): if isinstance(component, torch.nn.Module) and hasattr(component, "_hf_hook"): remove_hook_from_module(component, recurse=True) self.disable_auto_cpu_offload() offload_strategy = AutoOffloadStrategy(memory_reserve_margin=memory_reserve_margin) device = torch.device(device) if device.index is None: device = torch.device(f"{device.type}:{0}") all_hooks = [] for name, component in self.components.items(): if isinstance(component, torch.nn.Module): hook = custom_offload_with_hook(name, component, device, offload_strategy=offload_strategy) all_hooks.append(hook) for hook in all_hooks: other_hooks = [h for h in all_hooks if h is not hook] for other_hook in other_hooks: if other_hook.hook.execution_device == hook.hook.execution_device: hook.add_other_hook(other_hook) self.model_hooks = all_hooks self._auto_offload_enabled = True self._auto_offload_device = device def disable_auto_cpu_offload(self): """ Disable automatic CPU offloading for all components. """ if self.model_hooks is None: self._auto_offload_enabled = False return for hook in self.model_hooks: hook.offload() hook.remove() if self.model_hooks: clear_device_cache() self.model_hooks = None self._auto_offload_enabled = False # YiYi TODO: (1) add quantization info def get_model_info( self, component_id: str, fields: Optional[Union[str, List[str]]] = None, ) -> Optional[Dict[str, Any]]: """Get comprehensive information about a component. Args: component_id (str): Name of the component to get info for fields (Optional[Union[str, List[str]]]): Field(s) to return. Can be a string for single field or list of fields. If None, uses the available_info_fields setting. Returns: Dictionary containing requested component metadata. If fields is specified, returns only those fields. Otherwise, returns all fields. 
""" if component_id not in self.components: raise ValueError(f"Component '{component_id}' not found in ComponentsManager") component = self.components[component_id] # Validate fields if specified if fields is not None: if isinstance(fields, str): fields = [fields] for field in fields: if field not in self._available_info_fields: raise ValueError(f"Field '{field}' not found in available_info_fields") # Build complete info dict first info = { "model_id": component_id, "added_time": self.added_time[component_id], "collection": ", ".join([coll for coll, comps in self.collections.items() if component_id in comps]) or None, } # Additional info for torch.nn.Module components if isinstance(component, torch.nn.Module): # Check for hook information has_hook = hasattr(component, "_hf_hook") execution_device = None if has_hook and hasattr(component._hf_hook, "execution_device"): execution_device = component._hf_hook.execution_device info.update( { "class_name": component.__class__.__name__, "size_gb": component.get_memory_footprint() / (1024**3), "adapters": None, # Default to None "has_hook": has_hook, "execution_device": execution_device, } ) # Get adapters if applicable if hasattr(component, "peft_config"): info["adapters"] = list(component.peft_config.keys()) # Check for IP-Adapter scales if hasattr(component, "_load_ip_adapter_weights") and hasattr(component, "attn_processors"): processors = copy.deepcopy(component.attn_processors) # First check if any processor is an IP-Adapter processor_types = [v.__class__.__name__ for v in processors.values()] if any("IPAdapter" in ptype for ptype in processor_types): # Then get scales only from IP-Adapter processors scales = { k: v.scale for k, v in processors.items() if hasattr(v, "scale") and "IPAdapter" in v.__class__.__name__ } if scales: info["ip_adapter"] = summarize_dict_by_value_and_parts(scales) # If fields specified, filter info if fields is not None: return {k: v for k, v in info.items() if k in fields} else: return info # YiYi TODO: (1) add display fields, allow user to set which fields to display in the comnponents table def __repr__(self): # Handle empty components case if not self.components: return "Components:\n" + "=" * 50 + "\nNo components registered.\n" + "=" * 50 # Extract load_id if available def get_load_id(component): if hasattr(component, "_diffusers_load_id"): return component._diffusers_load_id return "N/A" # Format device info compactly def format_device(component, info): if not info["has_hook"]: return str(getattr(component, "device", "N/A")) else: device = str(getattr(component, "device", "N/A")) exec_device = str(info["execution_device"] or "N/A") return f"{device}({exec_device})" # Get max length of load_ids for models load_ids = [ get_load_id(component) for component in self.components.values() if isinstance(component, torch.nn.Module) and hasattr(component, "_diffusers_load_id") ] max_load_id_len = max([15] + [len(str(lid)) for lid in load_ids]) if load_ids else 15 # Get all collections for each component component_collections = {} for name in self.components.keys(): component_collections[name] = [] for coll, comps in self.collections.items(): if name in comps: component_collections[name].append(coll) if not component_collections[name]: component_collections[name] = ["N/A"] # Find the maximum collection name length all_collections = [coll for colls in component_collections.values() for coll in colls] max_collection_len = max(10, max(len(str(c)) for c in all_collections)) if all_collections else 10 col_widths = { "id": 
max(15, max(len(name) for name in self.components.keys())), "class": max(25, max(len(component.__class__.__name__) for component in self.components.values())), "device": 20, "dtype": 15, "size": 10, "load_id": max_load_id_len, "collection": max_collection_len, } # Create the header lines sep_line = "=" * (sum(col_widths.values()) + len(col_widths) * 3 - 1) + "\n" dash_line = "-" * (sum(col_widths.values()) + len(col_widths) * 3 - 1) + "\n" output = "Components:\n" + sep_line # Separate components into models and others models = {k: v for k, v in self.components.items() if isinstance(v, torch.nn.Module)} others = {k: v for k, v in self.components.items() if not isinstance(v, torch.nn.Module)} # Models section if models: output += "Models:\n" + dash_line # Column headers output += f"{'Name_ID':<{col_widths['id']}} | {'Class':<{col_widths['class']}} | " output += f"{'Device: act(exec)':<{col_widths['device']}} | {'Dtype':<{col_widths['dtype']}} | " output += f"{'Size (GB)':<{col_widths['size']}} | {'Load ID':<{col_widths['load_id']}} | Collection\n" output += dash_line # Model entries for name, component in models.items(): info = self.get_model_info(name) device_str = format_device(component, info) dtype = str(component.dtype) if hasattr(component, "dtype") else "N/A" load_id = get_load_id(component) # Print first collection on the main line first_collection = component_collections[name][0] if component_collections[name] else "N/A" output += f"{name:<{col_widths['id']}} | {info['class_name']:<{col_widths['class']}} | " output += f"{device_str:<{col_widths['device']}} | {dtype:<{col_widths['dtype']}} | " output += f"{info['size_gb']:<{col_widths['size']}.2f} | {load_id:<{col_widths['load_id']}} | {first_collection}\n" # Print additional collections on separate lines if they exist for i in range(1, len(component_collections[name])): collection = component_collections[name][i] output += f"{'':<{col_widths['id']}} | {'':<{col_widths['class']}} | " output += f"{'':<{col_widths['device']}} | {'':<{col_widths['dtype']}} | " output += f"{'':<{col_widths['size']}} | {'':<{col_widths['load_id']}} | {collection}\n" output += dash_line # Other components section if others: if models: # Add extra newline if we had models section output += "\n" output += "Other Components:\n" + dash_line # Column headers for other components output += f"{'ID':<{col_widths['id']}} | {'Class':<{col_widths['class']}} | Collection\n" output += dash_line # Other component entries for name, component in others.items(): info = self.get_model_info(name) # Print first collection on the main line first_collection = component_collections[name][0] if component_collections[name] else "N/A" output += f"{name:<{col_widths['id']}} | {component.__class__.__name__:<{col_widths['class']}} | {first_collection}\n" # Print additional collections on separate lines if they exist for i in range(1, len(component_collections[name])): collection = component_collections[name][i] output += f"{'':<{col_widths['id']}} | {'':<{col_widths['class']}} | {collection}\n" output += dash_line # Add additional component info output += "\nAdditional Component Info:\n" + "=" * 50 + "\n" for name in self.components: info = self.get_model_info(name) if info is not None and (info.get("adapters") is not None or info.get("ip_adapter")): output += f"\n{name}:\n" if info.get("adapters") is not None: output += f" Adapters: {info['adapters']}\n" if info.get("ip_adapter"): output += " IP-Adapter: Enabled\n" return output def get_one( self, component_id: Optional[str] = None, 
name: Optional[str] = None, collection: Optional[str] = None, load_id: Optional[str] = None, ) -> Any: """ Get a single component by either: - searching name (pattern matching), collection, or load_id. - passing in a component_id Raises an error if multiple components match or none are found. Args: component_id (Optional[str]): Optional component ID to get name (Optional[str]): Component name or pattern collection (Optional[str]): Optional collection to filter by load_id (Optional[str]): Optional load_id to filter by Returns: A single component Raises: ValueError: If no components match or multiple components match """ if component_id is not None and (name is not None or collection is not None or load_id is not None): raise ValueError("If searching by component_id, do not pass name, collection, or load_id") # search by component_id if component_id is not None: if component_id not in self.components: raise ValueError(f"Component '{component_id}' not found in ComponentsManager") return self.components[component_id] # search with name/collection/load_id results = self.search_components(name, collection, load_id) if not results: raise ValueError(f"No components found matching '{name}'") if len(results) > 1: raise ValueError(f"Multiple components found matching '{name}': {list(results.keys())}") return next(iter(results.values())) def get_ids(self, names: Union[str, List[str]] = None, collection: Optional[str] = None): """ Get component IDs by a list of names, optionally filtered by collection. Args: names (Union[str, List[str]]): List of component names collection (Optional[str]): Optional collection to filter by Returns: List[str]: List of component IDs """ ids = set() if not isinstance(names, list): names = [names] for name in names: ids.update(self._lookup_ids(name=name, collection=collection)) return list(ids) def get_components_by_ids(self, ids: List[str], return_dict_with_names: Optional[bool] = True): """ Get components by a list of IDs. Args: ids (List[str]): List of component IDs return_dict_with_names (Optional[bool]): Whether to return a dictionary with component names as keys: Returns: Dict[str, Any]: Dictionary of components. - If return_dict_with_names=True, keys are component names. - If return_dict_with_names=False, keys are component IDs. Raises: ValueError: If duplicate component names are found in the search results when return_dict_with_names=True """ components = {id: self.components[id] for id in ids} if return_dict_with_names: dict_to_return = {} for comp_id, comp in components.items(): comp_name = self._id_to_name(comp_id) if comp_name in dict_to_return: raise ValueError( f"Duplicate component names found in the search results: {comp_name}, please set `return_dict_with_names=False` to return a dictionary with component IDs as keys" ) dict_to_return[comp_name] = comp return dict_to_return else: return components def get_components_by_names(self, names: List[str], collection: Optional[str] = None): """ Get components by a list of names, optionally filtered by collection. Args: names (List[str]): List of component names collection (Optional[str]): Optional collection to filter by Returns: Dict[str, Any]: Dictionary of components with component names as keys Raises: ValueError: If duplicate component names are found in the search results """ ids = self.get_ids(names, collection) return self.get_components_by_ids(ids)
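if __name__ == "__main__":
    # Usage sketch for the ComponentsManager defined above. Plain `nn.Linear` modules
    # stand in for real pipeline components here; memory-related features such as
    # `enable_auto_cpu_offload` and the `__repr__` size column additionally expect
    # models that expose `get_memory_footprint()` (e.g. diffusers `ModelMixin` subclasses).
    import torch.nn as nn

    cm = ComponentsManager()
    unet_id = cm.add("unet", nn.Linear(4, 4), collection="sdxl")
    cm.add("vae", nn.Linear(4, 4), collection="sdxl")
    cm.add("refiner_unet", nn.Linear(4, 4), collection="refiner")

    # Exact-name lookup, scoped to a collection.
    unet = cm.get_one(name="unet", collection="sdxl")

    # Pattern matching on base names, as documented in `search_components`.
    print(list(cm.search_components("unet*")))   # base names starting with "unet"
    print(list(cm.search_components("*unet*")))  # base names containing "unet"
    print(list(cm.search_components("!vae")))    # everything except base name "vae"

    # Components can be removed directly by id, or per collection.
    cm.remove(unet_id)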
diffusers/src/diffusers/modular_pipelines/components_manager.py/0
{ "file_path": "diffusers/src/diffusers/modular_pipelines/components_manager.py", "repo_id": "diffusers", "token_count": 20006 }
168
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ...utils import logging from ..modular_pipeline import AutoPipelineBlocks, SequentialPipelineBlocks from ..modular_pipeline_utils import InsertableDict from .before_denoise import ( StableDiffusionXLControlNetInputStep, StableDiffusionXLControlNetUnionInputStep, StableDiffusionXLImg2ImgPrepareAdditionalConditioningStep, StableDiffusionXLImg2ImgPrepareLatentsStep, StableDiffusionXLImg2ImgSetTimestepsStep, StableDiffusionXLInpaintPrepareLatentsStep, StableDiffusionXLInputStep, StableDiffusionXLPrepareAdditionalConditioningStep, StableDiffusionXLPrepareLatentsStep, StableDiffusionXLSetTimestepsStep, ) from .decoders import ( StableDiffusionXLDecodeStep, StableDiffusionXLInpaintOverlayMaskStep, ) from .denoise import ( StableDiffusionXLControlNetDenoiseStep, StableDiffusionXLDenoiseStep, StableDiffusionXLInpaintControlNetDenoiseStep, StableDiffusionXLInpaintDenoiseStep, ) from .encoders import ( StableDiffusionXLInpaintVaeEncoderStep, StableDiffusionXLIPAdapterStep, StableDiffusionXLTextEncoderStep, StableDiffusionXLVaeEncoderStep, ) logger = logging.get_logger(__name__) # pylint: disable=invalid-name # auto blocks & sequential blocks & mappings # vae encoder (run before before_denoise) class StableDiffusionXLAutoVaeEncoderStep(AutoPipelineBlocks): block_classes = [StableDiffusionXLInpaintVaeEncoderStep, StableDiffusionXLVaeEncoderStep] block_names = ["inpaint", "img2img"] block_trigger_inputs = ["mask_image", "image"] @property def description(self): return ( "Vae encoder step that encode the image inputs into their latent representations.\n" + "This is an auto pipeline block that works for both inpainting and img2img tasks.\n" + " - `StableDiffusionXLInpaintVaeEncoderStep` (inpaint) is used when `mask_image` is provided.\n" + " - `StableDiffusionXLVaeEncoderStep` (img2img) is used when only `image` is provided." + " - if neither `mask_image` nor `image` is provided, step will be skipped." ) # optional ip-adapter (run before input step) class StableDiffusionXLAutoIPAdapterStep(AutoPipelineBlocks): block_classes = [StableDiffusionXLIPAdapterStep] block_names = ["ip_adapter"] block_trigger_inputs = ["ip_adapter_image"] @property def description(self): return "Run IP Adapter step if `ip_adapter_image` is provided. 
This step should be placed before the 'input' step.\n" # before_denoise: text2img class StableDiffusionXLBeforeDenoiseStep(SequentialPipelineBlocks): block_classes = [ StableDiffusionXLInputStep, StableDiffusionXLSetTimestepsStep, StableDiffusionXLPrepareLatentsStep, StableDiffusionXLPrepareAdditionalConditioningStep, ] block_names = ["input", "set_timesteps", "prepare_latents", "prepare_add_cond"] @property def description(self): return ( "Before denoise step that prepare the inputs for the denoise step.\n" + "This is a sequential pipeline blocks:\n" + " - `StableDiffusionXLInputStep` is used to adjust the batch size of the model inputs\n" + " - `StableDiffusionXLSetTimestepsStep` is used to set the timesteps\n" + " - `StableDiffusionXLPrepareLatentsStep` is used to prepare the latents\n" + " - `StableDiffusionXLPrepareAdditionalConditioningStep` is used to prepare the additional conditioning\n" ) # before_denoise: img2img class StableDiffusionXLImg2ImgBeforeDenoiseStep(SequentialPipelineBlocks): block_classes = [ StableDiffusionXLInputStep, StableDiffusionXLImg2ImgSetTimestepsStep, StableDiffusionXLImg2ImgPrepareLatentsStep, StableDiffusionXLImg2ImgPrepareAdditionalConditioningStep, ] block_names = ["input", "set_timesteps", "prepare_latents", "prepare_add_cond"] @property def description(self): return ( "Before denoise step that prepare the inputs for the denoise step for img2img task.\n" + "This is a sequential pipeline blocks:\n" + " - `StableDiffusionXLInputStep` is used to adjust the batch size of the model inputs\n" + " - `StableDiffusionXLImg2ImgSetTimestepsStep` is used to set the timesteps\n" + " - `StableDiffusionXLImg2ImgPrepareLatentsStep` is used to prepare the latents\n" + " - `StableDiffusionXLImg2ImgPrepareAdditionalConditioningStep` is used to prepare the additional conditioning\n" ) # before_denoise: inpainting class StableDiffusionXLInpaintBeforeDenoiseStep(SequentialPipelineBlocks): block_classes = [ StableDiffusionXLInputStep, StableDiffusionXLImg2ImgSetTimestepsStep, StableDiffusionXLInpaintPrepareLatentsStep, StableDiffusionXLImg2ImgPrepareAdditionalConditioningStep, ] block_names = ["input", "set_timesteps", "prepare_latents", "prepare_add_cond"] @property def description(self): return ( "Before denoise step that prepare the inputs for the denoise step for inpainting task.\n" + "This is a sequential pipeline blocks:\n" + " - `StableDiffusionXLInputStep` is used to adjust the batch size of the model inputs\n" + " - `StableDiffusionXLImg2ImgSetTimestepsStep` is used to set the timesteps\n" + " - `StableDiffusionXLInpaintPrepareLatentsStep` is used to prepare the latents\n" + " - `StableDiffusionXLImg2ImgPrepareAdditionalConditioningStep` is used to prepare the additional conditioning\n" ) # before_denoise: all task (text2img, img2img, inpainting) class StableDiffusionXLAutoBeforeDenoiseStep(AutoPipelineBlocks): block_classes = [ StableDiffusionXLInpaintBeforeDenoiseStep, StableDiffusionXLImg2ImgBeforeDenoiseStep, StableDiffusionXLBeforeDenoiseStep, ] block_names = ["inpaint", "img2img", "text2img"] block_trigger_inputs = ["mask", "image_latents", None] @property def description(self): return ( "Before denoise step that prepare the inputs for the denoise step.\n" + "This is an auto pipeline block that works for text2img, img2img and inpainting tasks as well as controlnet, controlnet_union.\n" + " - `StableDiffusionXLInpaintBeforeDenoiseStep` (inpaint) is used when both `mask` and `image_latents` are provided.\n" + " - `StableDiffusionXLImg2ImgBeforeDenoiseStep` 
(img2img) is used when only `image_latents` is provided.\n" + " - `StableDiffusionXLBeforeDenoiseStep` (text2img) is used when both `image_latents` and `mask` are not provided.\n" ) # optional controlnet input step (after before_denoise, before denoise) # works for both controlnet and controlnet_union class StableDiffusionXLAutoControlNetInputStep(AutoPipelineBlocks): block_classes = [StableDiffusionXLControlNetUnionInputStep, StableDiffusionXLControlNetInputStep] block_names = ["controlnet_union", "controlnet"] block_trigger_inputs = ["control_mode", "control_image"] @property def description(self): return ( "Controlnet Input step that prepares the controlnet input.\n" + "This is an auto pipeline block that works for both controlnet and controlnet_union.\n" + " (it should be called right before the denoise step)" + " - `StableDiffusionXLControlNetUnionInputStep` is called to prepare the controlnet input when `control_mode` and `control_image` are provided.\n" + " - `StableDiffusionXLControlNetInputStep` is called to prepare the controlnet input when `control_image` is provided." + " - if neither `control_mode` nor `control_image` is provided, step will be skipped." ) # denoise: controlnet (text2img, img2img, inpainting) class StableDiffusionXLAutoControlNetDenoiseStep(AutoPipelineBlocks): block_classes = [StableDiffusionXLInpaintControlNetDenoiseStep, StableDiffusionXLControlNetDenoiseStep] block_names = ["inpaint_controlnet_denoise", "controlnet_denoise"] block_trigger_inputs = ["mask", "controlnet_cond"] @property def description(self) -> str: return ( "Denoise step that iteratively denoises the latents with controlnet. " "This is an auto pipeline block that uses controlnet for text2img, img2img and inpainting tasks. " "This block should not be used without a controlnet_cond input." " - `StableDiffusionXLInpaintControlNetDenoiseStep` (inpaint_controlnet_denoise) is used when mask is provided." " - `StableDiffusionXLControlNetDenoiseStep` (controlnet_denoise) is used when mask is not provided but controlnet_cond is provided." " - If neither mask nor controlnet_cond are provided, step will be skipped." ) # denoise: all tasks with or without controlnet (text2img, img2img, inpainting) class StableDiffusionXLAutoDenoiseStep(AutoPipelineBlocks): block_classes = [ StableDiffusionXLAutoControlNetDenoiseStep, StableDiffusionXLInpaintDenoiseStep, StableDiffusionXLDenoiseStep, ] block_names = ["controlnet_denoise", "inpaint_denoise", "denoise"] block_trigger_inputs = ["controlnet_cond", "mask", None] @property def description(self) -> str: return ( "Denoise step that iteratively denoises the latents. " "This is an auto pipeline block that works for text2img, img2img and inpainting tasks. It can be used with or without controlnet." " - `StableDiffusionXLAutoControlNetDenoiseStep` (controlnet_denoise) is used when controlnet_cond is provided (supports controlnet with text2img, img2img and inpainting tasks)." " - `StableDiffusionXLInpaintDenoiseStep` (inpaint_denoise) is used when mask is provided (supports inpainting tasks)." " - `StableDiffusionXLDenoiseStep` (denoise) is used when neither mask nor controlnet_cond are provided (supports text2img and img2img tasks)." 
) # decode: inpaint class StableDiffusionXLInpaintDecodeStep(SequentialPipelineBlocks): block_classes = [StableDiffusionXLDecodeStep, StableDiffusionXLInpaintOverlayMaskStep] block_names = ["decode", "mask_overlay"] @property def description(self): return ( "Inpaint decode step that decode the denoised latents into images outputs.\n" + "This is a sequential pipeline blocks:\n" + " - `StableDiffusionXLDecodeStep` is used to decode the denoised latents into images\n" + " - `StableDiffusionXLInpaintOverlayMaskStep` is used to overlay the mask on the image" ) # decode: all task (text2img, img2img, inpainting) class StableDiffusionXLAutoDecodeStep(AutoPipelineBlocks): block_classes = [StableDiffusionXLInpaintDecodeStep, StableDiffusionXLDecodeStep] block_names = ["inpaint", "non-inpaint"] block_trigger_inputs = ["padding_mask_crop", None] @property def description(self): return ( "Decode step that decode the denoised latents into images outputs.\n" + "This is an auto pipeline block that works for inpainting and non-inpainting tasks.\n" + " - `StableDiffusionXLInpaintDecodeStep` (inpaint) is used when `padding_mask_crop` is provided.\n" + " - `StableDiffusionXLDecodeStep` (non-inpaint) is used when `padding_mask_crop` is not provided." ) # ip-adapter, controlnet, text2img, img2img, inpainting class StableDiffusionXLAutoBlocks(SequentialPipelineBlocks): block_classes = [ StableDiffusionXLTextEncoderStep, StableDiffusionXLAutoIPAdapterStep, StableDiffusionXLAutoVaeEncoderStep, StableDiffusionXLAutoBeforeDenoiseStep, StableDiffusionXLAutoControlNetInputStep, StableDiffusionXLAutoDenoiseStep, StableDiffusionXLAutoDecodeStep, ] block_names = [ "text_encoder", "ip_adapter", "image_encoder", "before_denoise", "controlnet_input", "denoise", "decoder", ] @property def description(self): return ( "Auto Modular pipeline for text-to-image, image-to-image, inpainting, and controlnet tasks using Stable Diffusion XL.\n" + "- for image-to-image generation, you need to provide either `image` or `image_latents`\n" + "- for inpainting, you need to provide `mask_image` and `image`, optionally you can provide `padding_mask_crop` \n" + "- to run the controlnet workflow, you need to provide `control_image`\n" + "- to run the controlnet_union workflow, you need to provide `control_image` and `control_mode`\n" + "- to run the ip_adapter workflow, you need to provide `ip_adapter_image`\n" + "- for text-to-image generation, all you need to provide is `prompt`" ) # controlnet (input + denoise step) class StableDiffusionXLAutoControlnetStep(SequentialPipelineBlocks): block_classes = [ StableDiffusionXLAutoControlNetInputStep, StableDiffusionXLAutoControlNetDenoiseStep, ] block_names = ["controlnet_input", "controlnet_denoise"] @property def description(self): return ( "Controlnet auto step that prepare the controlnet input and denoise the latents. " + "It works for both controlnet and controlnet_union and supports text2img, img2img and inpainting tasks." 
+ " (it should be used to replace the 'denoise' step)" ) TEXT2IMAGE_BLOCKS = InsertableDict( [ ("text_encoder", StableDiffusionXLTextEncoderStep), ("input", StableDiffusionXLInputStep), ("set_timesteps", StableDiffusionXLSetTimestepsStep), ("prepare_latents", StableDiffusionXLPrepareLatentsStep), ("prepare_add_cond", StableDiffusionXLPrepareAdditionalConditioningStep), ("denoise", StableDiffusionXLDenoiseStep), ("decode", StableDiffusionXLDecodeStep), ] ) IMAGE2IMAGE_BLOCKS = InsertableDict( [ ("text_encoder", StableDiffusionXLTextEncoderStep), ("image_encoder", StableDiffusionXLVaeEncoderStep), ("input", StableDiffusionXLInputStep), ("set_timesteps", StableDiffusionXLImg2ImgSetTimestepsStep), ("prepare_latents", StableDiffusionXLImg2ImgPrepareLatentsStep), ("prepare_add_cond", StableDiffusionXLImg2ImgPrepareAdditionalConditioningStep), ("denoise", StableDiffusionXLDenoiseStep), ("decode", StableDiffusionXLDecodeStep), ] ) INPAINT_BLOCKS = InsertableDict( [ ("text_encoder", StableDiffusionXLTextEncoderStep), ("image_encoder", StableDiffusionXLInpaintVaeEncoderStep), ("input", StableDiffusionXLInputStep), ("set_timesteps", StableDiffusionXLImg2ImgSetTimestepsStep), ("prepare_latents", StableDiffusionXLInpaintPrepareLatentsStep), ("prepare_add_cond", StableDiffusionXLImg2ImgPrepareAdditionalConditioningStep), ("denoise", StableDiffusionXLInpaintDenoiseStep), ("decode", StableDiffusionXLInpaintDecodeStep), ] ) CONTROLNET_BLOCKS = InsertableDict( [ ("denoise", StableDiffusionXLAutoControlnetStep), ] ) IP_ADAPTER_BLOCKS = InsertableDict( [ ("ip_adapter", StableDiffusionXLAutoIPAdapterStep), ] ) AUTO_BLOCKS = InsertableDict( [ ("text_encoder", StableDiffusionXLTextEncoderStep), ("ip_adapter", StableDiffusionXLAutoIPAdapterStep), ("image_encoder", StableDiffusionXLAutoVaeEncoderStep), ("before_denoise", StableDiffusionXLAutoBeforeDenoiseStep), ("controlnet_input", StableDiffusionXLAutoControlNetInputStep), ("denoise", StableDiffusionXLAutoDenoiseStep), ("decode", StableDiffusionXLAutoDecodeStep), ] ) ALL_BLOCKS = { "text2img": TEXT2IMAGE_BLOCKS, "img2img": IMAGE2IMAGE_BLOCKS, "inpaint": INPAINT_BLOCKS, "controlnet": CONTROLNET_BLOCKS, "ip_adapter": IP_ADAPTER_BLOCKS, "auto": AUTO_BLOCKS, }
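A hedged usage sketch of the blocks defined above: only the class and preset names (`StableDiffusionXLAutoBlocks`, `ALL_BLOCKS`) come from this module, while the import path simply mirrors the file location and the no-argument construction is an assumption about the modular-pipelines API rather than a documented contract.

```py
# Hedged sketch: class/preset names are from the module above; the import path and the
# no-argument constructor are assumptions, not a confirmed public API.
from diffusers.modular_pipelines.stable_diffusion_xl.modular_blocks import (
    ALL_BLOCKS,
    StableDiffusionXLAutoBlocks,
)

# The auto block's description lists which inputs (mask, control_image, ip_adapter_image, ...)
# trigger each sub-workflow at runtime.
auto_blocks = StableDiffusionXLAutoBlocks()
print(auto_blocks.description)

# The presets are plain name -> block-class mappings, so a task-specific workflow can be
# inspected (or customized) before any model components are loaded.
for name, block_cls in ALL_BLOCKS["text2img"].items():
    print(f"{name}: {block_cls.__name__}")
```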
diffusers/src/diffusers/modular_pipelines/stable_diffusion_xl/modular_blocks.py/0
{ "file_path": "diffusers/src/diffusers/modular_pipelines/stable_diffusion_xl/modular_blocks.py", "repo_id": "diffusers", "token_count": 6428 }
169
from typing import TYPE_CHECKING from ...utils import ( DIFFUSERS_SLOW_IMPORT, _LazyModule, ) _import_structure = { "pipeline_consistency_models": ["ConsistencyModelPipeline"], } if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: from .pipeline_consistency_models import ConsistencyModelPipeline else: import sys sys.modules[__name__] = _LazyModule( __name__, globals()["__file__"], _import_structure, module_spec=__spec__, )
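The `_LazyModule` indirection above keeps importing the package cheap: `pipeline_consistency_models` is only imported when `ConsistencyModelPipeline` is first accessed. A minimal illustration, using nothing beyond the names defined in this `__init__.py`:

```py
# Importing the package does not yet import the heavy pipeline module ...
import diffusers.pipelines.consistency_models as consistency_models

# ... the attribute access below is what triggers the actual import via _LazyModule.
pipeline_cls = consistency_models.ConsistencyModelPipeline
print(pipeline_cls.__name__)
```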
diffusers/src/diffusers/pipelines/consistency_models/__init__.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/consistency_models/__init__.py", "repo_id": "diffusers", "token_count": 209 }
170
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import List, Optional, Tuple, Union import torch from ...models import UNet2DModel from ...schedulers import DDIMScheduler from ...utils import is_torch_xla_available from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False class DDIMPipeline(DiffusionPipeline): r""" Pipeline for image generation. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). Parameters: unet ([`UNet2DModel`]): A `UNet2DModel` to denoise the encoded image latents. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image. Can be one of [`DDPMScheduler`], or [`DDIMScheduler`]. """ model_cpu_offload_seq = "unet" def __init__(self, unet: UNet2DModel, scheduler: DDIMScheduler): super().__init__() # make sure scheduler can always be converted to DDIM scheduler = DDIMScheduler.from_config(scheduler.config) self.register_modules(unet=unet, scheduler=scheduler) @torch.no_grad() def __call__( self, batch_size: int = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, eta: float = 0.0, num_inference_steps: int = 50, use_clipped_model_output: Optional[bool] = None, output_type: Optional[str] = "pil", return_dict: bool = True, ) -> Union[ImagePipelineOutput, Tuple]: r""" The call function to the pipeline for generation. Args: batch_size (`int`, *optional*, defaults to 1): The number of images to generate. generator (`torch.Generator`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) from the [DDIM](https://huggingface.co/papers/2010.02502) paper. Only applies to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. A value of `0` corresponds to DDIM and `1` corresponds to DDPM. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. use_clipped_model_output (`bool`, *optional*, defaults to `None`): If `True` or `False`, see documentation for [`DDIMScheduler.step`]. If `None`, nothing is passed downstream to the scheduler (use `None` for schedulers which don't support this argument). output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generated image. Choose between `PIL.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. 
Example: ```py >>> from diffusers import DDIMPipeline >>> # load model and scheduler >>> pipe = DDIMPipeline.from_pretrained("fusing/ddim-lsun-bedroom") >>> # run pipeline in inference (sample random noise and denoise); the output images are PIL images >>> image = pipe(eta=0.0, num_inference_steps=50).images[0] >>> # save image >>> image.save("test.png") ``` Returns: [`~pipelines.ImagePipelineOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated images """ # Sample gaussian noise to begin loop if isinstance(self.unet.config.sample_size, int): image_shape = ( batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size, ) else: image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) image = randn_tensor(image_shape, generator=generator, device=self._execution_device, dtype=self.unet.dtype) # set step values self.scheduler.set_timesteps(num_inference_steps) for t in self.progress_bar(self.scheduler.timesteps): # 1. predict noise model_output model_output = self.unet(image, t).sample # 2. predict previous mean of image x_t-1 and add variance depending on eta # eta corresponds to η in paper and should be between [0, 1] # do x_t -> x_t-1 image = self.scheduler.step( model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator ).prev_sample if XLA_AVAILABLE: xm.mark_step() image = (image / 2 + 0.5).clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).numpy() if output_type == "pil": image = self.numpy_to_pil(image) if not return_dict: return (image,) return ImagePipelineOutput(images=image)
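Because `__call__` accepts either a single `torch.Generator` or one generator per sample (and raises if the list length does not match `batch_size`), reproducible batched sampling can be sketched as below. The checkpoint id is the one used in the docstring example; seeds and filenames are illustrative:

```py
# Hedged sketch of batched, reproducible DDIM sampling with one generator per image,
# matching the list-length check in __call__ above.
import torch

from diffusers import DDIMPipeline

pipe = DDIMPipeline.from_pretrained("fusing/ddim-lsun-bedroom")

batch_size = 4
generators = [torch.Generator().manual_seed(seed) for seed in range(batch_size)]

# With the default output_type="pil", .images is a list of PIL images.
images = pipe(batch_size=batch_size, generator=generators, num_inference_steps=50, eta=0.0).images
for i, image in enumerate(images):
    image.save(f"ddim_sample_{i}.png")
```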
diffusers/src/diffusers/pipelines/ddim/pipeline_ddim.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/ddim/pipeline_ddim.py", "repo_id": "diffusers", "token_count": 2890 }
171
from typing import TYPE_CHECKING from ....utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_torch_available, is_transformers_available, ) _dummy_objects = {} _import_structure = {} try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ....utils import dummy_torch_and_transformers_objects _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: _import_structure["modeling_roberta_series"] = ["RobertaSeriesModelWithTransformation"] _import_structure["pipeline_alt_diffusion"] = ["AltDiffusionPipeline"] _import_structure["pipeline_alt_diffusion_img2img"] = ["AltDiffusionImg2ImgPipeline"] _import_structure["pipeline_output"] = ["AltDiffusionPipelineOutput"] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ....utils.dummy_torch_and_transformers_objects import * else: from .modeling_roberta_series import RobertaSeriesModelWithTransformation from .pipeline_alt_diffusion import AltDiffusionPipeline from .pipeline_alt_diffusion_img2img import AltDiffusionImg2ImgPipeline from .pipeline_output import AltDiffusionPipelineOutput else: import sys sys.modules[__name__] = _LazyModule( __name__, globals()["__file__"], _import_structure, module_spec=__spec__, ) for name, value in _dummy_objects.items(): setattr(sys.modules[__name__], name, value)
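Unlike the consistency-models `__init__.py` earlier, this one also registers dummy objects when `torch` or `transformers` is missing, so the names above always import and only fail, with an informative message, when actually used. A short illustration (the checkpoint id is just an example, not taken from this file):

```py
# With torch + transformers installed this resolves to the real (deprecated) pipeline class;
# without them it resolves to a dummy object that errors out only when used.
from diffusers.pipelines.deprecated.alt_diffusion import AltDiffusionPipeline

pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion")  # example checkpoint id
```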
diffusers/src/diffusers/pipelines/deprecated/alt_diffusion/__init__.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/deprecated/alt_diffusion/__init__.py", "repo_id": "diffusers", "token_count": 685 }
172
# flake8: noqa from typing import TYPE_CHECKING from ....utils import ( DIFFUSERS_SLOW_IMPORT, _LazyModule, is_note_seq_available, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, get_objects_from_module, ) _dummy_objects = {} _import_structure = {} try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ....utils import dummy_torch_and_transformers_objects # noqa F403 _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: _import_structure["continous_encoder"] = ["SpectrogramContEncoder"] _import_structure["notes_encoder"] = ["SpectrogramNotesEncoder"] _import_structure["pipeline_spectrogram_diffusion"] = [ "SpectrogramContEncoder", "SpectrogramDiffusionPipeline", "T5FilmDecoder", ] try: if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ....utils import dummy_transformers_and_torch_and_note_seq_objects _dummy_objects.update(get_objects_from_module(dummy_transformers_and_torch_and_note_seq_objects)) else: _import_structure["midi_utils"] = ["MidiProcessor"] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ....utils.dummy_torch_and_transformers_objects import * else: from .pipeline_spectrogram_diffusion import SpectrogramDiffusionPipeline from .pipeline_spectrogram_diffusion import SpectrogramContEncoder from .pipeline_spectrogram_diffusion import SpectrogramNotesEncoder from .pipeline_spectrogram_diffusion import T5FilmDecoder try: if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ....utils.dummy_transformers_and_torch_and_note_seq_objects import * else: from .midi_utils import MidiProcessor else: import sys sys.modules[__name__] = _LazyModule( __name__, globals()["__file__"], _import_structure, module_spec=__spec__, ) for name, value in _dummy_objects.items(): setattr(sys.modules[__name__], name, value)
diffusers/src/diffusers/pipelines/deprecated/spectrogram_diffusion/__init__.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/deprecated/spectrogram_diffusion/__init__.py", "repo_id": "diffusers", "token_count": 985 }
173
import inspect from typing import Callable, List, Optional, Union import PIL.Image import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModel from ....models import AutoencoderKL, UNet2DConditionModel from ....schedulers import KarrasDiffusionSchedulers from ....utils import logging from ...pipeline_utils import DiffusionPipeline from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline logger = logging.get_logger(__name__) # pylint: disable=invalid-name class VersatileDiffusionPipeline(DiffusionPipeline): r""" Pipeline for text-to-image generation using Stable Diffusion. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. text_encoder ([`~transformers.CLIPTextModel`]): Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). tokenizer ([`~transformers.CLIPTokenizer`]): A `CLIPTokenizer` to tokenize text. unet ([`UNet2DConditionModel`]): A `UNet2DConditionModel` to denoise the encoded image latents. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. safety_checker ([`StableDiffusionSafetyChecker`]): Classification module that estimates whether generated images could be considered offensive or harmful. Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details about a model's potential harms. feature_extractor ([`~transformers.CLIPImageProcessor`]): A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. 
""" tokenizer: CLIPTokenizer image_feature_extractor: CLIPImageProcessor text_encoder: CLIPTextModel image_encoder: CLIPVisionModel image_unet: UNet2DConditionModel text_unet: UNet2DConditionModel vae: AutoencoderKL scheduler: KarrasDiffusionSchedulers def __init__( self, tokenizer: CLIPTokenizer, image_feature_extractor: CLIPImageProcessor, text_encoder: CLIPTextModel, image_encoder: CLIPVisionModel, image_unet: UNet2DConditionModel, text_unet: UNet2DConditionModel, vae: AutoencoderKL, scheduler: KarrasDiffusionSchedulers, ): super().__init__() self.register_modules( tokenizer=tokenizer, image_feature_extractor=image_feature_extractor, text_encoder=text_encoder, image_encoder=image_encoder, image_unet=image_unet, text_unet=text_unet, vae=vae, scheduler=scheduler, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8 @torch.no_grad() def image_variation( self, image: Union[torch.Tensor, PIL.Image.Image], height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, callback_steps: int = 1, ): r""" The call function to the pipeline for generation. Args: image (`PIL.Image.Image`, `List[PIL.Image.Image]` or `torch.Tensor`): The image prompt or prompts to guide the image generation. height (`int`, *optional*, defaults to `self.image_unet.config.sample_size * self.vae_scale_factor`): The height in pixels of the generated image. width (`int`, *optional*, defaults to `self.image_unet.config.sample_size * self.vae_scale_factor`): The width in pixels of the generated image. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. guidance_scale (`float`, *optional*, defaults to 7.5): A higher guidance scale value encourages the model to generate images closely linked to the text `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide what to not include in image generation. If not defined, you need to pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) from the [DDIM](https://huggingface.co/papers/2010.02502) paper. Only applies to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. generator (`torch.Generator`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.Tensor`, *optional*): Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor is generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generated image. 
Choose between `PIL.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a plain tuple. callback (`Callable`, *optional*): A function that calls every `callback_steps` steps during inference. The function is called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function is called. If not specified, the callback is called at every step. Examples: ```py >>> from diffusers import VersatileDiffusionPipeline >>> import torch >>> import requests >>> from io import BytesIO >>> from PIL import Image >>> # let's download an initial image >>> url = "https://huggingface.co/datasets/diffusers/images/resolve/main/benz.jpg" >>> response = requests.get(url) >>> image = Image.open(BytesIO(response.content)).convert("RGB") >>> pipe = VersatileDiffusionPipeline.from_pretrained( ... "shi-labs/versatile-diffusion", torch_dtype=torch.float16 ... ) >>> pipe = pipe.to("cuda") >>> generator = torch.Generator(device="cuda").manual_seed(0) >>> image = pipe.image_variation(image, generator=generator).images[0] >>> image.save("./car_variation.png") ``` Returns: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated images and the second element is a list of `bool`s indicating whether the corresponding generated image contains "not-safe-for-work" (nsfw) content. """ expected_components = inspect.signature(VersatileDiffusionImageVariationPipeline.__init__).parameters.keys() components = {name: component for name, component in self.components.items() if name in expected_components} return VersatileDiffusionImageVariationPipeline(**components)( image=image, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, ) @torch.no_grad() def text_to_image( self, prompt: Union[str, List[str]], height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, callback_steps: int = 1, ): r""" The call function to the pipeline for generation. Args: prompt (`str` or `List[str]`): The prompt or prompts to guide image generation. height (`int`, *optional*, defaults to `self.image_unet.config.sample_size * self.vae_scale_factor`): The height in pixels of the generated image. width (`int`, *optional*, defaults to `self.image_unet.config.sample_size * self.vae_scale_factor`): The width in pixels of the generated image. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. 
guidance_scale (`float`, *optional*, defaults to 7.5): A higher guidance scale value encourages the model to generate images closely linked to the text `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide what to not include in image generation. If not defined, you need to pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) from the [DDIM](https://huggingface.co/papers/2010.02502) paper. Only applies to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. generator (`torch.Generator`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.Tensor`, *optional*): Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor is generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generated image. Choose between `PIL.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a plain tuple. callback (`Callable`, *optional*): A function that calls every `callback_steps` steps during inference. The function is called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function is called. If not specified, the callback is called at every step. Examples: ```py >>> from diffusers import VersatileDiffusionPipeline >>> import torch >>> pipe = VersatileDiffusionPipeline.from_pretrained( ... "shi-labs/versatile-diffusion", torch_dtype=torch.float16 ... ) >>> pipe = pipe.to("cuda") >>> generator = torch.Generator(device="cuda").manual_seed(0) >>> image = pipe.text_to_image("an astronaut riding on a horse on mars", generator=generator).images[0] >>> image.save("./astronaut.png") ``` Returns: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated images and the second element is a list of `bool`s indicating whether the corresponding generated image contains "not-safe-for-work" (nsfw) content. 
""" expected_components = inspect.signature(VersatileDiffusionTextToImagePipeline.__init__).parameters.keys() components = {name: component for name, component in self.components.items() if name in expected_components} temp_pipeline = VersatileDiffusionTextToImagePipeline(**components) output = temp_pipeline( prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, ) # swap the attention blocks back to the original state temp_pipeline._swap_unet_attention_blocks() return output @torch.no_grad() def dual_guided( self, prompt: Union[PIL.Image.Image, List[PIL.Image.Image]], image: Union[str, List[str]], text_to_image_strength: float = 0.5, height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 50, guidance_scale: float = 7.5, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, callback_steps: int = 1, ): r""" The call function to the pipeline for generation. Args: prompt (`str` or `List[str]`): The prompt or prompts to guide image generation. height (`int`, *optional*, defaults to `self.image_unet.config.sample_size * self.vae_scale_factor`): The height in pixels of the generated image. width (`int`, *optional*, defaults to `self.image_unet.config.sample_size * self.vae_scale_factor`): The width in pixels of the generated image. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. guidance_scale (`float`, *optional*, defaults to 7.5): A higher guidance scale value encourages the model to generate images closely linked to the text `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide what to not include in image generation. If not defined, you need to pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) from the [DDIM](https://huggingface.co/papers/2010.02502) paper. Only applies to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.Tensor`, *optional*): Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor is generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generated image. Choose between `PIL.Image` or `np.array`. 
return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a plain tuple. callback (`Callable`, *optional*): A function that calls every `callback_steps` steps during inference. The function is called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function is called. If not specified, the callback is called at every step. Examples: ```py >>> from diffusers import VersatileDiffusionPipeline >>> import torch >>> import requests >>> from io import BytesIO >>> from PIL import Image >>> # let's download an initial image >>> url = "https://huggingface.co/datasets/diffusers/images/resolve/main/benz.jpg" >>> response = requests.get(url) >>> image = Image.open(BytesIO(response.content)).convert("RGB") >>> text = "a red car in the sun" >>> pipe = VersatileDiffusionPipeline.from_pretrained( ... "shi-labs/versatile-diffusion", torch_dtype=torch.float16 ... ) >>> pipe = pipe.to("cuda") >>> generator = torch.Generator(device="cuda").manual_seed(0) >>> text_to_image_strength = 0.75 >>> image = pipe.dual_guided( ... prompt=text, image=image, text_to_image_strength=text_to_image_strength, generator=generator ... ).images[0] >>> image.save("./car_variation.png") ``` Returns: [`~pipelines.ImagePipelineOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated images. """ expected_components = inspect.signature(VersatileDiffusionDualGuidedPipeline.__init__).parameters.keys() components = {name: component for name, component in self.components.items() if name in expected_components} temp_pipeline = VersatileDiffusionDualGuidedPipeline(**components) output = temp_pipeline( prompt=prompt, image=image, text_to_image_strength=text_to_image_strength, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, ) temp_pipeline._revert_dual_attention() return output
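All three entry points above (`image_variation`, `text_to_image`, `dual_guided`) rely on the same trick: inspect the target sub-pipeline's `__init__` signature and forward only the matching entries of `self.components`. A standalone sketch of that pattern (the helper name is illustrative, not part of the file):

```py
# Standalone sketch of the delegation pattern used above: keep only the components that the
# target pipeline's __init__ actually accepts, then instantiate the sub-pipeline with them.
import inspect


def build_sub_pipeline(parent_pipeline, sub_pipeline_cls):
    expected = inspect.signature(sub_pipeline_cls.__init__).parameters.keys()
    components = {name: module for name, module in parent_pipeline.components.items() if name in expected}
    return sub_pipeline_cls(**components)
```

This is what lets a single Versatile Diffusion checkpoint serve the three task-specific pipelines without reloading weights.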
diffusers/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion.py", "repo_id": "diffusers", "token_count": 9163 }
174
# Copyright 2025 HiDream-ai Team and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import math from typing import Any, Callable, Dict, List, Optional, Union import torch from transformers import ( CLIPTextModelWithProjection, CLIPTokenizer, LlamaForCausalLM, PreTrainedTokenizerFast, T5EncoderModel, T5Tokenizer, ) from ...image_processor import VaeImageProcessor from ...loaders import HiDreamImageLoraLoaderMixin from ...models import AutoencoderKL, HiDreamImageTransformer2DModel from ...schedulers import FlowMatchEulerDiscreteScheduler, UniPCMultistepScheduler from ...utils import deprecate, is_torch_xla_available, logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline from .pipeline_output import HiDreamImagePipelineOutput if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py >>> import torch >>> from transformers import AutoTokenizer, LlamaForCausalLM >>> from diffusers import HiDreamImagePipeline >>> tokenizer_4 = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3.1-8B-Instruct") >>> text_encoder_4 = LlamaForCausalLM.from_pretrained( ... "meta-llama/Meta-Llama-3.1-8B-Instruct", ... output_hidden_states=True, ... output_attentions=True, ... torch_dtype=torch.bfloat16, ... ) >>> pipe = HiDreamImagePipeline.from_pretrained( ... "HiDream-ai/HiDream-I1-Full", ... tokenizer_4=tokenizer_4, ... text_encoder_4=text_encoder_4, ... torch_dtype=torch.bfloat16, ... ) >>> pipe.enable_model_cpu_offload() >>> image = pipe( ... 'A cat holding a sign that says "Hi-Dreams.ai".', ... height=1024, ... width=1024, ... guidance_scale=5.0, ... num_inference_steps=50, ... generator=torch.Generator("cuda").manual_seed(0), ... ).images[0] >>> image.save("output.png") ``` """ # Copied from diffusers.pipelines.flux.pipeline_flux.calculate_shift def calculate_shift( image_seq_len, base_seq_len: int = 256, max_seq_len: int = 4096, base_shift: float = 0.5, max_shift: float = 1.15, ): m = (max_shift - base_shift) / (max_seq_len - base_seq_len) b = base_shift - m * base_seq_len mu = image_seq_len * m + b return mu # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps def retrieve_timesteps( scheduler, num_inference_steps: Optional[int] = None, device: Optional[Union[str, torch.device]] = None, timesteps: Optional[List[int]] = None, sigmas: Optional[List[float]] = None, **kwargs, ): r""" Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. Args: scheduler (`SchedulerMixin`): The scheduler to get timesteps from. num_inference_steps (`int`): The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` must be `None`. 
device (`str` or `torch.device`, *optional*): The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. timesteps (`List[int]`, *optional*): Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, `num_inference_steps` and `sigmas` must be `None`. sigmas (`List[float]`, *optional*): Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, `num_inference_steps` and `timesteps` must be `None`. Returns: `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the second element is the number of inference steps. """ if timesteps is not None and sigmas is not None: raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") if timesteps is not None: accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" timestep schedules. Please check whether you are using the correct scheduler." ) scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif sigmas is not None: accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accept_sigmas: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" sigmas schedules. Please check whether you are using the correct scheduler." ) scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return timesteps, num_inference_steps class HiDreamImagePipeline(DiffusionPipeline, HiDreamImageLoraLoaderMixin): model_cpu_offload_seq = "text_encoder->text_encoder_2->text_encoder_3->text_encoder_4->transformer->vae" _callback_tensor_inputs = ["latents", "prompt_embeds_t5", "prompt_embeds_llama3", "pooled_prompt_embeds"] def __init__( self, scheduler: FlowMatchEulerDiscreteScheduler, vae: AutoencoderKL, text_encoder: CLIPTextModelWithProjection, tokenizer: CLIPTokenizer, text_encoder_2: CLIPTextModelWithProjection, tokenizer_2: CLIPTokenizer, text_encoder_3: T5EncoderModel, tokenizer_3: T5Tokenizer, text_encoder_4: LlamaForCausalLM, tokenizer_4: PreTrainedTokenizerFast, transformer: HiDreamImageTransformer2DModel, ): super().__init__() self.register_modules( vae=vae, text_encoder=text_encoder, text_encoder_2=text_encoder_2, text_encoder_3=text_encoder_3, text_encoder_4=text_encoder_4, tokenizer=tokenizer, tokenizer_2=tokenizer_2, tokenizer_3=tokenizer_3, tokenizer_4=tokenizer_4, scheduler=scheduler, transformer=transformer, ) self.vae_scale_factor = ( 2 ** (len(self.vae.config.block_out_channels) - 1) if hasattr(self, "vae") and self.vae is not None else 8 ) # HiDreamImage latents are turned into 2x2 patches and packed. This means the latent width and height has to be divisible # by the patch size. 
So the vae scale factor is multiplied by the patch size to account for this self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor * 2) self.default_sample_size = 128 if getattr(self, "tokenizer_4", None) is not None: self.tokenizer_4.pad_token = self.tokenizer_4.eos_token def _get_t5_prompt_embeds( self, prompt: Union[str, List[str]] = None, max_sequence_length: int = 128, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None, ): device = device or self._execution_device dtype = dtype or self.text_encoder_3.dtype prompt = [prompt] if isinstance(prompt, str) else prompt text_inputs = self.tokenizer_3( prompt, padding="max_length", max_length=min(max_sequence_length, self.tokenizer_3.model_max_length), truncation=True, add_special_tokens=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids attention_mask = text_inputs.attention_mask untruncated_ids = self.tokenizer_3(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): removed_text = self.tokenizer_3.batch_decode( untruncated_ids[:, min(max_sequence_length, self.tokenizer_3.model_max_length) - 1 : -1] ) logger.warning( "The following part of your input was truncated because `max_sequence_length` is set to " f" {min(max_sequence_length, self.tokenizer_3.model_max_length)} tokens: {removed_text}" ) prompt_embeds = self.text_encoder_3(text_input_ids.to(device), attention_mask=attention_mask.to(device))[0] prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) return prompt_embeds def _get_clip_prompt_embeds( self, tokenizer, text_encoder, prompt: Union[str, List[str]], max_sequence_length: int = 128, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None, ): device = device or self._execution_device dtype = dtype or text_encoder.dtype prompt = [prompt] if isinstance(prompt, str) else prompt text_inputs = tokenizer( prompt, padding="max_length", max_length=min(max_sequence_length, 218), truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): removed_text = tokenizer.batch_decode(untruncated_ids[:, 218 - 1 : -1]) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {218} tokens: {removed_text}" ) prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True) # Use pooled output of CLIPTextModel prompt_embeds = prompt_embeds[0] prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) return prompt_embeds def _get_llama3_prompt_embeds( self, prompt: Union[str, List[str]] = None, max_sequence_length: int = 128, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None, ): device = device or self._execution_device dtype = dtype or self.text_encoder_4.dtype prompt = [prompt] if isinstance(prompt, str) else prompt text_inputs = self.tokenizer_4( prompt, padding="max_length", max_length=min(max_sequence_length, self.tokenizer_4.model_max_length), truncation=True, add_special_tokens=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids attention_mask = text_inputs.attention_mask untruncated_ids = self.tokenizer_4(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] 
and not torch.equal(text_input_ids, untruncated_ids): removed_text = self.tokenizer_4.batch_decode( untruncated_ids[:, min(max_sequence_length, self.tokenizer_4.model_max_length) - 1 : -1] ) logger.warning( "The following part of your input was truncated because `max_sequence_length` is set to " f" {min(max_sequence_length, self.tokenizer_4.model_max_length)} tokens: {removed_text}" ) outputs = self.text_encoder_4( text_input_ids.to(device), attention_mask=attention_mask.to(device), output_hidden_states=True, output_attentions=True, ) prompt_embeds = outputs.hidden_states[1:] prompt_embeds = torch.stack(prompt_embeds, dim=0) return prompt_embeds def encode_prompt( self, prompt: Optional[Union[str, List[str]]] = None, prompt_2: Optional[Union[str, List[str]]] = None, prompt_3: Optional[Union[str, List[str]]] = None, prompt_4: Optional[Union[str, List[str]]] = None, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None, num_images_per_prompt: int = 1, do_classifier_free_guidance: bool = True, negative_prompt: Optional[Union[str, List[str]]] = None, negative_prompt_2: Optional[Union[str, List[str]]] = None, negative_prompt_3: Optional[Union[str, List[str]]] = None, negative_prompt_4: Optional[Union[str, List[str]]] = None, prompt_embeds_t5: Optional[List[torch.FloatTensor]] = None, prompt_embeds_llama3: Optional[List[torch.FloatTensor]] = None, negative_prompt_embeds_t5: Optional[List[torch.FloatTensor]] = None, negative_prompt_embeds_llama3: Optional[List[torch.FloatTensor]] = None, pooled_prompt_embeds: Optional[torch.FloatTensor] = None, negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None, max_sequence_length: int = 128, lora_scale: Optional[float] = None, ): prompt = [prompt] if isinstance(prompt, str) else prompt if prompt is not None: batch_size = len(prompt) else: batch_size = pooled_prompt_embeds.shape[0] device = device or self._execution_device if pooled_prompt_embeds is None: pooled_prompt_embeds_1 = self._get_clip_prompt_embeds( self.tokenizer, self.text_encoder, prompt, max_sequence_length, device, dtype ) if do_classifier_free_guidance and negative_pooled_prompt_embeds is None: negative_prompt = negative_prompt or "" negative_prompt = [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt if len(negative_prompt) > 1 and len(negative_prompt) != batch_size: raise ValueError(f"negative_prompt must be of length 1 or {batch_size}") negative_pooled_prompt_embeds_1 = self._get_clip_prompt_embeds( self.tokenizer, self.text_encoder, negative_prompt, max_sequence_length, device, dtype ) if negative_pooled_prompt_embeds_1.shape[0] == 1 and batch_size > 1: negative_pooled_prompt_embeds_1 = negative_pooled_prompt_embeds_1.repeat(batch_size, 1) if pooled_prompt_embeds is None: prompt_2 = prompt_2 or prompt prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 if len(prompt_2) > 1 and len(prompt_2) != batch_size: raise ValueError(f"prompt_2 must be of length 1 or {batch_size}") pooled_prompt_embeds_2 = self._get_clip_prompt_embeds( self.tokenizer_2, self.text_encoder_2, prompt_2, max_sequence_length, device, dtype ) if pooled_prompt_embeds_2.shape[0] == 1 and batch_size > 1: pooled_prompt_embeds_2 = pooled_prompt_embeds_2.repeat(batch_size, 1) if do_classifier_free_guidance and negative_pooled_prompt_embeds is None: negative_prompt_2 = negative_prompt_2 or negative_prompt negative_prompt_2 = [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2 if len(negative_prompt_2) > 1 and len(negative_prompt_2) 
!= batch_size: raise ValueError(f"negative_prompt_2 must be of length 1 or {batch_size}") negative_pooled_prompt_embeds_2 = self._get_clip_prompt_embeds( self.tokenizer_2, self.text_encoder_2, negative_prompt_2, max_sequence_length, device, dtype ) if negative_pooled_prompt_embeds_2.shape[0] == 1 and batch_size > 1: negative_pooled_prompt_embeds_2 = negative_pooled_prompt_embeds_2.repeat(batch_size, 1) if pooled_prompt_embeds is None: pooled_prompt_embeds = torch.cat([pooled_prompt_embeds_1, pooled_prompt_embeds_2], dim=-1) if do_classifier_free_guidance and negative_pooled_prompt_embeds is None: negative_pooled_prompt_embeds = torch.cat( [negative_pooled_prompt_embeds_1, negative_pooled_prompt_embeds_2], dim=-1 ) if prompt_embeds_t5 is None: prompt_3 = prompt_3 or prompt prompt_3 = [prompt_3] if isinstance(prompt_3, str) else prompt_3 if len(prompt_3) > 1 and len(prompt_3) != batch_size: raise ValueError(f"prompt_3 must be of length 1 or {batch_size}") prompt_embeds_t5 = self._get_t5_prompt_embeds(prompt_3, max_sequence_length, device, dtype) if prompt_embeds_t5.shape[0] == 1 and batch_size > 1: prompt_embeds_t5 = prompt_embeds_t5.repeat(batch_size, 1, 1) if do_classifier_free_guidance and negative_prompt_embeds_t5 is None: negative_prompt_3 = negative_prompt_3 or negative_prompt negative_prompt_3 = [negative_prompt_3] if isinstance(negative_prompt_3, str) else negative_prompt_3 if len(negative_prompt_3) > 1 and len(negative_prompt_3) != batch_size: raise ValueError(f"negative_prompt_3 must be of length 1 or {batch_size}") negative_prompt_embeds_t5 = self._get_t5_prompt_embeds( negative_prompt_3, max_sequence_length, device, dtype ) if negative_prompt_embeds_t5.shape[0] == 1 and batch_size > 1: negative_prompt_embeds_t5 = negative_prompt_embeds_t5.repeat(batch_size, 1, 1) if prompt_embeds_llama3 is None: prompt_4 = prompt_4 or prompt prompt_4 = [prompt_4] if isinstance(prompt_4, str) else prompt_4 if len(prompt_4) > 1 and len(prompt_4) != batch_size: raise ValueError(f"prompt_4 must be of length 1 or {batch_size}") prompt_embeds_llama3 = self._get_llama3_prompt_embeds(prompt_4, max_sequence_length, device, dtype) if prompt_embeds_llama3.shape[0] == 1 and batch_size > 1: prompt_embeds_llama3 = prompt_embeds_llama3.repeat(1, batch_size, 1, 1) if do_classifier_free_guidance and negative_prompt_embeds_llama3 is None: negative_prompt_4 = negative_prompt_4 or negative_prompt negative_prompt_4 = [negative_prompt_4] if isinstance(negative_prompt_4, str) else negative_prompt_4 if len(negative_prompt_4) > 1 and len(negative_prompt_4) != batch_size: raise ValueError(f"negative_prompt_4 must be of length 1 or {batch_size}") negative_prompt_embeds_llama3 = self._get_llama3_prompt_embeds( negative_prompt_4, max_sequence_length, device, dtype ) if negative_prompt_embeds_llama3.shape[0] == 1 and batch_size > 1: negative_prompt_embeds_llama3 = negative_prompt_embeds_llama3.repeat(1, batch_size, 1, 1) # duplicate pooled_prompt_embeds for each generation per prompt pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt) pooled_prompt_embeds = pooled_prompt_embeds.view(batch_size * num_images_per_prompt, -1) # duplicate t5_prompt_embeds for batch_size and num_images_per_prompt bs_embed, seq_len, _ = prompt_embeds_t5.shape if bs_embed == 1 and batch_size > 1: prompt_embeds_t5 = prompt_embeds_t5.repeat(batch_size, 1, 1) elif bs_embed > 1 and bs_embed != batch_size: raise ValueError(f"cannot duplicate prompt_embeds_t5 of batch size {bs_embed}") prompt_embeds_t5 = prompt_embeds_t5.repeat(1, 
num_images_per_prompt, 1) prompt_embeds_t5 = prompt_embeds_t5.view(batch_size * num_images_per_prompt, seq_len, -1) # duplicate llama3_prompt_embeds for batch_size and num_images_per_prompt _, bs_embed, seq_len, dim = prompt_embeds_llama3.shape if bs_embed == 1 and batch_size > 1: prompt_embeds_llama3 = prompt_embeds_llama3.repeat(1, batch_size, 1, 1) elif bs_embed > 1 and bs_embed != batch_size: raise ValueError(f"cannot duplicate prompt_embeds_llama3 of batch size {bs_embed}") prompt_embeds_llama3 = prompt_embeds_llama3.repeat(1, 1, num_images_per_prompt, 1) prompt_embeds_llama3 = prompt_embeds_llama3.view(-1, batch_size * num_images_per_prompt, seq_len, dim) if do_classifier_free_guidance: # duplicate negative_pooled_prompt_embeds for batch_size and num_images_per_prompt bs_embed, seq_len = negative_pooled_prompt_embeds.shape if bs_embed == 1 and batch_size > 1: negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(batch_size, 1) elif bs_embed > 1 and bs_embed != batch_size: raise ValueError(f"cannot duplicate negative_pooled_prompt_embeds of batch size {bs_embed}") negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt) negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.view(batch_size * num_images_per_prompt, -1) # duplicate negative_t5_prompt_embeds for batch_size and num_images_per_prompt bs_embed, seq_len, _ = negative_prompt_embeds_t5.shape if bs_embed == 1 and batch_size > 1: negative_prompt_embeds_t5 = negative_prompt_embeds_t5.repeat(batch_size, 1, 1) elif bs_embed > 1 and bs_embed != batch_size: raise ValueError(f"cannot duplicate negative_prompt_embeds_t5 of batch size {bs_embed}") negative_prompt_embeds_t5 = negative_prompt_embeds_t5.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds_t5 = negative_prompt_embeds_t5.view(batch_size * num_images_per_prompt, seq_len, -1) # duplicate negative_prompt_embeds_llama3 for batch_size and num_images_per_prompt _, bs_embed, seq_len, dim = negative_prompt_embeds_llama3.shape if bs_embed == 1 and batch_size > 1: negative_prompt_embeds_llama3 = negative_prompt_embeds_llama3.repeat(1, batch_size, 1, 1) elif bs_embed > 1 and bs_embed != batch_size: raise ValueError(f"cannot duplicate negative_prompt_embeds_llama3 of batch size {bs_embed}") negative_prompt_embeds_llama3 = negative_prompt_embeds_llama3.repeat(1, 1, num_images_per_prompt, 1) negative_prompt_embeds_llama3 = negative_prompt_embeds_llama3.view( -1, batch_size * num_images_per_prompt, seq_len, dim ) return ( prompt_embeds_t5, negative_prompt_embeds_t5, prompt_embeds_llama3, negative_prompt_embeds_llama3, pooled_prompt_embeds, negative_pooled_prompt_embeds, ) def enable_vae_slicing(self): r""" Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. """ self.vae.enable_slicing() def disable_vae_slicing(self): r""" Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to computing decoding in one step. """ self.vae.disable_slicing() def enable_vae_tiling(self): r""" Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow processing larger images. """ self.vae.enable_tiling() def disable_vae_tiling(self): r""" Disable tiled VAE decoding. 
If `enable_vae_tiling` was previously enabled, this method will go back to computing decoding in one step. """ self.vae.disable_tiling() def check_inputs( self, prompt, prompt_2, prompt_3, prompt_4, negative_prompt=None, negative_prompt_2=None, negative_prompt_3=None, negative_prompt_4=None, prompt_embeds_t5=None, prompt_embeds_llama3=None, negative_prompt_embeds_t5=None, negative_prompt_embeds_llama3=None, pooled_prompt_embeds=None, negative_pooled_prompt_embeds=None, callback_on_step_end_tensor_inputs=None, ): if callback_on_step_end_tensor_inputs is not None and not all( k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs ): raise ValueError( f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" ) if prompt is not None and pooled_prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `pooled_prompt_embeds`: {pooled_prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt_2 is not None and pooled_prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt_2`: {prompt_2} and `pooled_prompt_embeds`: {pooled_prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt_3 is not None and prompt_embeds_t5 is not None: raise ValueError( f"Cannot forward both `prompt_3`: {prompt_3} and `prompt_embeds_t5`: {prompt_embeds_t5}. Please make sure to" " only forward one of the two." ) elif prompt_4 is not None and prompt_embeds_llama3 is not None: raise ValueError( f"Cannot forward both `prompt_4`: {prompt_4} and `prompt_embeds_llama3`: {prompt_embeds_llama3}. Please make sure to" " only forward one of the two." ) elif prompt is None and pooled_prompt_embeds is None: raise ValueError( "Provide either `prompt` or `pooled_prompt_embeds`. Cannot leave both `prompt` and `pooled_prompt_embeds` undefined." ) elif prompt is None and prompt_embeds_t5 is None: raise ValueError( "Provide either `prompt` or `prompt_embeds_t5`. Cannot leave both `prompt` and `prompt_embeds_t5` undefined." ) elif prompt is None and prompt_embeds_llama3 is None: raise ValueError( "Provide either `prompt` or `prompt_embeds_llama3`. Cannot leave both `prompt` and `prompt_embeds_llama3` undefined." ) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)): raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}") elif prompt_3 is not None and (not isinstance(prompt_3, str) and not isinstance(prompt_3, list)): raise ValueError(f"`prompt_3` has to be of type `str` or `list` but is {type(prompt_3)}") elif prompt_4 is not None and (not isinstance(prompt_4, str) and not isinstance(prompt_4, list)): raise ValueError(f"`prompt_4` has to be of type `str` or `list` but is {type(prompt_4)}") if negative_prompt is not None and negative_pooled_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_pooled_prompt_embeds`:" f" {negative_pooled_prompt_embeds}. Please make sure to only forward one of the two." 
) elif negative_prompt_2 is not None and negative_pooled_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_pooled_prompt_embeds`:" f" {negative_pooled_prompt_embeds}. Please make sure to only forward one of the two." ) elif negative_prompt_3 is not None and negative_prompt_embeds_t5 is not None: raise ValueError( f"Cannot forward both `negative_prompt_3`: {negative_prompt_3} and `negative_prompt_embeds_t5`:" f" {negative_prompt_embeds_t5}. Please make sure to only forward one of the two." ) elif negative_prompt_4 is not None and negative_prompt_embeds_llama3 is not None: raise ValueError( f"Cannot forward both `negative_prompt_4`: {negative_prompt_4} and `negative_prompt_embeds_llama3`:" f" {negative_prompt_embeds_llama3}. Please make sure to only forward one of the two." ) if pooled_prompt_embeds is not None and negative_pooled_prompt_embeds is not None: if pooled_prompt_embeds.shape != negative_pooled_prompt_embeds.shape: raise ValueError( "`pooled_prompt_embeds` and `negative_pooled_prompt_embeds` must have the same shape when passed directly, but" f" got: `pooled_prompt_embeds` {pooled_prompt_embeds.shape} != `negative_pooled_prompt_embeds`" f" {negative_pooled_prompt_embeds.shape}." ) if prompt_embeds_t5 is not None and negative_prompt_embeds_t5 is not None: if prompt_embeds_t5.shape != negative_prompt_embeds_t5.shape: raise ValueError( "`prompt_embeds_t5` and `negative_prompt_embeds_t5` must have the same shape when passed directly, but" f" got: `prompt_embeds_t5` {prompt_embeds_t5.shape} != `negative_prompt_embeds_t5`" f" {negative_prompt_embeds_t5.shape}." ) if prompt_embeds_llama3 is not None and negative_prompt_embeds_llama3 is not None: if prompt_embeds_llama3.shape != negative_prompt_embeds_llama3.shape: raise ValueError( "`prompt_embeds_llama3` and `negative_prompt_embeds_llama3` must have the same shape when passed directly, but" f" got: `prompt_embeds_llama3` {prompt_embeds_llama3.shape} != `negative_prompt_embeds_llama3`" f" {negative_prompt_embeds_llama3.shape}." ) def prepare_latents( self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None, ): # VAE applies 8x compression on images but we must also account for packing which requires # latent height and width to be divisible by 2. 
        height = 2 * (int(height) // (self.vae_scale_factor * 2))
        width = 2 * (int(width) // (self.vae_scale_factor * 2))

        shape = (batch_size, num_channels_latents, height, width)

        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        return latents

    @property
    def guidance_scale(self):
        return self._guidance_scale

    @property
    def do_classifier_free_guidance(self):
        return self._guidance_scale > 1

    @property
    def attention_kwargs(self):
        return self._attention_kwargs

    @property
    def num_timesteps(self):
        return self._num_timesteps

    @property
    def interrupt(self):
        return self._interrupt

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        prompt: Union[str, List[str]] = None,
        prompt_2: Optional[Union[str, List[str]]] = None,
        prompt_3: Optional[Union[str, List[str]]] = None,
        prompt_4: Optional[Union[str, List[str]]] = None,
        height: Optional[int] = None,
        width: Optional[int] = None,
        num_inference_steps: int = 50,
        sigmas: Optional[List[float]] = None,
        guidance_scale: float = 5.0,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        negative_prompt_2: Optional[Union[str, List[str]]] = None,
        negative_prompt_3: Optional[Union[str, List[str]]] = None,
        negative_prompt_4: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        prompt_embeds_t5: Optional[torch.FloatTensor] = None,
        prompt_embeds_llama3: Optional[torch.FloatTensor] = None,
        negative_prompt_embeds_t5: Optional[torch.FloatTensor] = None,
        negative_prompt_embeds_llama3: Optional[torch.FloatTensor] = None,
        pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
        negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        attention_kwargs: Optional[Dict[str, Any]] = None,
        callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
        callback_on_step_end_tensor_inputs: List[str] = ["latents"],
        max_sequence_length: int = 128,
        **kwargs,
    ):
        r"""
        Function invoked when calling the pipeline for generation.

        Args:
            prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
                instead.
            prompt_2 (`str` or `List[str]`, *optional*):
                The prompt or prompts to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` will
                be used instead.
            prompt_3 (`str` or `List[str]`, *optional*):
                The prompt or prompts to be sent to `tokenizer_3` and `text_encoder_3`. If not defined, `prompt` will
                be used instead.
            prompt_4 (`str` or `List[str]`, *optional*):
                The prompt or prompts to be sent to `tokenizer_4` and `text_encoder_4`. If not defined, `prompt` will
                be used instead.
            height (`int`, *optional*, defaults to `self.default_sample_size * self.vae_scale_factor`):
                The height in pixels of the generated image. This is set to 1024 by default for the best results.
            width (`int`, *optional*, defaults to `self.default_sample_size * self.vae_scale_factor`):
                The width in pixels of the generated image. This is set to 1024 by default for the best results.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            sigmas (`List[float]`, *optional*):
                Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in
                their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
                passed will be used.
            guidance_scale (`float`, *optional*, defaults to 5.0):
                Embedded guidance scale is enabled by setting `guidance_scale` > 1. Higher `guidance_scale` encourages
                a model to generate images more aligned with `prompt` at the expense of lower image quality.
                Guidance-distilled models approximate true classifier-free guidance for `guidance_scale` > 1. Refer to
                the [paper](https://huggingface.co/papers/2210.03142) to learn more.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. If not defined, one has to pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale`
                is not greater than `1`).
            negative_prompt_2 (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
                `text_encoder_2`. If not defined, `negative_prompt` is used in all the text-encoders.
            negative_prompt_3 (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation to be sent to `tokenizer_3` and
                `text_encoder_3`. If not defined, `negative_prompt` is used in all the text-encoders.
            negative_prompt_4 (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation to be sent to `tokenizer_4` and
                `text_encoder_4`. If not defined, `negative_prompt` is used in all the text-encoders.
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
                to make generation deterministic.
            latents (`torch.FloatTensor`, *optional*):
                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a
                latents tensor will be generated by sampling using the supplied random `generator`.
            prompt_embeds_t5 (`torch.FloatTensor`, *optional*):
                Pre-generated text embeddings from the T5 text encoder. Can be used to easily tweak text inputs,
                *e.g.* prompt weighting. If not provided, text embeddings will be generated from the `prompt` input
                argument.
            prompt_embeds_llama3 (`torch.FloatTensor`, *optional*):
                Pre-generated text embeddings from the Llama text encoder. Can be used to easily tweak text inputs,
                *e.g.* prompt weighting. If not provided, text embeddings will be generated from the `prompt` input
                argument.
            negative_prompt_embeds_t5 (`torch.FloatTensor`, *optional*):
                Pre-generated negative text embeddings from the T5 text encoder. Can be used to easily tweak text
                inputs, *e.g.* prompt weighting. If not provided, they will be generated from the `negative_prompt`
                input argument.
            negative_prompt_embeds_llama3 (`torch.FloatTensor`, *optional*):
                Pre-generated negative text embeddings from the Llama text encoder. Can be used to easily tweak text
                inputs, *e.g.* prompt weighting. If not provided, they will be generated from the `negative_prompt`
                input argument.
            pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, pooled text embeddings will be generated from `prompt` input argument.
            negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
                input argument.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.hidream_image.HiDreamImagePipelineOutput`] instead of a plain
                tuple.
            attention_kwargs (`dict`, *optional*):
                A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
                `self.processor` in
                [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
            callback_on_step_end (`Callable`, *optional*):
                A function that is called at the end of each denoising step during inference. The function is called
                with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
                callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
                `callback_on_step_end_tensor_inputs`.
            callback_on_step_end_tensor_inputs (`List`, *optional*):
                The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
                will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
                `._callback_tensor_inputs` attribute of your pipeline class.
            max_sequence_length (`int`, *optional*, defaults to 128):
                Maximum sequence length to use with the `prompt`.

        Examples:

        Returns:
            [`~pipelines.hidream_image.HiDreamImagePipelineOutput`] or `tuple`:
            [`~pipelines.hidream_image.HiDreamImagePipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
            When returning a tuple, the first element is a list with the generated images.
        """
        prompt_embeds = kwargs.get("prompt_embeds", None)
        negative_prompt_embeds = kwargs.get("negative_prompt_embeds", None)

        if prompt_embeds is not None:
            deprecation_message = "The `prompt_embeds` argument is deprecated. Please use `prompt_embeds_t5` and `prompt_embeds_llama3` instead."
            deprecate("prompt_embeds", "0.35.0", deprecation_message)
            prompt_embeds_t5 = prompt_embeds[0]
            prompt_embeds_llama3 = prompt_embeds[1]

        if negative_prompt_embeds is not None:
            deprecation_message = "The `negative_prompt_embeds` argument is deprecated. Please use `negative_prompt_embeds_t5` and `negative_prompt_embeds_llama3` instead."
            deprecate("negative_prompt_embeds", "0.35.0", deprecation_message)
            negative_prompt_embeds_t5 = negative_prompt_embeds[0]
            negative_prompt_embeds_llama3 = negative_prompt_embeds[1]

        height = height or self.default_sample_size * self.vae_scale_factor
        width = width or self.default_sample_size * self.vae_scale_factor

        division = self.vae_scale_factor * 2
        S_max = (self.default_sample_size * self.vae_scale_factor) ** 2
        scale = S_max / (width * height)
        scale = math.sqrt(scale)
        width, height = int(width * scale // division * division), int(height * scale // division * division)

        # 1. Check inputs. Raise error if not correct
        self.check_inputs(
            prompt,
            prompt_2,
            prompt_3,
            prompt_4,
            negative_prompt=negative_prompt,
            negative_prompt_2=negative_prompt_2,
            negative_prompt_3=negative_prompt_3,
            negative_prompt_4=negative_prompt_4,
            prompt_embeds_t5=prompt_embeds_t5,
            prompt_embeds_llama3=prompt_embeds_llama3,
            negative_prompt_embeds_t5=negative_prompt_embeds_t5,
            negative_prompt_embeds_llama3=negative_prompt_embeds_llama3,
            pooled_prompt_embeds=pooled_prompt_embeds,
            negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
            callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs,
        )

        self._guidance_scale = guidance_scale
        self._attention_kwargs = attention_kwargs
        self._interrupt = False

        # 2.
Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) elif pooled_prompt_embeds is not None: batch_size = pooled_prompt_embeds.shape[0] device = self._execution_device # 3. Encode prompt lora_scale = self.attention_kwargs.get("scale", None) if self.attention_kwargs is not None else None ( prompt_embeds_t5, negative_prompt_embeds_t5, prompt_embeds_llama3, negative_prompt_embeds_llama3, pooled_prompt_embeds, negative_pooled_prompt_embeds, ) = self.encode_prompt( prompt=prompt, prompt_2=prompt_2, prompt_3=prompt_3, prompt_4=prompt_4, negative_prompt=negative_prompt, negative_prompt_2=negative_prompt_2, negative_prompt_3=negative_prompt_3, negative_prompt_4=negative_prompt_4, do_classifier_free_guidance=self.do_classifier_free_guidance, prompt_embeds_t5=prompt_embeds_t5, prompt_embeds_llama3=prompt_embeds_llama3, negative_prompt_embeds_t5=negative_prompt_embeds_t5, negative_prompt_embeds_llama3=negative_prompt_embeds_llama3, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, device=device, num_images_per_prompt=num_images_per_prompt, max_sequence_length=max_sequence_length, lora_scale=lora_scale, ) if self.do_classifier_free_guidance: prompt_embeds_t5 = torch.cat([negative_prompt_embeds_t5, prompt_embeds_t5], dim=0) prompt_embeds_llama3 = torch.cat([negative_prompt_embeds_llama3, prompt_embeds_llama3], dim=1) pooled_prompt_embeds = torch.cat([negative_pooled_prompt_embeds, pooled_prompt_embeds], dim=0) # 4. Prepare latent variables num_channels_latents = self.transformer.config.in_channels latents = self.prepare_latents( batch_size * num_images_per_prompt, num_channels_latents, height, width, pooled_prompt_embeds.dtype, device, generator, latents, ) # 5. Prepare timesteps mu = calculate_shift(self.transformer.max_seq) scheduler_kwargs = {"mu": mu} if isinstance(self.scheduler, UniPCMultistepScheduler): self.scheduler.set_timesteps(num_inference_steps, device=device) # , shift=math.exp(mu)) timesteps = self.scheduler.timesteps else: timesteps, num_inference_steps = retrieve_timesteps( self.scheduler, num_inference_steps, device, sigmas=sigmas, **scheduler_kwargs, ) num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) self._num_timesteps = len(timesteps) # 6. Denoising loop with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): if self.interrupt: continue # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents # broadcast to batch dimension in a way that's compatible with ONNX/Core ML timestep = t.expand(latent_model_input.shape[0]) noise_pred = self.transformer( hidden_states=latent_model_input, timesteps=timestep, encoder_hidden_states_t5=prompt_embeds_t5, encoder_hidden_states_llama3=prompt_embeds_llama3, pooled_embeds=pooled_prompt_embeds, return_dict=False, )[0] noise_pred = -noise_pred # perform guidance if self.do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 latents_dtype = latents.dtype latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0] if latents.dtype != latents_dtype: if torch.backends.mps.is_available(): # some platforms (eg. 
apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272 latents = latents.to(latents_dtype) if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop("latents", latents) prompt_embeds_t5 = callback_outputs.pop("prompt_embeds_t5", prompt_embeds_t5) prompt_embeds_llama3 = callback_outputs.pop("prompt_embeds_llama3", prompt_embeds_llama3) pooled_prompt_embeds = callback_outputs.pop("pooled_prompt_embeds", pooled_prompt_embeds) # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if XLA_AVAILABLE: xm.mark_step() if output_type == "latent": image = latents else: latents = (latents / self.vae.config.scaling_factor) + self.vae.config.shift_factor image = self.vae.decode(latents, return_dict=False)[0] image = self.image_processor.postprocess(image, output_type=output_type) # Offload all models self.maybe_free_model_hooks() if not return_dict: return (image,) return HiDreamImagePipelineOutput(images=image)
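For orientation, a minimal usage sketch of the `HiDreamImagePipeline` defined above. The checkpoint id `HiDream-ai/HiDream-I1-Full`, the dtype, and the assumption that `from_pretrained` assembles all four tokenizers/text encoders from the pipeline repository are illustrative, not authoritative; consult the model card for the canonical loading code.

```py
>>> import torch
>>> from diffusers import HiDreamImagePipeline

>>> # Assumed checkpoint id and dtype, for illustration only.
>>> pipe = HiDreamImagePipeline.from_pretrained("HiDream-ai/HiDream-I1-Full", torch_dtype=torch.bfloat16)
>>> pipe.to("cuda")

>>> image = pipe(
...     "a photo of an astronaut riding a horse on mars",
...     num_inference_steps=50,  # the signature default
...     guidance_scale=5.0,  # the signature default; classifier-free guidance is active because it is > 1
... ).images[0]
>>> image.save("hidream_sample.png")
```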
diffusers/src/diffusers/pipelines/hidream_image/pipeline_hidream_image.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/hidream_image/pipeline_hidream_image.py", "repo_id": "diffusers", "token_count": 23253 }
175
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from copy import deepcopy from typing import Callable, List, Optional, Union import numpy as np import PIL.Image import torch import torch.nn.functional as F from packaging import version from PIL import Image from transformers import ( XLMRobertaTokenizer, ) from ... import __version__ from ...models import UNet2DConditionModel, VQModel from ...schedulers import DDIMScheduler from ...utils import ( is_torch_xla_available, logging, replace_example_docstring, ) from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput from .text_encoder import MultilingualCLIP if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py >>> from diffusers import KandinskyInpaintPipeline, KandinskyPriorPipeline >>> from diffusers.utils import load_image >>> import torch >>> import numpy as np >>> pipe_prior = KandinskyPriorPipeline.from_pretrained( ... "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16 ... ) >>> pipe_prior.to("cuda") >>> prompt = "a hat" >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False) >>> pipe = KandinskyInpaintPipeline.from_pretrained( ... "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16 ... ) >>> pipe.to("cuda") >>> init_image = load_image( ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" ... "/kandinsky/cat.png" ... ) >>> mask = np.zeros((768, 768), dtype=np.float32) >>> mask[:250, 250:-250] = 1 >>> out = pipe( ... prompt, ... image=init_image, ... mask_image=mask, ... image_embeds=image_emb, ... negative_image_embeds=zero_image_emb, ... height=768, ... width=768, ... num_inference_steps=50, ... ) >>> image = out.images[0] >>> image.save("cat_with_hat.png") ``` """ def get_new_h_w(h, w, scale_factor=8): new_h = h // scale_factor**2 if h % scale_factor**2 != 0: new_h += 1 new_w = w // scale_factor**2 if w % scale_factor**2 != 0: new_w += 1 return new_h * scale_factor, new_w * scale_factor def prepare_mask(masks): prepared_masks = [] for mask in masks: old_mask = deepcopy(mask) for i in range(mask.shape[1]): for j in range(mask.shape[2]): if old_mask[0][i][j] == 1: continue if i != 0: mask[:, i - 1, j] = 0 if j != 0: mask[:, i, j - 1] = 0 if i != 0 and j != 0: mask[:, i - 1, j - 1] = 0 if i != mask.shape[1] - 1: mask[:, i + 1, j] = 0 if j != mask.shape[2] - 1: mask[:, i, j + 1] = 0 if i != mask.shape[1] - 1 and j != mask.shape[2] - 1: mask[:, i + 1, j + 1] = 0 prepared_masks.append(mask) return torch.stack(prepared_masks, dim=0) def prepare_mask_and_masked_image(image, mask, height, width): r""" Prepares a pair (mask, image) to be consumed by the Kandinsky inpaint pipeline. 
    This means that those inputs will be converted to ``torch.Tensor`` with shapes ``batch x channels x height x
    width`` where ``channels`` is ``3`` for the ``image`` and ``1`` for the ``mask``.

    The ``image`` will be converted to ``torch.float32`` and normalized to be in ``[-1, 1]``. The ``mask`` will be
    binarized (``mask > 0.5``) and cast to ``torch.float32`` too.

    Args:
        image (Union[np.array, PIL.Image, torch.Tensor]): The image to inpaint. It can be a ``PIL.Image``, or a
            ``height x width x 3`` ``np.array`` or a ``channels x height x width`` ``torch.Tensor`` or a ``batch x
            channels x height x width`` ``torch.Tensor``.
        mask (Union[np.array, PIL.Image, torch.Tensor]): The mask to apply to the image, i.e. regions to inpaint. It
            can be a ``PIL.Image``, or a ``height x width`` ``np.array`` or a ``1 x height x width`` ``torch.Tensor``
            or a ``batch x 1 x height x width`` ``torch.Tensor``.
        height (`int`, *optional*, defaults to 512):
            The height in pixels of the generated image.
        width (`int`, *optional*, defaults to 512):
            The width in pixels of the generated image.

    Raises:
        ValueError: ``torch.Tensor`` images should be in the ``[-1, 1]`` range.
        ValueError: ``torch.Tensor`` mask should be in the ``[0, 1]`` range.
        ValueError: ``mask`` and ``image`` should have the same spatial dimensions.
        TypeError: ``mask`` is a ``torch.Tensor`` but ``image`` is not (or the other way around).

    Returns:
        tuple[torch.Tensor]: The pair (mask, image) as ``torch.Tensor`` with 4 dimensions: ``batch x channels x
            height x width``.
    """
    if image is None:
        raise ValueError("`image` input cannot be undefined.")

    if mask is None:
        raise ValueError("`mask_image` input cannot be undefined.")

    if isinstance(image, torch.Tensor):
        if not isinstance(mask, torch.Tensor):
            raise TypeError(f"`image` is a torch.Tensor but `mask` (type: {type(mask)}) is not")

        # Batch single image
        if image.ndim == 3:
            assert image.shape[0] == 3, "Image outside a batch should be of shape (3, H, W)"
            image = image.unsqueeze(0)

        # Batch and add channel dim for single mask
        if mask.ndim == 2:
            mask = mask.unsqueeze(0).unsqueeze(0)

        # Batch single mask or add channel dim
        if mask.ndim == 3:
            # Single batched mask, no channel dim or single mask not batched but channel dim
            if mask.shape[0] == 1:
                mask = mask.unsqueeze(0)

            # Batched masks no channel dim
            else:
                mask = mask.unsqueeze(1)

        assert image.ndim == 4 and mask.ndim == 4, "Image and Mask must have 4 dimensions"
        assert image.shape[-2:] == mask.shape[-2:], "Image and Mask must have the same spatial dimensions"
        assert image.shape[0] == mask.shape[0], "Image and Mask must have the same batch size"

        # Check image is in [-1, 1]
        if image.min() < -1 or image.max() > 1:
            raise ValueError("Image should be in [-1, 1] range")

        # Check mask is in [0, 1]
        if mask.min() < 0 or mask.max() > 1:
            raise ValueError("Mask should be in [0, 1] range")

        # Binarize mask
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1

        # Image as float32
        image = image.to(dtype=torch.float32)
    elif isinstance(mask, torch.Tensor):
        raise TypeError(f"`mask` is a torch.Tensor but `image` (type: {type(image)}) is not")
    else:
        # preprocess image
        if isinstance(image, (PIL.Image.Image, np.ndarray)):
            image = [image]

        if isinstance(image, list) and isinstance(image[0], PIL.Image.Image):
            # resize all images w.r.t. passed height and width
            image = [i.resize((width, height), resample=Image.BICUBIC, reducing_gap=1) for i in image]
            image = [np.array(i.convert("RGB"))[None, :] for i in image]
            image = np.concatenate(image, axis=0)
        elif isinstance(image, list) and isinstance(image[0], np.ndarray):
            image = np.concatenate([i[None, :] for i in
image], axis=0) image = image.transpose(0, 3, 1, 2) image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0 # preprocess mask if isinstance(mask, (PIL.Image.Image, np.ndarray)): mask = [mask] if isinstance(mask, list) and isinstance(mask[0], PIL.Image.Image): mask = [i.resize((width, height), resample=PIL.Image.LANCZOS) for i in mask] mask = np.concatenate([np.array(m.convert("L"))[None, None, :] for m in mask], axis=0) mask = mask.astype(np.float32) / 255.0 elif isinstance(mask, list) and isinstance(mask[0], np.ndarray): mask = np.concatenate([m[None, None, :] for m in mask], axis=0) mask[mask < 0.5] = 0 mask[mask >= 0.5] = 1 mask = torch.from_numpy(mask) mask = 1 - mask return mask, image class KandinskyInpaintPipeline(DiffusionPipeline): """ Pipeline for text-guided image inpainting using Kandinsky2.1 This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) Args: text_encoder ([`MultilingualCLIP`]): Frozen text-encoder. tokenizer ([`XLMRobertaTokenizer`]): Tokenizer of class scheduler ([`DDIMScheduler`]): A scheduler to be used in combination with `unet` to generate image latents. unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the image embedding. movq ([`VQModel`]): MoVQ image encoder and decoder """ model_cpu_offload_seq = "text_encoder->unet->movq" def __init__( self, text_encoder: MultilingualCLIP, movq: VQModel, tokenizer: XLMRobertaTokenizer, unet: UNet2DConditionModel, scheduler: DDIMScheduler, ): super().__init__() self.register_modules( text_encoder=text_encoder, movq=movq, tokenizer=tokenizer, unet=unet, scheduler=scheduler, ) self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1) self._warn_has_been_called = False # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: if latents.shape != shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") latents = latents.to(device) latents = latents * scheduler.init_noise_sigma return latents def _encode_prompt( self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, ): batch_size = len(prompt) if isinstance(prompt, list) else 1 # get prompt text embeddings text_inputs = self.tokenizer( prompt, padding="max_length", max_length=77, truncation=True, return_attention_mask=True, add_special_tokens=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) text_input_ids = text_input_ids.to(device) text_mask = text_inputs.attention_mask.to(device) prompt_embeds, text_encoder_hidden_states = self.text_encoder( input_ids=text_input_ids, attention_mask=text_mask ) prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0) 
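        # The pooled embeddings above and the per-token hidden states / attention mask below are tiled along the
        # batch dimension so every one of the `num_images_per_prompt` samples reuses the same text conditioning.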
text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0) if do_classifier_free_guidance: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = negative_prompt uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=77, truncation=True, return_attention_mask=True, add_special_tokens=True, return_tensors="pt", ) uncond_text_input_ids = uncond_input.input_ids.to(device) uncond_text_mask = uncond_input.attention_mask.to(device) negative_prompt_embeds, uncond_text_encoder_hidden_states = self.text_encoder( input_ids=uncond_text_input_ids, attention_mask=uncond_text_mask ) # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len) seq_len = uncond_text_encoder_hidden_states.shape[1] uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1, num_images_per_prompt, 1) uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view( batch_size * num_images_per_prompt, seq_len, -1 ) uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0) # done duplicates # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) text_encoder_hidden_states = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states]) text_mask = torch.cat([uncond_text_mask, text_mask]) return prompt_embeds, text_encoder_hidden_states, text_mask @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, prompt: Union[str, List[str]], image: Union[torch.Tensor, PIL.Image.Image], mask_image: Union[torch.Tensor, PIL.Image.Image, np.ndarray], image_embeds: torch.Tensor, negative_image_embeds: torch.Tensor, negative_prompt: Optional[Union[str, List[str]]] = None, height: int = 512, width: int = 512, num_inference_steps: int = 100, guidance_scale: float = 4.0, num_images_per_prompt: int = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, output_type: Optional[str] = "pil", callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, callback_steps: int = 1, return_dict: bool = True, ): """ Function invoked when calling the pipeline for generation. Args: prompt (`str` or `List[str]`): The prompt or prompts to guide the image generation. image (`torch.Tensor`, `PIL.Image.Image` or `np.ndarray`): `Image`, or tensor representing an image batch, that will be used as the starting point for the process. 
            mask_image (`PIL.Image.Image`, `torch.Tensor` or `np.ndarray`):
                `Image`, or a tensor representing an image batch, to mask `image`. White pixels in the mask will be
                repainted, while black pixels will be preserved. You can pass a pytorch tensor as mask only if the
                image you passed is a pytorch tensor, and it should contain one color channel (L) instead of 3, so the
                expected shape would be either `(B, 1, H, W)`, `(B, H, W)`, `(1, H, W)` or `(H, W)`. If image is a PIL
                image or numpy array, mask should also be either a PIL image or a numpy array. If it is a PIL image,
                it will be converted to a single channel (luminance) before use. If it is a numpy array, the expected
                shape is `(H, W)`.
            image_embeds (`torch.Tensor` or `List[torch.Tensor]`):
                The clip image embeddings for text prompt, that will be used to condition the image generation.
            negative_image_embeds (`torch.Tensor` or `List[torch.Tensor]`):
                The clip image embeddings for negative text prompt, will be used to condition the image generation.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e.,
                ignored if `guidance_scale` is less than `1`).
            height (`int`, *optional*, defaults to 512):
                The height in pixels of the generated image.
            width (`int`, *optional*, defaults to 512):
                The width in pixels of the generated image.
            num_inference_steps (`int`, *optional*, defaults to 100):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            guidance_scale (`float`, *optional*, defaults to 4.0):
                Guidance scale as defined in [Classifier-Free Diffusion
                Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2.
                of [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting
                `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to
                the text `prompt`, usually at the expense of lower image quality.
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
                to make generation deterministic.
            latents (`torch.Tensor`, *optional*):
                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a
                latents tensor will be generated by sampling using the supplied random `generator`.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"`
                (`np.array`) or `"pt"` (`torch.Tensor`).
            callback (`Callable`, *optional*):
                A function that is called every `callback_steps` steps during inference. The function is called with
                the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`.
            callback_steps (`int`, *optional*, defaults to 1):
                The frequency at which the `callback` function is called. If not specified, the callback is called at
                every step.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
Examples: Returns: [`~pipelines.ImagePipelineOutput`] or `tuple` """ if not self._warn_has_been_called and version.parse(version.parse(__version__).base_version) < version.parse( "0.23.0.dev0" ): logger.warning( "Please note that the expected format of `mask_image` has recently been changed. " "Before diffusers == 0.19.0, Kandinsky Inpainting pipelines repainted black pixels and preserved black pixels. " "As of diffusers==0.19.0 this behavior has been inverted. Now white pixels are repainted and black pixels are preserved. " "This way, Kandinsky's masking behavior is aligned with Stable Diffusion. " "THIS means that you HAVE to invert the input mask to have the same behavior as before as explained in https://github.com/huggingface/diffusers/pull/4207. " "This warning will be suppressed after the first inference call and will be removed in diffusers>0.23.0" ) self._warn_has_been_called = True # Define call parameters if isinstance(prompt, str): batch_size = 1 elif isinstance(prompt, list): batch_size = len(prompt) else: raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") device = self._execution_device batch_size = batch_size * num_images_per_prompt do_classifier_free_guidance = guidance_scale > 1.0 prompt_embeds, text_encoder_hidden_states, _ = self._encode_prompt( prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt ) if isinstance(image_embeds, list): image_embeds = torch.cat(image_embeds, dim=0) if isinstance(negative_image_embeds, list): negative_image_embeds = torch.cat(negative_image_embeds, dim=0) if do_classifier_free_guidance: image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0) image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to( dtype=prompt_embeds.dtype, device=device ) # preprocess image and mask mask_image, image = prepare_mask_and_masked_image(image, mask_image, height, width) image = image.to(dtype=prompt_embeds.dtype, device=device) image = self.movq.encode(image)["latents"] mask_image = mask_image.to(dtype=prompt_embeds.dtype, device=device) image_shape = tuple(image.shape[-2:]) mask_image = F.interpolate( mask_image, image_shape, mode="nearest", ) mask_image = prepare_mask(mask_image) masked_image = image * mask_image mask_image = mask_image.repeat_interleave(num_images_per_prompt, dim=0) masked_image = masked_image.repeat_interleave(num_images_per_prompt, dim=0) if do_classifier_free_guidance: mask_image = mask_image.repeat(2, 1, 1, 1) masked_image = masked_image.repeat(2, 1, 1, 1) self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps_tensor = self.scheduler.timesteps num_channels_latents = self.movq.config.latent_channels # get h, w for latents sample_height, sample_width = get_new_h_w(height, width, self.movq_scale_factor) # create initial latent latents = self.prepare_latents( (batch_size, num_channels_latents, sample_height, sample_width), text_encoder_hidden_states.dtype, device, generator, latents, self.scheduler, ) # Check that sizes of mask, masked image and latents match with expected num_channels_mask = mask_image.shape[1] num_channels_masked_image = masked_image.shape[1] if num_channels_latents + num_channels_mask + num_channels_masked_image != self.unet.config.in_channels: raise ValueError( f"Incorrect configuration settings! 
The config of `pipeline.unet`: {self.unet.config} expects" f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +" f" `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image}" f" = {num_channels_latents + num_channels_masked_image + num_channels_mask}. Please verify the config of" " `pipeline.unet` or your `mask_image` or `image` input." ) for i, t in enumerate(self.progress_bar(timesteps_tensor)): # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = torch.cat([latent_model_input, masked_image, mask_image], dim=1) added_cond_kwargs = {"text_embeds": prompt_embeds, "image_embeds": image_embeds} noise_pred = self.unet( sample=latent_model_input, timestep=t, encoder_hidden_states=text_encoder_hidden_states, added_cond_kwargs=added_cond_kwargs, return_dict=False, )[0] if do_classifier_free_guidance: noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1) noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) _, variance_pred_text = variance_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1) if not ( hasattr(self.scheduler.config, "variance_type") and self.scheduler.config.variance_type in ["learned", "learned_range"] ): noise_pred, _ = noise_pred.split(latents.shape[1], dim=1) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step( noise_pred, t, latents, generator=generator, ).prev_sample if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, "order", 1) callback(step_idx, t, latents) if XLA_AVAILABLE: xm.mark_step() # post-processing image = self.movq.decode(latents, force_not_quantize=True)["sample"] self.maybe_free_model_hooks() if output_type not in ["pt", "np", "pil"]: raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}") if output_type in ["np", "pil"]: image = image * 0.5 + 0.5 image = image.clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).float().numpy() if output_type == "pil": image = self.numpy_to_pil(image) if not return_dict: return (image,) return ImagePipelineOutput(images=image)
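As the runtime warning in `__call__` above explains, the mask convention for this pipeline was inverted in diffusers 0.19.0: white (1) pixels are now repainted and black (0) pixels are preserved. A minimal sketch of adapting a mask that was written for the old convention, assuming a float mask in `[0, 1]` (variable names are illustrative):

```py
>>> import numpy as np

>>> # Mask built for the pre-0.19.0 convention, where 1 marked the region to keep.
>>> old_style_mask = np.zeros((768, 768), dtype=np.float32)
>>> old_style_mask[:250, 250:-250] = 1

>>> # Invert it for the current convention, where 1 marks the region to repaint.
>>> mask_image = 1 - old_style_mask
```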
diffusers/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_inpaint.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_inpaint.py", "repo_id": "diffusers", "token_count": 12890 }
176
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import math from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch import torch.nn.functional as F from transformers import ( CLIPImageProcessor, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionModelWithProjection, ) from ...image_processor import PipelineImageInput, VaeImageProcessor from ...loaders import ( FromSingleFileMixin, IPAdapterMixin, StableDiffusionXLLoraLoaderMixin, TextualInversionLoaderMixin, ) from ...models import AutoencoderKL, UNet2DConditionModel from ...models.attention_processor import ( Attention, AttnProcessor, AttnProcessor2_0, XFormersAttnProcessor, ) from ...models.lora import adjust_lora_scale_text_encoder from ...schedulers import DDIMScheduler, DPMSolverMultistepScheduler from ...utils import ( USE_PEFT_BACKEND, is_invisible_watermark_available, is_torch_xla_available, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers, ) from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline from .pipeline_output import LEditsPPDiffusionPipelineOutput, LEditsPPInversionPipelineOutput if is_invisible_watermark_available(): from ..stable_diffusion_xl.watermark import StableDiffusionXLWatermarker if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py >>> import torch >>> from diffusers import LEditsPPPipelineStableDiffusionXL >>> from diffusers.utils import load_image >>> pipe = LEditsPPPipelineStableDiffusionXL.from_pretrained( ... "stabilityai/stable-diffusion-xl-base-1.0", variant="fp16", torch_dtype=torch.float16 ... ) >>> pipe.enable_vae_tiling() >>> pipe = pipe.to("cuda") >>> img_url = "https://www.aiml.informatik.tu-darmstadt.de/people/mbrack/tennis.jpg" >>> image = load_image(img_url).resize((1024, 1024)) >>> _ = pipe.invert(image=image, num_inversion_steps=50, skip=0.2) >>> edited_image = pipe( ... editing_prompt=["tennis ball", "tomato"], ... reverse_editing_direction=[True, False], ... edit_guidance_scale=[5.0, 10.0], ... edit_threshold=[0.9, 0.85], ... 
).images[0] ``` """ # Copied from diffusers.pipelines.ledits_pp.pipeline_leditspp_stable_diffusion.LeditsAttentionStore class LeditsAttentionStore: @staticmethod def get_empty_store(): return {"down_cross": [], "mid_cross": [], "up_cross": [], "down_self": [], "mid_self": [], "up_self": []} def __call__(self, attn, is_cross: bool, place_in_unet: str, editing_prompts, PnP=False): # attn.shape = batch_size * head_size, seq_len query, seq_len_key if attn.shape[1] <= self.max_size: bs = 1 + int(PnP) + editing_prompts skip = 2 if PnP else 1 # skip PnP & unconditional attn = torch.stack(attn.split(self.batch_size)).permute(1, 0, 2, 3) source_batch_size = int(attn.shape[1] // bs) self.forward(attn[:, skip * source_batch_size :], is_cross, place_in_unet) def forward(self, attn, is_cross: bool, place_in_unet: str): key = f"{place_in_unet}_{'cross' if is_cross else 'self'}" self.step_store[key].append(attn) def between_steps(self, store_step=True): if store_step: if self.average: if len(self.attention_store) == 0: self.attention_store = self.step_store else: for key in self.attention_store: for i in range(len(self.attention_store[key])): self.attention_store[key][i] += self.step_store[key][i] else: if len(self.attention_store) == 0: self.attention_store = [self.step_store] else: self.attention_store.append(self.step_store) self.cur_step += 1 self.step_store = self.get_empty_store() def get_attention(self, step: int): if self.average: attention = { key: [item / self.cur_step for item in self.attention_store[key]] for key in self.attention_store } else: assert step is not None attention = self.attention_store[step] return attention def aggregate_attention( self, attention_maps, prompts, res: Union[int, Tuple[int]], from_where: List[str], is_cross: bool, select: int ): out = [[] for x in range(self.batch_size)] if isinstance(res, int): num_pixels = res**2 resolution = (res, res) else: num_pixels = res[0] * res[1] resolution = res[:2] for location in from_where: for bs_item in attention_maps[f"{location}_{'cross' if is_cross else 'self'}"]: for batch, item in enumerate(bs_item): if item.shape[1] == num_pixels: cross_maps = item.reshape(len(prompts), -1, *resolution, item.shape[-1])[select] out[batch].append(cross_maps) out = torch.stack([torch.cat(x, dim=0) for x in out]) # average over heads out = out.sum(1) / out.shape[1] return out def __init__(self, average: bool, batch_size=1, max_resolution=16, max_size: int = None): self.step_store = self.get_empty_store() self.attention_store = [] self.cur_step = 0 self.average = average self.batch_size = batch_size if max_size is None: self.max_size = max_resolution**2 elif max_size is not None and max_resolution is None: self.max_size = max_size else: raise ValueError("Only allowed to set one of max_resolution or max_size") # Copied from diffusers.pipelines.ledits_pp.pipeline_leditspp_stable_diffusion.LeditsGaussianSmoothing class LeditsGaussianSmoothing: def __init__(self, device): kernel_size = [3, 3] sigma = [0.5, 0.5] # The gaussian kernel is the product of the gaussian function of each dimension. kernel = 1 meshgrids = torch.meshgrid([torch.arange(size, dtype=torch.float32) for size in kernel_size], indexing="ij") for size, std, mgrid in zip(kernel_size, sigma, meshgrids): mean = (size - 1) / 2 kernel *= 1 / (std * math.sqrt(2 * math.pi)) * torch.exp(-(((mgrid - mean) / (2 * std)) ** 2)) # Make sure sum of values in gaussian kernel equals 1. 
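        # (The kernel accumulated above is the outer product of two 1-D Gaussians with sigma=0.5 on a 3x3 grid;
        # normalizing it below keeps the overall magnitude of the smoothed attention maps unchanged.)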
kernel = kernel / torch.sum(kernel) # Reshape to depthwise convolutional weight kernel = kernel.view(1, 1, *kernel.size()) kernel = kernel.repeat(1, *[1] * (kernel.dim() - 1)) self.weight = kernel.to(device) def __call__(self, input): """ Arguments: Apply gaussian filter to input. input (torch.Tensor): Input to apply gaussian filter on. Returns: filtered (torch.Tensor): Filtered output. """ return F.conv2d(input, weight=self.weight.to(input.dtype)) # Copied from diffusers.pipelines.ledits_pp.pipeline_leditspp_stable_diffusion.LEDITSCrossAttnProcessor class LEDITSCrossAttnProcessor: def __init__(self, attention_store, place_in_unet, pnp, editing_prompts): self.attnstore = attention_store self.place_in_unet = place_in_unet self.editing_prompts = editing_prompts self.pnp = pnp def __call__( self, attn: Attention, hidden_states, encoder_hidden_states, attention_mask=None, temb=None, ): batch_size, sequence_length, _ = ( hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape ) attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) query = attn.to_q(hidden_states) if encoder_hidden_states is None: encoder_hidden_states = hidden_states elif attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) query = attn.head_to_batch_dim(query) key = attn.head_to_batch_dim(key) value = attn.head_to_batch_dim(value) attention_probs = attn.get_attention_scores(query, key, attention_mask) self.attnstore( attention_probs, is_cross=True, place_in_unet=self.place_in_unet, editing_prompts=self.editing_prompts, PnP=self.pnp, ) hidden_states = torch.bmm(attention_probs, value) hidden_states = attn.batch_to_head_dim(hidden_states) # linear proj hidden_states = attn.to_out[0](hidden_states) # dropout hidden_states = attn.to_out[1](hidden_states) hidden_states = hidden_states / attn.rescale_output_factor return hidden_states class LEditsPPPipelineStableDiffusionXL( DiffusionPipeline, FromSingleFileMixin, StableDiffusionXLLoraLoaderMixin, TextualInversionLoaderMixin, IPAdapterMixin, ): """ Pipeline for textual image editing using LEDits++ with Stable Diffusion XL. This model inherits from [`DiffusionPipeline`] and builds on the [`StableDiffusionXLPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). In addition the pipeline inherits the following loading methods: - *LoRA*: [`LEditsPPPipelineStableDiffusionXL.load_lora_weights`] - *Ckpt*: [`loaders.FromSingleFileMixin.from_single_file`] as well as the following saving methods: - *LoRA*: [`loaders.StableDiffusionXLPipeline.save_lora_weights`] Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. text_encoder ([`~transformers.CLIPTextModel`]): Frozen text-encoder. Stable Diffusion XL uses the text portion of [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. text_encoder_2 ([`~transformers.CLIPTextModelWithProjection`]): Second frozen text-encoder. 
            Stable Diffusion XL uses the text and pool portion of
            [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection),
            specifically the
            [laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k)
            variant.
        tokenizer ([`~transformers.CLIPTokenizer`]):
            Tokenizer of class
            [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
        tokenizer_2 ([`~transformers.CLIPTokenizer`]):
            Second Tokenizer of class
            [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
        unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
        scheduler ([`DPMSolverMultistepScheduler`] or [`DDIMScheduler`]):
            A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
            [`DPMSolverMultistepScheduler`] or [`DDIMScheduler`]. If any other scheduler is passed it will
            automatically be set to [`DPMSolverMultistepScheduler`].
        force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `True`):
            Whether the negative prompt embeddings shall be forced to always be set to 0. Also see the config of
            `stabilityai/stable-diffusion-xl-base-1-0`.
        add_watermarker (`bool`, *optional*):
            Whether to use the [invisible_watermark library](https://github.com/ShieldMnt/invisible-watermark/) to
            watermark output images. If not defined, it will default to True if the package is installed, otherwise no
            watermarker will be used.
    """

    model_cpu_offload_seq = "text_encoder->text_encoder_2->unet->vae"
    _optional_components = [
        "tokenizer",
        "tokenizer_2",
        "text_encoder",
        "text_encoder_2",
        "image_encoder",
        "feature_extractor",
    ]
    _callback_tensor_inputs = [
        "latents",
        "prompt_embeds",
        "negative_prompt_embeds",
        "add_text_embeds",
        "add_time_ids",
        "negative_pooled_prompt_embeds",
        "negative_add_time_ids",
    ]

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        text_encoder_2: CLIPTextModelWithProjection,
        tokenizer: CLIPTokenizer,
        tokenizer_2: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DPMSolverMultistepScheduler, DDIMScheduler],
        image_encoder: CLIPVisionModelWithProjection = None,
        feature_extractor: CLIPImageProcessor = None,
        force_zeros_for_empty_prompt: bool = True,
        add_watermarker: Optional[bool] = None,
    ):
        super().__init__()

        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            text_encoder_2=text_encoder_2,
            tokenizer=tokenizer,
            tokenizer_2=tokenizer_2,
            unet=unet,
            scheduler=scheduler,
            image_encoder=image_encoder,
            feature_extractor=feature_extractor,
        )
        self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt)
        self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8
        self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)

        if not isinstance(scheduler, DDIMScheduler) and not isinstance(scheduler, DPMSolverMultistepScheduler):
            self.scheduler = DPMSolverMultistepScheduler.from_config(
                scheduler.config, algorithm_type="sde-dpmsolver++", solver_order=2
            )
            logger.warning(
                "This pipeline only supports DDIMScheduler and DPMSolverMultistepScheduler. "
                "The scheduler has been changed to DPMSolverMultistepScheduler."
) self.default_sample_size = ( self.unet.config.sample_size if hasattr(self, "unet") and self.unet is not None and hasattr(self.unet.config, "sample_size") else 128 ) add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available() if add_watermarker: self.watermark = StableDiffusionXLWatermarker() else: self.watermark = None self.inversion_steps = None def encode_prompt( self, device: Optional[torch.device] = None, num_images_per_prompt: int = 1, negative_prompt: Optional[str] = None, negative_prompt_2: Optional[str] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, lora_scale: Optional[float] = None, clip_skip: Optional[int] = None, enable_edit_guidance: bool = True, editing_prompt: Optional[str] = None, editing_prompt_embeds: Optional[torch.Tensor] = None, editing_pooled_prompt_embeds: Optional[torch.Tensor] = None, ) -> object: r""" Encodes the prompt into text encoder hidden states. Args: device: (`torch.device`): torch device num_images_per_prompt (`int`): number of images that should be generated per prompt negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. negative_prompt_2 (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. negative_pooled_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` input argument. lora_scale (`float`, *optional*): A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. enable_edit_guidance (`bool`): Whether to guide towards an editing prompt or not. editing_prompt (`str` or `List[str]`, *optional*): Editing prompt(s) to be encoded. If not defined and 'enable_edit_guidance' is True, one has to pass `editing_prompt_embeds` instead. editing_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated edit text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided and 'enable_edit_guidance' is True, editing_prompt_embeds will be generated from `editing_prompt` input argument. editing_pooled_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated edit pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, pooled editing_pooled_prompt_embeds will be generated from `editing_prompt` input argument. 
""" device = device or self._execution_device # set lora scale so that monkey patched LoRA # function of text encoder can correctly access it if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin): self._lora_scale = lora_scale # dynamically adjust the LoRA scale if self.text_encoder is not None: if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if self.text_encoder_2 is not None: if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale) else: scale_lora_layers(self.text_encoder_2, lora_scale) batch_size = self.batch_size # Define tokenizers and text encoders tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2] text_encoders = ( [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2] ) num_edit_tokens = 0 # get unconditional embeddings for classifier free guidance zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt if negative_prompt_embeds is None: negative_prompt = negative_prompt or "" negative_prompt_2 = negative_prompt_2 or negative_prompt # normalize str to list negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt negative_prompt_2 = ( batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2 ) uncond_tokens: List[str] if batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but image inversion " f" has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of the input images." 
) else: uncond_tokens = [negative_prompt, negative_prompt_2] negative_prompt_embeds_list = [] for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders): if isinstance(self, TextualInversionLoaderMixin): negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer) uncond_input = tokenizer( negative_prompt, padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt", ) negative_prompt_embeds = text_encoder( uncond_input.input_ids.to(device), output_hidden_states=True, ) # We are only ALWAYS interested in the pooled output of the final text encoder negative_pooled_prompt_embeds = negative_prompt_embeds[0] negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2] negative_prompt_embeds_list.append(negative_prompt_embeds) negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1) if zero_out_negative_prompt: negative_prompt_embeds = torch.zeros_like(negative_prompt_embeds) negative_pooled_prompt_embeds = torch.zeros_like(negative_pooled_prompt_embeds) if enable_edit_guidance and editing_prompt_embeds is None: editing_prompt_2 = editing_prompt editing_prompts = [editing_prompt, editing_prompt_2] edit_prompt_embeds_list = [] for editing_prompt, tokenizer, text_encoder in zip(editing_prompts, tokenizers, text_encoders): if isinstance(self, TextualInversionLoaderMixin): editing_prompt = self.maybe_convert_prompt(editing_prompt, tokenizer) max_length = negative_prompt_embeds.shape[1] edit_concepts_input = tokenizer( # [x for item in editing_prompt for x in repeat(item, batch_size)], editing_prompt, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", return_length=True, ) num_edit_tokens = edit_concepts_input.length - 2 edit_concepts_embeds = text_encoder( edit_concepts_input.input_ids.to(device), output_hidden_states=True, ) # We are only ALWAYS interested in the pooled output of the final text encoder editing_pooled_prompt_embeds = edit_concepts_embeds[0] if clip_skip is None: edit_concepts_embeds = edit_concepts_embeds.hidden_states[-2] else: # "2" because SDXL always indexes from the penultimate layer. 
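                    # e.g. clip_skip=1 selects hidden_states[-3], one layer earlier than the default hidden_states[-2].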
edit_concepts_embeds = edit_concepts_embeds.hidden_states[-(clip_skip + 2)] edit_prompt_embeds_list.append(edit_concepts_embeds) edit_concepts_embeds = torch.concat(edit_prompt_embeds_list, dim=-1) elif not enable_edit_guidance: edit_concepts_embeds = None editing_pooled_prompt_embeds = None negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) bs_embed, seq_len, _ = negative_prompt_embeds.shape # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) if enable_edit_guidance: bs_embed_edit, seq_len, _ = edit_concepts_embeds.shape edit_concepts_embeds = edit_concepts_embeds.to(dtype=self.text_encoder_2.dtype, device=device) edit_concepts_embeds = edit_concepts_embeds.repeat(1, num_images_per_prompt, 1) edit_concepts_embeds = edit_concepts_embeds.view(bs_embed_edit * num_images_per_prompt, seq_len, -1) negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( bs_embed * num_images_per_prompt, -1 ) if enable_edit_guidance: editing_pooled_prompt_embeds = editing_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( bs_embed_edit * num_images_per_prompt, -1 ) if self.text_encoder is not None: if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: # Retrieve the original scale by scaling back the LoRA layers unscale_lora_layers(self.text_encoder, lora_scale) if self.text_encoder_2 is not None: if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: # Retrieve the original scale by scaling back the LoRA layers unscale_lora_layers(self.text_encoder_2, lora_scale) return ( negative_prompt_embeds, edit_concepts_embeds, negative_pooled_prompt_embeds, editing_pooled_prompt_embeds, num_edit_tokens, ) # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs def prepare_extra_step_kwargs(self, eta, generator=None): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://huggingface.co/papers/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs def check_inputs( self, negative_prompt=None, negative_prompt_2=None, negative_prompt_embeds=None, negative_pooled_prompt_embeds=None, ): if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." 
) elif negative_prompt_2 is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None: raise ValueError( "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`." ) # Modified from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents def prepare_latents(self, device, latents): latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents def _get_add_time_ids( self, original_size, crops_coords_top_left, target_size, dtype, text_encoder_projection_dim=None ): add_time_ids = list(original_size + crops_coords_top_left + target_size) passed_add_embed_dim = ( self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim ) expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features if expected_add_embed_dim != passed_add_embed_dim: raise ValueError( f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`." ) add_time_ids = torch.tensor([add_time_ids], dtype=dtype) return add_time_ids # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae def upcast_vae(self): dtype = self.vae.dtype self.vae.to(dtype=torch.float32) use_torch_2_0_or_xformers = isinstance( self.vae.decoder.mid_block.attentions[0].processor, ( AttnProcessor2_0, XFormersAttnProcessor, ), ) # if xformers or torch_2_0 is used attention block does not need # to be in float32 which can save lots of memory if use_torch_2_0_or_xformers: self.vae.post_quant_conv.to(dtype) self.vae.decoder.conv_in.to(dtype) self.vae.decoder.mid_block.to(dtype) # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding def get_guidance_scale_embedding( self, w: torch.Tensor, embedding_dim: int = 512, dtype: torch.dtype = torch.float32 ) -> torch.Tensor: """ See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298 Args: w (`torch.Tensor`): Generate embedding vectors with a specified guidance scale to subsequently enrich timestep embeddings. embedding_dim (`int`, *optional*, defaults to 512): Dimension of the embeddings to generate. dtype (`torch.dtype`, *optional*, defaults to `torch.float32`): Data type of the generated embeddings. Returns: `torch.Tensor`: Embedding vectors with shape `(len(w), embedding_dim)`. 
""" assert len(w.shape) == 1 w = w * 1000.0 half_dim = embedding_dim // 2 emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) emb = w.to(dtype)[:, None] * emb[None, :] emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) if embedding_dim % 2 == 1: # zero pad emb = torch.nn.functional.pad(emb, (0, 1)) assert emb.shape == (w.shape[0], embedding_dim) return emb @property def guidance_scale(self): return self._guidance_scale @property def guidance_rescale(self): return self._guidance_rescale @property def clip_skip(self): return self._clip_skip # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1` # corresponds to doing no classifier free guidance. @property def do_classifier_free_guidance(self): return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None @property def cross_attention_kwargs(self): return self._cross_attention_kwargs @property def denoising_end(self): return self._denoising_end @property def num_timesteps(self): return self._num_timesteps def enable_vae_slicing(self): r""" Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. """ self.vae.enable_slicing() def disable_vae_slicing(self): r""" Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to computing decoding in one step. """ self.vae.disable_slicing() def enable_vae_tiling(self): r""" Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow processing larger images. """ self.vae.enable_tiling() def disable_vae_tiling(self): r""" Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to computing decoding in one step. 
""" self.vae.disable_tiling() # Copied from diffusers.pipelines.ledits_pp.pipeline_leditspp_stable_diffusion.LEditsPPPipelineStableDiffusion.prepare_unet def prepare_unet(self, attention_store, PnP: bool = False): attn_procs = {} for name in self.unet.attn_processors.keys(): if name.startswith("mid_block"): place_in_unet = "mid" elif name.startswith("up_blocks"): place_in_unet = "up" elif name.startswith("down_blocks"): place_in_unet = "down" else: continue if "attn2" in name and place_in_unet != "mid": attn_procs[name] = LEDITSCrossAttnProcessor( attention_store=attention_store, place_in_unet=place_in_unet, pnp=PnP, editing_prompts=self.enabled_editing_prompts, ) else: attn_procs[name] = AttnProcessor() self.unet.set_attn_processor(attn_procs) @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, denoising_end: Optional[float] = None, negative_prompt: Optional[Union[str, List[str]]] = None, negative_prompt_2: Optional[Union[str, List[str]]] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, ip_adapter_image: Optional[PipelineImageInput] = None, output_type: Optional[str] = "pil", return_dict: bool = True, cross_attention_kwargs: Optional[Dict[str, Any]] = None, guidance_rescale: float = 0.0, crops_coords_top_left: Tuple[int, int] = (0, 0), target_size: Optional[Tuple[int, int]] = None, editing_prompt: Optional[Union[str, List[str]]] = None, editing_prompt_embeddings: Optional[torch.Tensor] = None, editing_pooled_prompt_embeds: Optional[torch.Tensor] = None, reverse_editing_direction: Optional[Union[bool, List[bool]]] = False, edit_guidance_scale: Optional[Union[float, List[float]]] = 5, edit_warmup_steps: Optional[Union[int, List[int]]] = 0, edit_cooldown_steps: Optional[Union[int, List[int]]] = None, edit_threshold: Optional[Union[float, List[float]]] = 0.9, sem_guidance: Optional[List[torch.Tensor]] = None, use_cross_attn_mask: bool = False, use_intersect_mask: bool = False, user_mask: Optional[torch.Tensor] = None, attn_store_steps: Optional[List[int]] = [], store_averaged_over_steps: bool = True, clip_skip: Optional[int] = None, callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, callback_on_step_end_tensor_inputs: List[str] = ["latents"], **kwargs, ): r""" The call function to the pipeline for editing. The [`~pipelines.ledits_pp.LEditsPPPipelineStableDiffusionXL.invert`] method has to be called beforehand. Edits will always be performed for the last inverted image(s). Args: denoising_end (`float`, *optional*): When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be completed before it is intentionally prematurely terminated. As a result, the returned sample will still retain a substantial amount of noise as determined by the discrete timesteps selected by the scheduler. The denoising_end parameter should ideally be utilized when this pipeline forms a part of a "Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). negative_prompt_2 (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and `text_encoder_2`. 
If not defined, `negative_prompt` is used in both text-encoders negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. negative_pooled_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` input argument. ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] instead of a plain tuple. callback (`Callable`, *optional*): A function that will be called every `callback_steps` steps during inference. The function will be called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function will be called. If not specified, the callback will be called at every step. cross_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). guidance_rescale (`float`, *optional*, defaults to 0.7): Guidance rescale factor proposed by [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) `guidance_scale` is defined as `φ` in equation 16. of [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://huggingface.co/papers/2305.08891). Guidance rescale factor should fix overexposure when using zero terminal SNR. crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): For most cases, `target_size` should be set to the desired height and width of the generated image. If not specified it will default to `(width, height)`. Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). editing_prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide the image generation. The image is reconstructed by setting `editing_prompt = None`. Guidance direction of prompt should be specified via `reverse_editing_direction`. editing_prompt_embeddings (`torch.Tensor`, *optional*): Pre-generated edit text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, editing_prompt_embeddings will be generated from `editing_prompt` input argument. 
editing_pooled_prompt_embeddings (`torch.Tensor`, *optional*): Pre-generated pooled edit text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, editing_prompt_embeddings will be generated from `editing_prompt` input argument. reverse_editing_direction (`bool` or `List[bool]`, *optional*, defaults to `False`): Whether the corresponding prompt in `editing_prompt` should be increased or decreased. edit_guidance_scale (`float` or `List[float]`, *optional*, defaults to 5): Guidance scale for guiding the image generation. If provided as list values should correspond to `editing_prompt`. `edit_guidance_scale` is defined as `s_e` of equation 12 of [LEDITS++ Paper](https://huggingface.co/papers/2301.12247). edit_warmup_steps (`float` or `List[float]`, *optional*, defaults to 10): Number of diffusion steps (for each prompt) for which guidance is not applied. edit_cooldown_steps (`float` or `List[float]`, *optional*, defaults to `None`): Number of diffusion steps (for each prompt) after which guidance is no longer applied. edit_threshold (`float` or `List[float]`, *optional*, defaults to 0.9): Masking threshold of guidance. Threshold should be proportional to the image region that is modified. 'edit_threshold' is defined as 'λ' of equation 12 of [LEDITS++ Paper](https://huggingface.co/papers/2301.12247). sem_guidance (`List[torch.Tensor]`, *optional*): List of pre-generated guidance vectors to be applied at generation. Length of the list has to correspond to `num_inference_steps`. use_cross_attn_mask: Whether cross-attention masks are used. Cross-attention masks are always used when use_intersect_mask is set to true. Cross-attention masks are defined as 'M^1' of equation 12 of [LEDITS++ paper](https://huggingface.co/papers/2311.16711). use_intersect_mask: Whether the masking term is calculated as intersection of cross-attention masks and masks derived from the noise estimate. Cross-attention mask are defined as 'M^1' and masks derived from the noise estimate are defined as 'M^2' of equation 12 of [LEDITS++ paper](https://huggingface.co/papers/2311.16711). user_mask: User-provided mask for even better control over the editing process. This is helpful when LEDITS++'s implicit masks do not meet user preferences. attn_store_steps: Steps for which the attention maps are stored in the AttentionStore. Just for visualization purposes. store_averaged_over_steps: Whether the attention maps for the 'attn_store_steps' are stored averaged over the diffusion steps. If False, attention maps for each step are stores separately. Just for visualization purposes. clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. callback_on_step_end (`Callable`, *optional*): A function that calls at the end of each denoising steps during the inference. The function is called with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by `callback_on_step_end_tensor_inputs`. callback_on_step_end_tensor_inputs (`List`, *optional*): The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the `._callback_tensor_inputs` attribute of your pipeline class. 
Examples: Returns: [`~pipelines.ledits_pp.LEditsPPDiffusionPipelineOutput`] or `tuple`: [`~pipelines.ledits_pp.LEditsPPDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple. When returning a tuple, the first element is a list with the generated images. """ if self.inversion_steps is None: raise ValueError( "You need to invert an input image first before calling the pipeline. The `invert` method has to be called beforehand. Edits will always be performed for the last inverted image(s)." ) eta = self.eta num_images_per_prompt = 1 latents = self.init_latents zs = self.zs self.scheduler.set_timesteps(len(self.scheduler.timesteps)) if use_intersect_mask: use_cross_attn_mask = True if use_cross_attn_mask: self.smoothing = LeditsGaussianSmoothing(self.device) if user_mask is not None: user_mask = user_mask.to(self.device) # TODO: Check inputs # 1. Check inputs. Raise error if not correct # self.check_inputs( # callback_steps, # negative_prompt, # negative_prompt_2, # prompt_embeds, # negative_prompt_embeds, # pooled_prompt_embeds, # negative_pooled_prompt_embeds, # ) self._guidance_rescale = guidance_rescale self._clip_skip = clip_skip self._cross_attention_kwargs = cross_attention_kwargs self._denoising_end = denoising_end # 2. Define call parameters batch_size = self.batch_size device = self._execution_device if editing_prompt: enable_edit_guidance = True if isinstance(editing_prompt, str): editing_prompt = [editing_prompt] self.enabled_editing_prompts = len(editing_prompt) elif editing_prompt_embeddings is not None: enable_edit_guidance = True self.enabled_editing_prompts = editing_prompt_embeddings.shape[0] else: self.enabled_editing_prompts = 0 enable_edit_guidance = False # 3. Encode input prompt text_encoder_lora_scale = ( cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None ) ( prompt_embeds, edit_prompt_embeds, negative_pooled_prompt_embeds, pooled_edit_embeds, num_edit_tokens, ) = self.encode_prompt( device=device, num_images_per_prompt=num_images_per_prompt, negative_prompt=negative_prompt, negative_prompt_2=negative_prompt_2, negative_prompt_embeds=negative_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, lora_scale=text_encoder_lora_scale, clip_skip=self.clip_skip, enable_edit_guidance=enable_edit_guidance, editing_prompt=editing_prompt, editing_prompt_embeds=editing_prompt_embeddings, editing_pooled_prompt_embeds=editing_pooled_prompt_embeds, ) # 4. Prepare timesteps # self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.inversion_steps t_to_idx = {int(v): k for k, v in enumerate(timesteps)} if use_cross_attn_mask: self.attention_store = LeditsAttentionStore( average=store_averaged_over_steps, batch_size=batch_size, max_size=(latents.shape[-2] / 4.0) * (latents.shape[-1] / 4.0), max_resolution=None, ) self.prepare_unet(self.attention_store) resolution = latents.shape[-2:] att_res = (int(resolution[0] / 4), int(resolution[1] / 4)) # 5. Prepare latent variables latents = self.prepare_latents(device=device, latents=latents) # 6. Prepare extra step kwargs. extra_step_kwargs = self.prepare_extra_step_kwargs(eta) if self.text_encoder_2 is None: text_encoder_projection_dim = int(negative_pooled_prompt_embeds.shape[-1]) else: text_encoder_projection_dim = self.text_encoder_2.config.projection_dim # 7. 
Prepare added time ids & embeddings add_text_embeds = negative_pooled_prompt_embeds add_time_ids = self._get_add_time_ids( self.size, crops_coords_top_left, self.size, dtype=negative_pooled_prompt_embeds.dtype, text_encoder_projection_dim=text_encoder_projection_dim, ) if enable_edit_guidance: prompt_embeds = torch.cat([prompt_embeds, edit_prompt_embeds], dim=0) add_text_embeds = torch.cat([add_text_embeds, pooled_edit_embeds], dim=0) edit_concepts_time_ids = add_time_ids.repeat(edit_prompt_embeds.shape[0], 1) add_time_ids = torch.cat([add_time_ids, edit_concepts_time_ids], dim=0) self.text_cross_attention_maps = [editing_prompt] if isinstance(editing_prompt, str) else editing_prompt prompt_embeds = prompt_embeds.to(device) add_text_embeds = add_text_embeds.to(device) add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1) if ip_adapter_image is not None: # TODO: fix image encoding image_embeds, negative_image_embeds = self.encode_image(ip_adapter_image, device, num_images_per_prompt) if self.do_classifier_free_guidance: image_embeds = torch.cat([negative_image_embeds, image_embeds]) image_embeds = image_embeds.to(device) # 8. Denoising loop self.sem_guidance = None self.activation_mask = None if ( self.denoising_end is not None and isinstance(self.denoising_end, float) and self.denoising_end > 0 and self.denoising_end < 1 ): discrete_timestep_cutoff = int( round( self.scheduler.config.num_train_timesteps - (self.denoising_end * self.scheduler.config.num_train_timesteps) ) ) num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps))) timesteps = timesteps[:num_inference_steps] # 9. Optionally get Guidance Scale Embedding timestep_cond = None if self.unet.config.time_cond_proj_dim is not None: guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) timestep_cond = self.get_guidance_scale_embedding( guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim ).to(device=device, dtype=latents.dtype) self._num_timesteps = len(timesteps) with self.progress_bar(total=self._num_timesteps) as progress_bar: for i, t in enumerate(timesteps): # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * (1 + self.enabled_editing_prompts)) latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) # predict the noise residual added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids} if ip_adapter_image is not None: added_cond_kwargs["image_embeds"] = image_embeds noise_pred = self.unet( latent_model_input, t, encoder_hidden_states=prompt_embeds, cross_attention_kwargs=cross_attention_kwargs, added_cond_kwargs=added_cond_kwargs, return_dict=False, )[0] noise_pred_out = noise_pred.chunk(1 + self.enabled_editing_prompts) # [b,4, 64, 64] noise_pred_uncond = noise_pred_out[0] noise_pred_edit_concepts = noise_pred_out[1:] noise_guidance_edit = torch.zeros( noise_pred_uncond.shape, device=self.device, dtype=noise_pred_uncond.dtype, ) if sem_guidance is not None and len(sem_guidance) > i: noise_guidance_edit += sem_guidance[i].to(self.device) elif enable_edit_guidance: if self.activation_mask is None: self.activation_mask = torch.zeros( (len(timesteps), self.enabled_editing_prompts, *noise_pred_edit_concepts[0].shape) ) if self.sem_guidance is None: self.sem_guidance = torch.zeros((len(timesteps), *noise_pred_uncond.shape)) # noise_guidance_edit = torch.zeros_like(noise_guidance) for c, 
noise_pred_edit_concept in enumerate(noise_pred_edit_concepts): if isinstance(edit_warmup_steps, list): edit_warmup_steps_c = edit_warmup_steps[c] else: edit_warmup_steps_c = edit_warmup_steps if i < edit_warmup_steps_c: continue if isinstance(edit_guidance_scale, list): edit_guidance_scale_c = edit_guidance_scale[c] else: edit_guidance_scale_c = edit_guidance_scale if isinstance(edit_threshold, list): edit_threshold_c = edit_threshold[c] else: edit_threshold_c = edit_threshold if isinstance(reverse_editing_direction, list): reverse_editing_direction_c = reverse_editing_direction[c] else: reverse_editing_direction_c = reverse_editing_direction if isinstance(edit_cooldown_steps, list): edit_cooldown_steps_c = edit_cooldown_steps[c] elif edit_cooldown_steps is None: edit_cooldown_steps_c = i + 1 else: edit_cooldown_steps_c = edit_cooldown_steps if i >= edit_cooldown_steps_c: continue noise_guidance_edit_tmp = noise_pred_edit_concept - noise_pred_uncond if reverse_editing_direction_c: noise_guidance_edit_tmp = noise_guidance_edit_tmp * -1 noise_guidance_edit_tmp = noise_guidance_edit_tmp * edit_guidance_scale_c if user_mask is not None: noise_guidance_edit_tmp = noise_guidance_edit_tmp * user_mask if use_cross_attn_mask: out = self.attention_store.aggregate_attention( attention_maps=self.attention_store.step_store, prompts=self.text_cross_attention_maps, res=att_res, from_where=["up", "down"], is_cross=True, select=self.text_cross_attention_maps.index(editing_prompt[c]), ) attn_map = out[:, :, :, 1 : 1 + num_edit_tokens[c]] # 0 -> startoftext # average over all tokens if attn_map.shape[3] != num_edit_tokens[c]: raise ValueError( f"Incorrect shape of attention_map. Expected size {num_edit_tokens[c]}, but found {attn_map.shape[3]}!" ) attn_map = torch.sum(attn_map, dim=3) # gaussian_smoothing attn_map = F.pad(attn_map.unsqueeze(1), (1, 1, 1, 1), mode="reflect") attn_map = self.smoothing(attn_map).squeeze(1) # torch.quantile function expects float32 if attn_map.dtype == torch.float32: tmp = torch.quantile(attn_map.flatten(start_dim=1), edit_threshold_c, dim=1) else: tmp = torch.quantile( attn_map.flatten(start_dim=1).to(torch.float32), edit_threshold_c, dim=1 ).to(attn_map.dtype) attn_mask = torch.where( attn_map >= tmp.unsqueeze(1).unsqueeze(1).repeat(1, *att_res), 1.0, 0.0 ) # resolution must match latent space dimension attn_mask = F.interpolate( attn_mask.unsqueeze(1), noise_guidance_edit_tmp.shape[-2:], # 64,64 ).repeat(1, 4, 1, 1) self.activation_mask[i, c] = attn_mask.detach().cpu() if not use_intersect_mask: noise_guidance_edit_tmp = noise_guidance_edit_tmp * attn_mask if use_intersect_mask: noise_guidance_edit_tmp_quantile = torch.abs(noise_guidance_edit_tmp) noise_guidance_edit_tmp_quantile = torch.sum( noise_guidance_edit_tmp_quantile, dim=1, keepdim=True ) noise_guidance_edit_tmp_quantile = noise_guidance_edit_tmp_quantile.repeat( 1, self.unet.config.in_channels, 1, 1 ) # torch.quantile function expects float32 if noise_guidance_edit_tmp_quantile.dtype == torch.float32: tmp = torch.quantile( noise_guidance_edit_tmp_quantile.flatten(start_dim=2), edit_threshold_c, dim=2, keepdim=False, ) else: tmp = torch.quantile( noise_guidance_edit_tmp_quantile.flatten(start_dim=2).to(torch.float32), edit_threshold_c, dim=2, keepdim=False, ).to(noise_guidance_edit_tmp_quantile.dtype) intersect_mask = ( torch.where( noise_guidance_edit_tmp_quantile >= tmp[:, :, None, None], torch.ones_like(noise_guidance_edit_tmp), torch.zeros_like(noise_guidance_edit_tmp), ) * attn_mask ) self.activation_mask[i, c] 
= intersect_mask.detach().cpu() noise_guidance_edit_tmp = noise_guidance_edit_tmp * intersect_mask elif not use_cross_attn_mask: # calculate quantile noise_guidance_edit_tmp_quantile = torch.abs(noise_guidance_edit_tmp) noise_guidance_edit_tmp_quantile = torch.sum( noise_guidance_edit_tmp_quantile, dim=1, keepdim=True ) noise_guidance_edit_tmp_quantile = noise_guidance_edit_tmp_quantile.repeat(1, 4, 1, 1) # torch.quantile function expects float32 if noise_guidance_edit_tmp_quantile.dtype == torch.float32: tmp = torch.quantile( noise_guidance_edit_tmp_quantile.flatten(start_dim=2), edit_threshold_c, dim=2, keepdim=False, ) else: tmp = torch.quantile( noise_guidance_edit_tmp_quantile.flatten(start_dim=2).to(torch.float32), edit_threshold_c, dim=2, keepdim=False, ).to(noise_guidance_edit_tmp_quantile.dtype) self.activation_mask[i, c] = ( torch.where( noise_guidance_edit_tmp_quantile >= tmp[:, :, None, None], torch.ones_like(noise_guidance_edit_tmp), torch.zeros_like(noise_guidance_edit_tmp), ) .detach() .cpu() ) noise_guidance_edit_tmp = torch.where( noise_guidance_edit_tmp_quantile >= tmp[:, :, None, None], noise_guidance_edit_tmp, torch.zeros_like(noise_guidance_edit_tmp), ) noise_guidance_edit += noise_guidance_edit_tmp self.sem_guidance[i] = noise_guidance_edit.detach().cpu() noise_pred = noise_pred_uncond + noise_guidance_edit # compute the previous noisy sample x_t -> x_t-1 if enable_edit_guidance and self.guidance_rescale > 0.0: # Based on 3.4. in https://huggingface.co/papers/2305.08891 noise_pred = rescale_noise_cfg( noise_pred, noise_pred_edit_concepts.mean(dim=0, keepdim=False), guidance_rescale=self.guidance_rescale, ) idx = t_to_idx[int(t)] latents = self.scheduler.step( noise_pred, t, latents, variance_noise=zs[idx], **extra_step_kwargs, return_dict=False )[0] # step callback if use_cross_attn_mask: store_step = i in attn_store_steps self.attention_store.between_steps(store_step) if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop("latents", latents) prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) add_text_embeds = callback_outputs.pop("add_text_embeds", add_text_embeds) negative_pooled_prompt_embeds = callback_outputs.pop( "negative_pooled_prompt_embeds", negative_pooled_prompt_embeds ) add_time_ids = callback_outputs.pop("add_time_ids", add_time_ids) # negative_add_time_ids = callback_outputs.pop("negative_add_time_ids", negative_add_time_ids) # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > 0 and (i + 1) % self.scheduler.order == 0): progress_bar.update() if XLA_AVAILABLE: xm.mark_step() if not output_type == "latent": # make sure the VAE is in float32 mode, as it overflows in float16 needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast if needs_upcasting: self.upcast_vae() latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] # cast back to fp16 if needed if needs_upcasting: self.vae.to(dtype=torch.float16) else: image = latents if not output_type == "latent": # apply watermark if available if self.watermark is not None: image = self.watermark.apply_watermark(image) image = self.image_processor.postprocess(image, 
output_type=output_type) # Offload all models self.maybe_free_model_hooks() if not return_dict: return (image,) return LEditsPPDiffusionPipelineOutput(images=image, nsfw_content_detected=None) @torch.no_grad() # Modified from diffusers.pipelines.ledits_pp.pipeline_leditspp_stable_diffusion.LEditsPPPipelineStableDiffusion.encode_image def encode_image(self, image, dtype=None, height=None, width=None, resize_mode="default", crops_coords=None): image = self.image_processor.preprocess( image=image, height=height, width=width, resize_mode=resize_mode, crops_coords=crops_coords ) height, width = image.shape[-2:] if height % 32 != 0 or width % 32 != 0: raise ValueError( "Image height and width must be a factor of 32. " "Consider down-sampling the input using the `height` and `width` parameters" ) resized = self.image_processor.postprocess(image=image, output_type="pil") if max(image.shape[-2:]) > self.vae.config["sample_size"] * 1.5: logger.warning( "Your input images far exceed the default resolution of the underlying diffusion model. " "The output images may contain severe artifacts! " "Consider down-sampling the input using the `height` and `width` parameters" ) image = image.to(self.device, dtype=dtype) needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast if needs_upcasting: image = image.float() self.upcast_vae() x0 = self.vae.encode(image).latent_dist.mode() x0 = x0.to(dtype) # cast back to fp16 if needed if needs_upcasting: self.vae.to(dtype=torch.float16) x0 = self.vae.config.scaling_factor * x0 return x0, resized @torch.no_grad() def invert( self, image: PipelineImageInput, source_prompt: str = "", source_guidance_scale=3.5, negative_prompt: str = None, negative_prompt_2: str = None, num_inversion_steps: int = 50, skip: float = 0.15, generator: Optional[torch.Generator] = None, crops_coords_top_left: Tuple[int, int] = (0, 0), num_zero_noise_steps: int = 3, cross_attention_kwargs: Optional[Dict[str, Any]] = None, height: Optional[int] = None, width: Optional[int] = None, resize_mode: Optional[str] = "default", crops_coords: Optional[Tuple[int, int, int, int]] = None, ): r""" The function to the pipeline for image inversion as described by the [LEDITS++ Paper](https://huggingface.co/papers/2301.12247). If the scheduler is set to [`~schedulers.DDIMScheduler`] the inversion proposed by [edit-friendly DPDM](https://huggingface.co/papers/2304.06140) will be performed instead. Args: image (`PipelineImageInput`): Input for the image(s) that are to be edited. Multiple input images have to default to the same aspect ratio. source_prompt (`str`, defaults to `""`): Prompt describing the input image that will be used for guidance during inversion. Guidance is disabled if the `source_prompt` is `""`. source_guidance_scale (`float`, defaults to `3.5`): Strength of guidance during inversion. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). negative_prompt_2 (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders num_inversion_steps (`int`, defaults to `50`): Number of total performed inversion steps after discarding the initial `skip` steps. 
skip (`float`, defaults to `0.15`): Portion of initial steps that will be ignored for inversion and subsequent generation. Lower values will lead to stronger changes to the input image. `skip` has to be between `0` and `1`. generator (`torch.Generator`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make inversion deterministic. crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). num_zero_noise_steps (`int`, defaults to `3`): Number of final diffusion steps that will not renoise the current image. If no steps are set to zero SD-XL in combination with [`DPMSolverMultistepScheduler`] will produce noise artifacts. cross_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). Returns: [`~pipelines.ledits_pp.LEditsPPInversionPipelineOutput`]: Output will contain the resized input image(s) and respective VAE reconstruction(s). """ if height is not None and height % 32 != 0 or width is not None and width % 32 != 0: raise ValueError("height and width must be a factor of 32.") # Reset attn processor, we do not want to store attn maps during inversion self.unet.set_attn_processor(AttnProcessor()) self.eta = 1.0 self.scheduler.config.timestep_spacing = "leading" self.scheduler.set_timesteps(int(num_inversion_steps * (1 + skip))) self.inversion_steps = self.scheduler.timesteps[-num_inversion_steps:] timesteps = self.inversion_steps num_images_per_prompt = 1 device = self._execution_device # 0. Ensure that only uncond embedding is used if prompt = "" if source_prompt == "": # noise pred should only be noise_pred_uncond source_guidance_scale = 0.0 do_classifier_free_guidance = False else: do_classifier_free_guidance = source_guidance_scale > 1.0 # 1. prepare image x0, resized = self.encode_image( image, dtype=self.text_encoder_2.dtype, height=height, width=width, resize_mode=resize_mode, crops_coords=crops_coords, ) width = x0.shape[2] * self.vae_scale_factor height = x0.shape[3] * self.vae_scale_factor self.size = (height, width) self.batch_size = x0.shape[0] # 2. get embeddings text_encoder_lora_scale = ( cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None ) if isinstance(source_prompt, str): source_prompt = [source_prompt] * self.batch_size ( negative_prompt_embeds, prompt_embeds, negative_pooled_prompt_embeds, edit_pooled_prompt_embeds, _, ) = self.encode_prompt( device=device, num_images_per_prompt=num_images_per_prompt, negative_prompt=negative_prompt, negative_prompt_2=negative_prompt_2, editing_prompt=source_prompt, lora_scale=text_encoder_lora_scale, enable_edit_guidance=do_classifier_free_guidance, ) if self.text_encoder_2 is None: text_encoder_projection_dim = int(negative_pooled_prompt_embeds.shape[-1]) else: text_encoder_projection_dim = self.text_encoder_2.config.projection_dim # 3. 
Prepare added time ids & embeddings add_text_embeds = negative_pooled_prompt_embeds add_time_ids = self._get_add_time_ids( self.size, crops_coords_top_left, self.size, dtype=negative_prompt_embeds.dtype, text_encoder_projection_dim=text_encoder_projection_dim, ) if do_classifier_free_guidance: negative_prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) add_text_embeds = torch.cat([add_text_embeds, edit_pooled_prompt_embeds], dim=0) add_time_ids = torch.cat([add_time_ids, add_time_ids], dim=0) negative_prompt_embeds = negative_prompt_embeds.to(device) add_text_embeds = add_text_embeds.to(device) add_time_ids = add_time_ids.to(device).repeat(self.batch_size * num_images_per_prompt, 1) # autoencoder reconstruction if self.vae.dtype == torch.float16 and self.vae.config.force_upcast: self.upcast_vae() x0_tmp = x0.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) image_rec = self.vae.decode( x0_tmp / self.vae.config.scaling_factor, return_dict=False, generator=generator )[0] elif self.vae.config.force_upcast: x0_tmp = x0.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) image_rec = self.vae.decode( x0_tmp / self.vae.config.scaling_factor, return_dict=False, generator=generator )[0] else: image_rec = self.vae.decode(x0 / self.vae.config.scaling_factor, return_dict=False, generator=generator)[0] image_rec = self.image_processor.postprocess(image_rec, output_type="pil") # 5. find zs and xts variance_noise_shape = (num_inversion_steps, *x0.shape) # intermediate latents t_to_idx = {int(v): k for k, v in enumerate(timesteps)} xts = torch.zeros(size=variance_noise_shape, device=self.device, dtype=negative_prompt_embeds.dtype) for t in reversed(timesteps): idx = num_inversion_steps - t_to_idx[int(t)] - 1 noise = randn_tensor(shape=x0.shape, generator=generator, device=self.device, dtype=x0.dtype) xts[idx] = self.scheduler.add_noise(x0, noise, t.unsqueeze(0)) xts = torch.cat([x0.unsqueeze(0), xts], dim=0) # noise maps zs = torch.zeros(size=variance_noise_shape, device=self.device, dtype=negative_prompt_embeds.dtype) self.scheduler.set_timesteps(len(self.scheduler.timesteps)) for t in self.progress_bar(timesteps): idx = num_inversion_steps - t_to_idx[int(t)] - 1 # 1. predict noise residual xt = xts[idx + 1] latent_model_input = torch.cat([xt] * 2) if do_classifier_free_guidance else xt latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids} noise_pred = self.unet( latent_model_input, t, encoder_hidden_states=negative_prompt_embeds, cross_attention_kwargs=cross_attention_kwargs, added_cond_kwargs=added_cond_kwargs, return_dict=False, )[0] # 2. 
perform guidance if do_classifier_free_guidance: noise_pred_out = noise_pred.chunk(2) noise_pred_uncond, noise_pred_text = noise_pred_out[0], noise_pred_out[1] noise_pred = noise_pred_uncond + source_guidance_scale * (noise_pred_text - noise_pred_uncond) xtm1 = xts[idx] z, xtm1_corrected = compute_noise(self.scheduler, xtm1, xt, t, noise_pred, self.eta) zs[idx] = z # correction to avoid error accumulation xts[idx] = xtm1_corrected self.init_latents = xts[-1] zs = zs.flip(0) if num_zero_noise_steps > 0: zs[-num_zero_noise_steps:] = torch.zeros_like(zs[-num_zero_noise_steps:]) self.zs = zs return LEditsPPInversionPipelineOutput(images=resized, vae_reconstruction_images=image_rec) # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.rescale_noise_cfg def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): r""" Rescales `noise_cfg` tensor based on `guidance_rescale` to improve image quality and fix overexposure. Based on Section 3.4 from [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://huggingface.co/papers/2305.08891). Args: noise_cfg (`torch.Tensor`): The predicted noise tensor for the guided diffusion process. noise_pred_text (`torch.Tensor`): The predicted noise tensor for the text-guided diffusion process. guidance_rescale (`float`, *optional*, defaults to 0.0): A rescale factor applied to the noise predictions. Returns: noise_cfg (`torch.Tensor`): The rescaled noise prediction tensor. """ std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) # rescale the results from guidance (fixes overexposure) noise_pred_rescaled = noise_cfg * (std_text / std_cfg) # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg return noise_cfg # Copied from diffusers.pipelines.ledits_pp.pipeline_leditspp_stable_diffusion.compute_noise_ddim def compute_noise_ddim(scheduler, prev_latents, latents, timestep, noise_pred, eta): # 1. get previous step value (=t-1) prev_timestep = timestep - scheduler.config.num_train_timesteps // scheduler.num_inference_steps # 2. compute alphas, betas alpha_prod_t = scheduler.alphas_cumprod[timestep] alpha_prod_t_prev = ( scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else scheduler.final_alpha_cumprod ) beta_prod_t = 1 - alpha_prod_t # 3. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://huggingface.co/papers/2010.02502 pred_original_sample = (latents - beta_prod_t ** (0.5) * noise_pred) / alpha_prod_t ** (0.5) # 4. Clip "predicted x_0" if scheduler.config.clip_sample: pred_original_sample = torch.clamp(pred_original_sample, -1, 1) # 5. compute variance: "sigma_t(η)" -> see formula (16) # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1) variance = scheduler._get_variance(timestep, prev_timestep) std_dev_t = eta * variance ** (0.5) # 6. 
compute "direction pointing to x_t" of formula (12) from https://huggingface.co/papers/2010.02502 pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** (0.5) * noise_pred # modified so that updated xtm1 is returned as well (to avoid error accumulation) mu_xt = alpha_prod_t_prev ** (0.5) * pred_original_sample + pred_sample_direction if variance > 0.0: noise = (prev_latents - mu_xt) / (variance ** (0.5) * eta) else: noise = torch.tensor([0.0]).to(latents.device) return noise, mu_xt + (eta * variance**0.5) * noise # Copied from diffusers.pipelines.ledits_pp.pipeline_leditspp_stable_diffusion.compute_noise_sde_dpm_pp_2nd def compute_noise_sde_dpm_pp_2nd(scheduler, prev_latents, latents, timestep, noise_pred, eta): def first_order_update(model_output, sample): # timestep, prev_timestep, sample): sigma_t, sigma_s = scheduler.sigmas[scheduler.step_index + 1], scheduler.sigmas[scheduler.step_index] alpha_t, sigma_t = scheduler._sigma_to_alpha_sigma_t(sigma_t) alpha_s, sigma_s = scheduler._sigma_to_alpha_sigma_t(sigma_s) lambda_t = torch.log(alpha_t) - torch.log(sigma_t) lambda_s = torch.log(alpha_s) - torch.log(sigma_s) h = lambda_t - lambda_s mu_xt = (sigma_t / sigma_s * torch.exp(-h)) * sample + (alpha_t * (1 - torch.exp(-2.0 * h))) * model_output mu_xt = scheduler.dpm_solver_first_order_update( model_output=model_output, sample=sample, noise=torch.zeros_like(sample) ) sigma = sigma_t * torch.sqrt(1.0 - torch.exp(-2 * h)) if sigma > 0.0: noise = (prev_latents - mu_xt) / sigma else: noise = torch.tensor([0.0]).to(sample.device) prev_sample = mu_xt + sigma * noise return noise, prev_sample def second_order_update(model_output_list, sample): # timestep_list, prev_timestep, sample): sigma_t, sigma_s0, sigma_s1 = ( scheduler.sigmas[scheduler.step_index + 1], scheduler.sigmas[scheduler.step_index], scheduler.sigmas[scheduler.step_index - 1], ) alpha_t, sigma_t = scheduler._sigma_to_alpha_sigma_t(sigma_t) alpha_s0, sigma_s0 = scheduler._sigma_to_alpha_sigma_t(sigma_s0) alpha_s1, sigma_s1 = scheduler._sigma_to_alpha_sigma_t(sigma_s1) lambda_t = torch.log(alpha_t) - torch.log(sigma_t) lambda_s0 = torch.log(alpha_s0) - torch.log(sigma_s0) lambda_s1 = torch.log(alpha_s1) - torch.log(sigma_s1) m0, m1 = model_output_list[-1], model_output_list[-2] h, h_0 = lambda_t - lambda_s0, lambda_s0 - lambda_s1 r0 = h_0 / h D0, D1 = m0, (1.0 / r0) * (m0 - m1) mu_xt = ( (sigma_t / sigma_s0 * torch.exp(-h)) * sample + (alpha_t * (1 - torch.exp(-2.0 * h))) * D0 + 0.5 * (alpha_t * (1 - torch.exp(-2.0 * h))) * D1 ) sigma = sigma_t * torch.sqrt(1.0 - torch.exp(-2 * h)) if sigma > 0.0: noise = (prev_latents - mu_xt) / sigma else: noise = torch.tensor([0.0]).to(sample.device) prev_sample = mu_xt + sigma * noise return noise, prev_sample if scheduler.step_index is None: scheduler._init_step_index(timestep) model_output = scheduler.convert_model_output(model_output=noise_pred, sample=latents) for i in range(scheduler.config.solver_order - 1): scheduler.model_outputs[i] = scheduler.model_outputs[i + 1] scheduler.model_outputs[-1] = model_output if scheduler.lower_order_nums < 1: noise, prev_sample = first_order_update(model_output, latents) else: noise, prev_sample = second_order_update(scheduler.model_outputs, latents) if scheduler.lower_order_nums < scheduler.config.solver_order: scheduler.lower_order_nums += 1 # upon completion increase step index by one scheduler._step_index += 1 return noise, prev_sample # Copied from diffusers.pipelines.ledits_pp.pipeline_leditspp_stable_diffusion.compute_noise def 
compute_noise(scheduler, *args): if isinstance(scheduler, DDIMScheduler): return compute_noise_ddim(scheduler, *args) elif ( isinstance(scheduler, DPMSolverMultistepScheduler) and scheduler.config.algorithm_type == "sde-dpmsolver++" and scheduler.config.solver_order == 2 ): return compute_noise_sde_dpm_pp_2nd(scheduler, *args) else: raise NotImplementedError
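# The pipeline defined above is used in two stages: `invert` first caches the
# inversion latents and noise maps, then `__call__` performs the actual edit on
# the last inverted image(s). A minimal usage sketch follows (illustrative
# comments only, not executed as part of this module); the checkpoint id, the
# `input_image` variable, and the concrete editing parameters are assumptions
# chosen for the example rather than values taken from this file.
#
#     import torch
#     from diffusers import LEditsPPPipelineStableDiffusionXL
#
#     pipe = LEditsPPPipelineStableDiffusionXL.from_pretrained(
#         "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
#     ).to("cuda")
#
#     # 1. Invert the input image. `skip` discards a portion of the initial
#     #    steps; lower values allow stronger changes to the input image.
#     _ = pipe.invert(image=input_image, num_inversion_steps=50, skip=0.2)
#
#     # 2. Edit the last inverted image with concept-level guidance. Each list
#     #    entry corresponds to one editing prompt.
#     result = pipe(
#         editing_prompt=["tennis ball"],
#         reverse_editing_direction=[False],
#         edit_guidance_scale=[5.0],
#         edit_threshold=[0.9],
#     )
#     edited_image = result.images[0]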
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect from typing import Callable, List, Optional, Union import numpy as np import PIL.Image import torch from transformers import CLIPImageProcessor from ...image_processor import VaeImageProcessor from ...models import AutoencoderKL, UNet2DConditionModel from ...schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler from ...utils import deprecate, is_torch_xla_available, logging from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DeprecatedPipelineMixin, DiffusionPipeline, StableDiffusionMixin from ..stable_diffusion import StableDiffusionPipelineOutput from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker from .image_encoder import PaintByExampleImageEncoder if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) # pylint: disable=invalid-name # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents def retrieve_latents( encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample" ): if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": return encoder_output.latent_dist.sample(generator) elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": return encoder_output.latent_dist.mode() elif hasattr(encoder_output, "latents"): return encoder_output.latents else: raise AttributeError("Could not access latents of provided encoder_output") def prepare_mask_and_masked_image(image, mask): """ Prepares a pair (image, mask) to be consumed by the Paint by Example pipeline. This means that those inputs will be converted to ``torch.Tensor`` with shapes ``batch x channels x height x width`` where ``channels`` is ``3`` for the ``image`` and ``1`` for the ``mask``. The ``image`` will be converted to ``torch.float32`` and normalized to be in ``[-1, 1]``. The ``mask`` will be binarized (``mask > 0.5``) and cast to ``torch.float32`` too. Args: image (Union[np.array, PIL.Image, torch.Tensor]): The image to inpaint. It can be a ``PIL.Image``, or a ``height x width x 3`` ``np.array`` or a ``channels x height x width`` ``torch.Tensor`` or a ``batch x channels x height x width`` ``torch.Tensor``. mask (_type_): The mask to apply to the image, i.e. regions to inpaint. It can be a ``PIL.Image``, or a ``height x width`` ``np.array`` or a ``1 x height x width`` ``torch.Tensor`` or a ``batch x 1 x height x width`` ``torch.Tensor``. Raises: ValueError: ``torch.Tensor`` images should be in the ``[-1, 1]`` range. ValueError: ``torch.Tensor`` mask should be in the ``[0, 1]`` range. ValueError: ``mask`` and ``image`` should have the same spatial dimensions. TypeError: ``mask`` is a ``torch.Tensor`` but ``image`` is not (ot the other way around). 
Returns: tuple[torch.Tensor]: The pair (mask, masked_image) as ``torch.Tensor`` with 4 dimensions: ``batch x channels x height x width``. """ if isinstance(image, torch.Tensor): if not isinstance(mask, torch.Tensor): raise TypeError(f"`image` is a torch.Tensor but `mask` (type: {type(mask)} is not") # Batch single image if image.ndim == 3: assert image.shape[0] == 3, "Image outside a batch should be of shape (3, H, W)" image = image.unsqueeze(0) # Batch and add channel dim for single mask if mask.ndim == 2: mask = mask.unsqueeze(0).unsqueeze(0) # Batch single mask or add channel dim if mask.ndim == 3: # Batched mask if mask.shape[0] == image.shape[0]: mask = mask.unsqueeze(1) else: mask = mask.unsqueeze(0) assert image.ndim == 4 and mask.ndim == 4, "Image and Mask must have 4 dimensions" assert image.shape[-2:] == mask.shape[-2:], "Image and Mask must have the same spatial dimensions" assert image.shape[0] == mask.shape[0], "Image and Mask must have the same batch size" assert mask.shape[1] == 1, "Mask image must have a single channel" # Check image is in [-1, 1] if image.min() < -1 or image.max() > 1: raise ValueError("Image should be in [-1, 1] range") # Check mask is in [0, 1] if mask.min() < 0 or mask.max() > 1: raise ValueError("Mask should be in [0, 1] range") # paint-by-example inverses the mask mask = 1 - mask # Binarize mask mask[mask < 0.5] = 0 mask[mask >= 0.5] = 1 # Image as float32 image = image.to(dtype=torch.float32) elif isinstance(mask, torch.Tensor): raise TypeError(f"`mask` is a torch.Tensor but `image` (type: {type(image)} is not") else: if isinstance(image, PIL.Image.Image): image = [image] image = np.concatenate([np.array(i.convert("RGB"))[None, :] for i in image], axis=0) image = image.transpose(0, 3, 1, 2) image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0 # preprocess mask if isinstance(mask, PIL.Image.Image): mask = [mask] mask = np.concatenate([np.array(m.convert("L"))[None, None, :] for m in mask], axis=0) mask = mask.astype(np.float32) / 255.0 # paint-by-example inverses the mask mask = 1 - mask mask[mask < 0.5] = 0 mask[mask >= 0.5] = 1 mask = torch.from_numpy(mask) masked_image = image * mask return mask, masked_image class PaintByExamplePipeline(DeprecatedPipelineMixin, DiffusionPipeline, StableDiffusionMixin): _last_supported_version = "0.33.1" r""" <Tip warning={true}> 🧪 This is an experimental feature! </Tip> Pipeline for image-guided image inpainting using Stable Diffusion. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. image_encoder ([`PaintByExampleImageEncoder`]): Encodes the example input image. The `unet` is conditioned on the example image instead of a text prompt. tokenizer ([`~transformers.CLIPTokenizer`]): A `CLIPTokenizer` to tokenize text. unet ([`UNet2DConditionModel`]): A `UNet2DConditionModel` to denoise the encoded image latents. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. safety_checker ([`StableDiffusionSafetyChecker`]): Classification module that estimates whether generated images could be considered offensive or harmful. 
Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details about a model's potential harms. feature_extractor ([`~transformers.CLIPImageProcessor`]): A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. """ # TODO: feature_extractor is required to encode initial images (if they are in PIL format), # we should give a descriptive message if the pipeline doesn't have one. model_cpu_offload_seq = "unet->vae" _exclude_from_cpu_offload = ["image_encoder"] _optional_components = ["safety_checker"] def __init__( self, vae: AutoencoderKL, image_encoder: PaintByExampleImageEncoder, unet: UNet2DConditionModel, scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, requires_safety_checker: bool = False, ): super().__init__() self.register_modules( vae=vae, image_encoder=image_encoder, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8 self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.register_to_config(requires_safety_checker=requires_safety_checker) # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker def run_safety_checker(self, image, device, dtype): if self.safety_checker is None: has_nsfw_concept = None else: if torch.is_tensor(image): feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") else: feature_extractor_input = self.image_processor.numpy_to_pil(image) safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) image, has_nsfw_concept = self.safety_checker( images=image, clip_input=safety_checker_input.pixel_values.to(dtype) ) return image, has_nsfw_concept # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://huggingface.co/papers/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents def decode_latents(self, latents): deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) 
instead" deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.decode(latents, return_dict=False)[0] image = (image / 2 + 0.5).clamp(0, 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 image = image.cpu().permute(0, 2, 3, 1).float().numpy() return image # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_image_variation.StableDiffusionImageVariationPipeline.check_inputs def check_inputs(self, image, height, width, callback_steps): if ( not isinstance(image, torch.Tensor) and not isinstance(image, PIL.Image.Image) and not isinstance(image, list) ): raise ValueError( "`image` has to be of type `torch.Tensor` or `PIL.Image.Image` or `List[PIL.Image.Image]` but is" f" {type(image)}" ) if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if (callback_steps is None) or ( callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = ( batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor, ) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_inpaint.StableDiffusionInpaintPipeline.prepare_mask_latents def prepare_mask_latents( self, mask, masked_image, batch_size, height, width, dtype, device, generator, do_classifier_free_guidance ): # resize the mask to latents shape as we concatenate the mask to the latents # we do that before converting to dtype to avoid breaking in case we're using cpu_offload # and half precision mask = torch.nn.functional.interpolate( mask, size=(height // self.vae_scale_factor, width // self.vae_scale_factor) ) mask = mask.to(device=device, dtype=dtype) masked_image = masked_image.to(device=device, dtype=dtype) if masked_image.shape[1] == 4: masked_image_latents = masked_image else: masked_image_latents = self._encode_vae_image(masked_image, generator=generator) # duplicate mask and masked_image_latents for each generation per prompt, using mps friendly method if mask.shape[0] < batch_size: if not batch_size % mask.shape[0] == 0: raise ValueError( "The passed mask and the required batch size don't match. Masks are supposed to be duplicated to" f" a total batch size of {batch_size}, but {mask.shape[0]} masks were passed. Make sure the number" " of masks that you pass is divisible by the total requested batch size." 
) mask = mask.repeat(batch_size // mask.shape[0], 1, 1, 1) if masked_image_latents.shape[0] < batch_size: if not batch_size % masked_image_latents.shape[0] == 0: raise ValueError( "The passed images and the required batch size don't match. Images are supposed to be duplicated" f" to a total batch size of {batch_size}, but {masked_image_latents.shape[0]} images were passed." " Make sure the number of images that you pass is divisible by the total requested batch size." ) masked_image_latents = masked_image_latents.repeat(batch_size // masked_image_latents.shape[0], 1, 1, 1) mask = torch.cat([mask] * 2) if do_classifier_free_guidance else mask masked_image_latents = ( torch.cat([masked_image_latents] * 2) if do_classifier_free_guidance else masked_image_latents ) # aligning device to prevent device errors when concating it with the latent model input masked_image_latents = masked_image_latents.to(device=device, dtype=dtype) return mask, masked_image_latents # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_inpaint.StableDiffusionInpaintPipeline._encode_vae_image def _encode_vae_image(self, image: torch.Tensor, generator: torch.Generator): if isinstance(generator, list): image_latents = [ retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i]) for i in range(image.shape[0]) ] image_latents = torch.cat(image_latents, dim=0) else: image_latents = retrieve_latents(self.vae.encode(image), generator=generator) image_latents = self.vae.config.scaling_factor * image_latents return image_latents def _encode_image(self, image, device, num_images_per_prompt, do_classifier_free_guidance): dtype = next(self.image_encoder.parameters()).dtype if not isinstance(image, torch.Tensor): image = self.feature_extractor(images=image, return_tensors="pt").pixel_values image = image.to(device=device, dtype=dtype) image_embeddings, negative_prompt_embeds = self.image_encoder(image, return_uncond_vector=True) # duplicate image embeddings for each generation per prompt, using mps friendly method bs_embed, seq_len, _ = image_embeddings.shape image_embeddings = image_embeddings.repeat(1, num_images_per_prompt, 1) image_embeddings = image_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance: negative_prompt_embeds = negative_prompt_embeds.repeat(1, image_embeddings.shape[0], 1) negative_prompt_embeds = negative_prompt_embeds.view(bs_embed * num_images_per_prompt, 1, -1) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes image_embeddings = torch.cat([negative_prompt_embeds, image_embeddings]) return image_embeddings @torch.no_grad() def __call__( self, example_image: Union[torch.Tensor, PIL.Image.Image], image: Union[torch.Tensor, PIL.Image.Image], mask_image: Union[torch.Tensor, PIL.Image.Image], height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 50, guidance_scale: float = 5.0, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, callback_steps: int = 1, ): r""" The call function to the pipeline for generation. 
Args: example_image (`torch.Tensor` or `PIL.Image.Image` or `List[PIL.Image.Image]`): An example image to guide image generation. image (`torch.Tensor` or `PIL.Image.Image` or `List[PIL.Image.Image]`): `Image` or tensor representing an image batch to be inpainted (parts of the image are masked out with `mask_image` and repainted according to `prompt`). mask_image (`torch.Tensor` or `PIL.Image.Image` or `List[PIL.Image.Image]`): `Image` or tensor representing an image batch to mask `image`. White pixels in the mask are repainted, while black pixels are preserved. If `mask_image` is a PIL image, it is converted to a single channel (luminance) before use. If it's a tensor, it should contain one color channel (L) instead of 3, so the expected shape would be `(B, H, W, 1)`. height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): The height in pixels of the generated image. width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): The width in pixels of the generated image. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. guidance_scale (`float`, *optional*, defaults to 7.5): A higher guidance scale value encourages the model to generate images closely linked to the text `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide what to not include in image generation. If not defined, you need to pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) from the [DDIM](https://huggingface.co/papers/2010.02502) paper. Only applies to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.Tensor`, *optional*): Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor is generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generated image. Choose between `PIL.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a plain tuple. callback (`Callable`, *optional*): A function that calls every `callback_steps` steps during inference. The function is called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function is called. If not specified, the callback is called at every step. Example: ```py >>> import PIL >>> import requests >>> import torch >>> from io import BytesIO >>> from diffusers import PaintByExamplePipeline >>> def download_image(url): ... response = requests.get(url) ... 
return PIL.Image.open(BytesIO(response.content)).convert("RGB") >>> img_url = ( ... "https://raw.githubusercontent.com/Fantasy-Studio/Paint-by-Example/main/examples/image/example_1.png" ... ) >>> mask_url = ( ... "https://raw.githubusercontent.com/Fantasy-Studio/Paint-by-Example/main/examples/mask/example_1.png" ... ) >>> example_url = "https://raw.githubusercontent.com/Fantasy-Studio/Paint-by-Example/main/examples/reference/example_1.jpg" >>> init_image = download_image(img_url).resize((512, 512)) >>> mask_image = download_image(mask_url).resize((512, 512)) >>> example_image = download_image(example_url).resize((512, 512)) >>> pipe = PaintByExamplePipeline.from_pretrained( ... "Fantasy-Studio/Paint-by-Example", ... torch_dtype=torch.float16, ... ) >>> pipe = pipe.to("cuda") >>> image = pipe(image=init_image, mask_image=mask_image, example_image=example_image).images[0] >>> image ``` Returns: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated images and the second element is a list of `bool`s indicating whether the corresponding generated image contains "not-safe-for-work" (nsfw) content. """ # 1. Define call parameters if isinstance(image, PIL.Image.Image): batch_size = 1 elif isinstance(image, list): batch_size = len(image) else: batch_size = image.shape[0] device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1` # corresponds to doing no classifier free guidance. do_classifier_free_guidance = guidance_scale > 1.0 # 2. Preprocess mask and image mask, masked_image = prepare_mask_and_masked_image(image, mask_image) height, width = masked_image.shape[-2:] # 3. Check inputs self.check_inputs(example_image, height, width, callback_steps) # 4. Encode input image image_embeddings = self._encode_image( example_image, device, num_images_per_prompt, do_classifier_free_guidance ) # 5. set timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps # 6. Prepare latent variables num_channels_latents = self.vae.config.latent_channels latents = self.prepare_latents( batch_size * num_images_per_prompt, num_channels_latents, height, width, image_embeddings.dtype, device, generator, latents, ) # 7. Prepare mask latent variables mask, masked_image_latents = self.prepare_mask_latents( mask, masked_image, batch_size * num_images_per_prompt, height, width, image_embeddings.dtype, device, generator, do_classifier_free_guidance, ) # 8. Check that sizes of mask, masked image and latents match num_channels_mask = mask.shape[1] num_channels_masked_image = masked_image_latents.shape[1] if num_channels_latents + num_channels_mask + num_channels_masked_image != self.unet.config.in_channels: raise ValueError( f"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects" f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +" f" `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image}" f" = {num_channels_latents + num_channels_masked_image + num_channels_mask}. Please verify the config of" " `pipeline.unet` or your `mask_image` or `image` input." ) # 9. Prepare extra step kwargs. 
TODO: Logic should ideally just be moved out of the pipeline extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # 10. Denoising loop num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents # concat latents, mask, masked_image_latents in the channel dimension latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) latent_model_input = torch.cat([latent_model_input, masked_image_latents, mask], dim=1) # predict the noise residual noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=image_embeddings).sample # perform guidance if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, "order", 1) callback(step_idx, t, latents) if XLA_AVAILABLE: xm.mark_step() self.maybe_free_model_hooks() if not output_type == "latent": image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] image, has_nsfw_concept = self.run_safety_checker(image, device, image_embeddings.dtype) else: image = latents has_nsfw_concept = None if has_nsfw_concept is None: do_denormalize = [True] * image.shape[0] else: do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
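A minimal sketch (not part of the pipeline file above) illustrating the mask convention implemented by `prepare_mask_and_masked_image`: Paint by Example inverts the incoming mask, so after preprocessing a value of 1 marks pixels to keep and 0 marks the region to repaint. The 64x64 PIL inputs below are hypothetical, and the sketch assumes the function defined above is in scope.

```py
# Sketch: exercising prepare_mask_and_masked_image with hypothetical PIL inputs.
import PIL.Image

image = PIL.Image.new("RGB", (64, 64), color=(128, 128, 128))
mask = PIL.Image.new("L", (64, 64), color=0)   # black = keep
mask.paste(255, (16, 16, 48, 48))              # white square = region to repaint

mask_t, masked_image_t = prepare_mask_and_masked_image(image, mask)

print(mask_t.shape)          # torch.Size([1, 1, 64, 64])
print(masked_image_t.shape)  # torch.Size([1, 3, 64, 64])

# After inversion and binarization, the repaint region is 0, so the masked image is zeroed there.
print(mask_t[0, 0, 32, 32].item())          # 0.0
print(masked_image_t[0, 0, 32, 32].item())  # 0.0
```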
diffusers/src/diffusers/pipelines/paint_by_example/pipeline_paint_by_example.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/paint_by_example/pipeline_paint_by_example.py", "repo_id": "diffusers", "token_count": 13132 }
178
from typing import TYPE_CHECKING from ...utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_torch_available, is_transformers_available, ) _dummy_objects = {} _import_structure = {} try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_torch_and_transformers_objects # noqa F403 _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: _import_structure["pipeline_sana"] = ["SanaPipeline"] _import_structure["pipeline_sana_controlnet"] = ["SanaControlNetPipeline"] _import_structure["pipeline_sana_sprint"] = ["SanaSprintPipeline"] _import_structure["pipeline_sana_sprint_img2img"] = ["SanaSprintImg2ImgPipeline"] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * else: from .pipeline_sana import SanaPipeline from .pipeline_sana_controlnet import SanaControlNetPipeline from .pipeline_sana_sprint import SanaSprintPipeline from .pipeline_sana_sprint_img2img import SanaSprintImg2ImgPipeline else: import sys sys.modules[__name__] = _LazyModule( __name__, globals()["__file__"], _import_structure, module_spec=__spec__, ) for name, value in _dummy_objects.items(): setattr(sys.modules[__name__], name, value)
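The `_LazyModule` indirection above defers the heavy pipeline imports until an attribute is first accessed (unless `DIFFUSERS_SLOW_IMPORT` or type checking forces eager imports). As a small illustrative sketch, a typical import goes through the top-level package or this subpackage and resolves lazily; if `torch`/`transformers` are missing, the dummy placeholder objects registered above are returned instead.

```py
# Sketch: both imports resolve lazily through _LazyModule at first attribute access.
from diffusers import SanaPipeline
from diffusers.pipelines.sana import SanaSprintPipeline

print(SanaPipeline.__name__, SanaSprintPipeline.__name__)
```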
diffusers/src/diffusers/pipelines/sana/__init__.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/sana/__init__.py", "repo_id": "diffusers", "token_count": 694 }
179
# Copyright 2025 The SkyReels-V2 Team, The Wan Team and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import html from typing import Any, Callable, Dict, List, Optional, Union import regex as re import torch from transformers import AutoTokenizer, UMT5EncoderModel from ...callbacks import MultiPipelineCallbacks, PipelineCallback from ...loaders import SkyReelsV2LoraLoaderMixin from ...models import AutoencoderKLWan, SkyReelsV2Transformer3DModel from ...schedulers import UniPCMultistepScheduler from ...utils import is_ftfy_available, is_torch_xla_available, logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ...video_processor import VideoProcessor from ..pipeline_utils import DiffusionPipeline from .pipeline_output import SkyReelsV2PipelineOutput if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) # pylint: disable=invalid-name if is_ftfy_available(): import ftfy EXAMPLE_DOC_STRING = """\ Examples: ```py >>> import torch >>> from diffusers import ( ... SkyReelsV2Pipeline, ... UniPCMultistepScheduler, ... AutoencoderKLWan, ... ) >>> from diffusers.utils import export_to_video >>> # Load the pipeline >>> # Available models: >>> # - Skywork/SkyReels-V2-T2V-14B-540P-Diffusers >>> # - Skywork/SkyReels-V2-T2V-14B-720P-Diffusers >>> vae = AutoencoderKLWan.from_pretrained( ... "Skywork/SkyReels-V2-T2V-14B-720P-Diffusers", ... subfolder="vae", ... torch_dtype=torch.float32, ... ) >>> pipe = SkyReelsV2Pipeline.from_pretrained( ... "Skywork/SkyReels-V2-T2V-14B-720P-Diffusers", ... vae=vae, ... torch_dtype=torch.bfloat16, ... ) >>> flow_shift = 8.0 # 8.0 for T2V, 5.0 for I2V >>> pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config, flow_shift=flow_shift) >>> pipe = pipe.to("cuda") >>> prompt = "A cat and a dog baking a cake together in a kitchen. The cat is carefully measuring flour, while the dog is stirring the batter with a wooden spoon. The kitchen is cozy, with sunlight streaming through the window." >>> output = pipe( ... prompt=prompt, ... num_inference_steps=50, ... height=544, ... width=960, ... guidance_scale=6.0, # 6.0 for T2V, 5.0 for I2V ... num_frames=97, ... ).frames[0] >>> export_to_video(output, "video.mp4", fps=24, quality=8) ``` """ def basic_clean(text): text = ftfy.fix_text(text) text = html.unescape(html.unescape(text)) return text.strip() def whitespace_clean(text): text = re.sub(r"\s+", " ", text) text = text.strip() return text def prompt_clean(text): text = whitespace_clean(basic_clean(text)) return text class SkyReelsV2Pipeline(DiffusionPipeline, SkyReelsV2LoraLoaderMixin): r""" Pipeline for Text-to-Video (t2v) generation using SkyReels-V2. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). 
Args: tokenizer ([`T5Tokenizer`]): Tokenizer from [T5](https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5Tokenizer), specifically the [google/umt5-xxl](https://huggingface.co/google/umt5-xxl) variant. text_encoder ([`T5EncoderModel`]): [T5](https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5EncoderModel), specifically the [google/umt5-xxl](https://huggingface.co/google/umt5-xxl) variant. transformer ([`SkyReelsV2Transformer3DModel`]): Conditional Transformer to denoise the input latents. scheduler ([`UniPCMultistepScheduler`]): A scheduler to be used in combination with `transformer` to denoise the encoded image latents. vae ([`AutoencoderKLWan`]): Variational Auto-Encoder (VAE) Model to encode and decode videos to and from latent representations. """ model_cpu_offload_seq = "text_encoder->transformer->vae" _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"] def __init__( self, tokenizer: AutoTokenizer, text_encoder: UMT5EncoderModel, transformer: SkyReelsV2Transformer3DModel, vae: AutoencoderKLWan, scheduler: UniPCMultistepScheduler, ): super().__init__() self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, transformer=transformer, scheduler=scheduler, ) self.vae_scale_factor_temporal = 2 ** sum(self.vae.temperal_downsample) if getattr(self, "vae", None) else 4 self.vae_scale_factor_spatial = 2 ** len(self.vae.temperal_downsample) if getattr(self, "vae", None) else 8 self.video_processor = VideoProcessor(vae_scale_factor=self.vae_scale_factor_spatial) # Copied from diffusers.pipelines.wan.pipeline_wan.WanPipeline._get_t5_prompt_embeds def _get_t5_prompt_embeds( self, prompt: Union[str, List[str]] = None, num_videos_per_prompt: int = 1, max_sequence_length: int = 226, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None, ): device = device or self._execution_device dtype = dtype or self.text_encoder.dtype prompt = [prompt] if isinstance(prompt, str) else prompt prompt = [prompt_clean(u) for u in prompt] batch_size = len(prompt) text_inputs = self.tokenizer( prompt, padding="max_length", max_length=max_sequence_length, truncation=True, add_special_tokens=True, return_attention_mask=True, return_tensors="pt", ) text_input_ids, mask = text_inputs.input_ids, text_inputs.attention_mask seq_lens = mask.gt(0).sum(dim=1).long() prompt_embeds = self.text_encoder(text_input_ids.to(device), mask.to(device)).last_hidden_state prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) prompt_embeds = [u[:v] for u, v in zip(prompt_embeds, seq_lens)] prompt_embeds = torch.stack( [torch.cat([u, u.new_zeros(max_sequence_length - u.size(0), u.size(1))]) for u in prompt_embeds], dim=0 ) # duplicate text embeddings for each generation per prompt, using mps friendly method _, seq_len, _ = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_videos_per_prompt, 1) prompt_embeds = prompt_embeds.view(batch_size * num_videos_per_prompt, seq_len, -1) return prompt_embeds # Copied from diffusers.pipelines.wan.pipeline_wan.WanPipeline.encode_prompt def encode_prompt( self, prompt: Union[str, List[str]], negative_prompt: Optional[Union[str, List[str]]] = None, do_classifier_free_guidance: bool = True, num_videos_per_prompt: int = 1, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, max_sequence_length: int = 226, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None, ): r""" Encodes the prompt into text encoder 
hidden states. Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). do_classifier_free_guidance (`bool`, *optional*, defaults to `True`): Whether to use classifier free guidance or not. num_videos_per_prompt (`int`, *optional*, defaults to 1): Number of videos that should be generated per prompt. torch device to place the resulting embeddings on prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. device: (`torch.device`, *optional*): torch device dtype: (`torch.dtype`, *optional*): torch dtype """ device = device or self._execution_device prompt = [prompt] if isinstance(prompt, str) else prompt if prompt is not None: batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: prompt_embeds = self._get_t5_prompt_embeds( prompt=prompt, num_videos_per_prompt=num_videos_per_prompt, max_sequence_length=max_sequence_length, device=device, dtype=dtype, ) if do_classifier_free_guidance and negative_prompt_embeds is None: negative_prompt = negative_prompt or "" negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt if prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) negative_prompt_embeds = self._get_t5_prompt_embeds( prompt=negative_prompt, num_videos_per_prompt=num_videos_per_prompt, max_sequence_length=max_sequence_length, device=device, dtype=dtype, ) return prompt_embeds, negative_prompt_embeds def check_inputs( self, prompt, negative_prompt, height, width, prompt_embeds=None, negative_prompt_embeds=None, callback_on_step_end_tensor_inputs=None, ): if height % 16 != 0 or width % 16 != 0: raise ValueError(f"`height` and `width` have to be divisible by 16 but are {height} and {width}.") if callback_on_step_end_tensor_inputs is not None and not all( k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs ): raise ValueError( f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" ) if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." 
) elif negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." ) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") elif negative_prompt is not None and ( not isinstance(negative_prompt, str) and not isinstance(negative_prompt, list) ): raise ValueError(f"`negative_prompt` has to be of type `str` or `list` but is {type(negative_prompt)}") # Copied from diffusers.pipelines.wan.pipeline_wan.WanPipeline.prepare_latents def prepare_latents( self, batch_size: int, num_channels_latents: int = 16, height: int = 480, width: int = 832, num_frames: int = 81, dtype: Optional[torch.dtype] = None, device: Optional[torch.device] = None, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, ) -> torch.Tensor: if latents is not None: return latents.to(device=device, dtype=dtype) num_latent_frames = (num_frames - 1) // self.vae_scale_factor_temporal + 1 shape = ( batch_size, num_channels_latents, num_latent_frames, int(height) // self.vae_scale_factor_spatial, int(width) // self.vae_scale_factor_spatial, ) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) return latents @property def guidance_scale(self): return self._guidance_scale @property def do_classifier_free_guidance(self): return self._guidance_scale > 1.0 @property def num_timesteps(self): return self._num_timesteps @property def current_timestep(self): return self._current_timestep @property def interrupt(self): return self._interrupt @property def attention_kwargs(self): return self._attention_kwargs @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, prompt: Union[str, List[str]] = None, negative_prompt: Union[str, List[str]] = None, height: int = 544, width: int = 960, num_frames: int = 97, num_inference_steps: int = 50, guidance_scale: float = 6.0, num_videos_per_prompt: Optional[int] = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, output_type: Optional[str] = "np", return_dict: bool = True, attention_kwargs: Optional[Dict[str, Any]] = None, callback_on_step_end: Optional[ Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks] ] = None, callback_on_step_end_tensor_inputs: List[str] = ["latents"], max_sequence_length: int = 512, ): r""" The call function to the pipeline for generation. Args: prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. instead. height (`int`, defaults to `544`): The height in pixels of the generated image. 
width (`int`, defaults to `960`): The width in pixels of the generated image. num_frames (`int`, defaults to `97`): The number of frames in the generated video. num_inference_steps (`int`, defaults to `50`): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. guidance_scale (`float`, defaults to `6.0`): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. num_videos_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.Tensor`, *optional*): Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor is generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, text embeddings are generated from the `prompt` input argument. output_type (`str`, *optional*, defaults to `"np"`): The output format of the generated image. Choose between `PIL.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`SkyReelsV2PipelineOutput`] instead of a plain tuple. attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*): A function or a subclass of `PipelineCallback` or `MultiPipelineCallbacks` that is called at the end of each denoising step during the inference. with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by `callback_on_step_end_tensor_inputs`. callback_on_step_end_tensor_inputs (`List`, *optional*): The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the `._callback_tensor_inputs` attribute of your pipeline class. max_sequence_length (`int`, *optional*, defaults to `512`): The maximum sequence length for the text encoder. Examples: Returns: [`~SkyReelsV2PipelineOutput`] or `tuple`: If `return_dict` is `True`, [`SkyReelsV2PipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated images and the second element is a list of `bool`s indicating whether the corresponding generated image contains "not-safe-for-work" (nsfw) content. 
""" if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs # 1. Check inputs. Raise error if not correct self.check_inputs( prompt, negative_prompt, height, width, prompt_embeds, negative_prompt_embeds, callback_on_step_end_tensor_inputs, ) if num_frames % self.vae_scale_factor_temporal != 1: logger.warning( f"`num_frames - 1` has to be divisible by {self.vae_scale_factor_temporal}. Rounding to the nearest number." ) num_frames = num_frames // self.vae_scale_factor_temporal * self.vae_scale_factor_temporal + 1 num_frames = max(num_frames, 1) self._guidance_scale = guidance_scale self._attention_kwargs = attention_kwargs self._current_timestep = None self._interrupt = False device = self._execution_device # 2. Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] # 3. Encode input prompt prompt_embeds, negative_prompt_embeds = self.encode_prompt( prompt=prompt, negative_prompt=negative_prompt, do_classifier_free_guidance=self.do_classifier_free_guidance, num_videos_per_prompt=num_videos_per_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, max_sequence_length=max_sequence_length, device=device, ) transformer_dtype = self.transformer.dtype prompt_embeds = prompt_embeds.to(transformer_dtype) if negative_prompt_embeds is not None: negative_prompt_embeds = negative_prompt_embeds.to(transformer_dtype) # 4. Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps # 5. Prepare latent variables num_channels_latents = self.transformer.config.in_channels latents = self.prepare_latents( batch_size * num_videos_per_prompt, num_channels_latents, height, width, num_frames, torch.float32, device, generator, latents, ) # 6. 
Denoising loop num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order self._num_timesteps = len(timesteps) with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): if self.interrupt: continue self._current_timestep = t latent_model_input = latents.to(transformer_dtype) timestep = t.expand(latents.shape[0]) noise_pred = self.transformer( hidden_states=latent_model_input, timestep=timestep, encoder_hidden_states=prompt_embeds, attention_kwargs=attention_kwargs, return_dict=False, )[0] if self.do_classifier_free_guidance: noise_uncond = self.transformer( hidden_states=latent_model_input, timestep=timestep, encoder_hidden_states=negative_prompt_embeds, attention_kwargs=attention_kwargs, return_dict=False, )[0] noise_pred = noise_uncond + guidance_scale * (noise_pred - noise_uncond) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0] if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop("latents", latents) prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if XLA_AVAILABLE: xm.mark_step() self._current_timestep = None if not output_type == "latent": latents = latents.to(self.vae.dtype) latents_mean = ( torch.tensor(self.vae.config.latents_mean) .view(1, self.vae.config.z_dim, 1, 1, 1) .to(latents.device, latents.dtype) ) latents_std = 1.0 / torch.tensor(self.vae.config.latents_std).view(1, self.vae.config.z_dim, 1, 1, 1).to( latents.device, latents.dtype ) latents = latents / latents_std + latents_mean video = self.vae.decode(latents, return_dict=False)[0] video = self.video_processor.postprocess_video(video, output_type=output_type) else: video = latents # Offload all models self.maybe_free_model_hooks() if not return_dict: return (video,) return SkyReelsV2PipelineOutput(frames=video)
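A small standalone sketch of the frame-count rule applied in `__call__` above: `num_frames` is snapped so that `num_frames - 1` is divisible by the temporal VAE scale factor (4 when no VAE config is available), which in turn determines the number of latent frames prepared for the transformer.

```py
# Sketch: reproducing the num_frames snapping and latent frame count from the pipeline above.
vae_scale_factor_temporal = 4  # default fallback used by the pipeline

def snap_num_frames(num_frames: int) -> int:
    if num_frames % vae_scale_factor_temporal != 1:
        num_frames = num_frames // vae_scale_factor_temporal * vae_scale_factor_temporal + 1
    return max(num_frames, 1)

for requested in (97, 100, 16):
    snapped = snap_num_frames(requested)
    num_latent_frames = (snapped - 1) // vae_scale_factor_temporal + 1
    print(requested, "->", snapped, "latent frames:", num_latent_frames)

# 97  -> 97  latent frames: 25
# 100 -> 101 latent frames: 26
# 16  -> 17  latent frames: 5
```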
diffusers/src/diffusers/pipelines/skyreels_v2/pipeline_skyreels_v2.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/skyreels_v2/pipeline_skyreels_v2.py", "repo_id": "diffusers", "token_count": 12234 }
180
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from transformers.models.clip.modeling_clip import CLIPTextModelOutput from ...image_processor import VaeImageProcessor from ...loaders import StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin from ...models import AutoencoderKL, PriorTransformer, UNet2DConditionModel from ...models.embeddings import get_timestep_embedding from ...models.lora import adjust_lora_scale_text_encoder from ...schedulers import KarrasDiffusionSchedulers from ...utils import ( USE_PEFT_BACKEND, deprecate, is_torch_xla_available, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers, ) from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput, StableDiffusionMixin from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py >>> import torch >>> from diffusers import StableUnCLIPPipeline >>> pipe = StableUnCLIPPipeline.from_pretrained( ... "fusing/stable-unclip-2-1-l", torch_dtype=torch.float16 ... ) # TODO update model path >>> pipe = pipe.to("cuda") >>> prompt = "a photo of an astronaut riding a horse on mars" >>> images = pipe(prompt).images >>> images[0].save("astronaut_horse.png") ``` """ class StableUnCLIPPipeline( DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, StableDiffusionLoraLoaderMixin ): """ Pipeline for text-to-image generation using stable unCLIP. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). The pipeline also inherits the following loading methods: - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings - [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for loading LoRA weights - [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for saving LoRA weights Args: prior_tokenizer ([`CLIPTokenizer`]): A [`CLIPTokenizer`]. prior_text_encoder ([`CLIPTextModelWithProjection`]): Frozen [`CLIPTextModelWithProjection`] text-encoder. prior ([`PriorTransformer`]): The canonical unCLIP prior to approximate the image embedding from the text embedding. prior_scheduler ([`KarrasDiffusionSchedulers`]): Scheduler used in the prior denoising process. image_normalizer ([`StableUnCLIPImageNormalizer`]): Used to normalize the predicted image embeddings before the noise is applied and un-normalize the image embeddings after the noise has been applied. 
image_noising_scheduler ([`KarrasDiffusionSchedulers`]): Noise schedule for adding noise to the predicted image embeddings. The amount of noise to add is determined by the `noise_level`. tokenizer ([`CLIPTokenizer`]): A [`CLIPTokenizer`]. text_encoder ([`CLIPTextModel`]): Frozen [`CLIPTextModel`] text-encoder. unet ([`UNet2DConditionModel`]): A [`UNet2DConditionModel`] to denoise the encoded image latents. scheduler ([`KarrasDiffusionSchedulers`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. """ _exclude_from_cpu_offload = ["prior", "image_normalizer"] model_cpu_offload_seq = "text_encoder->prior_text_encoder->unet->vae" # prior components prior_tokenizer: CLIPTokenizer prior_text_encoder: CLIPTextModelWithProjection prior: PriorTransformer prior_scheduler: KarrasDiffusionSchedulers # image noising components image_normalizer: StableUnCLIPImageNormalizer image_noising_scheduler: KarrasDiffusionSchedulers # regular denoising components tokenizer: CLIPTokenizer text_encoder: CLIPTextModel unet: UNet2DConditionModel scheduler: KarrasDiffusionSchedulers vae: AutoencoderKL def __init__( self, # prior components prior_tokenizer: CLIPTokenizer, prior_text_encoder: CLIPTextModelWithProjection, prior: PriorTransformer, prior_scheduler: KarrasDiffusionSchedulers, # image noising components image_normalizer: StableUnCLIPImageNormalizer, image_noising_scheduler: KarrasDiffusionSchedulers, # regular denoising components tokenizer: CLIPTokenizer, text_encoder: CLIPTextModel, unet: UNet2DConditionModel, scheduler: KarrasDiffusionSchedulers, # vae vae: AutoencoderKL, ): super().__init__() self.register_modules( prior_tokenizer=prior_tokenizer, prior_text_encoder=prior_text_encoder, prior=prior, prior_scheduler=prior_scheduler, image_normalizer=image_normalizer, image_noising_scheduler=image_noising_scheduler, tokenizer=tokenizer, text_encoder=text_encoder, unet=unet, scheduler=scheduler, vae=vae, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8 self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline._encode_prompt with _encode_prompt->_encode_prior_prompt, tokenizer->prior_tokenizer, text_encoder->prior_text_encoder def _encode_prior_prompt( self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, text_model_output: Optional[Union[CLIPTextModelOutput, Tuple]] = None, text_attention_mask: Optional[torch.Tensor] = None, ): if text_model_output is None: batch_size = len(prompt) if isinstance(prompt, list) else 1 # get prompt text embeddings text_inputs = self.prior_tokenizer( prompt, padding="max_length", max_length=self.prior_tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids text_mask = text_inputs.attention_mask.bool().to(device) untruncated_ids = self.prior_tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( text_input_ids, untruncated_ids ): removed_text = self.prior_tokenizer.batch_decode( untruncated_ids[:, self.prior_tokenizer.model_max_length - 1 : -1] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.prior_tokenizer.model_max_length} 
tokens: {removed_text}" ) text_input_ids = text_input_ids[:, : self.prior_tokenizer.model_max_length] prior_text_encoder_output = self.prior_text_encoder(text_input_ids.to(device)) prompt_embeds = prior_text_encoder_output.text_embeds text_enc_hid_states = prior_text_encoder_output.last_hidden_state else: batch_size = text_model_output[0].shape[0] prompt_embeds, text_enc_hid_states = text_model_output[0], text_model_output[1] text_mask = text_attention_mask prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0) text_enc_hid_states = text_enc_hid_states.repeat_interleave(num_images_per_prompt, dim=0) text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0) if do_classifier_free_guidance: uncond_tokens = [""] * batch_size uncond_input = self.prior_tokenizer( uncond_tokens, padding="max_length", max_length=self.prior_tokenizer.model_max_length, truncation=True, return_tensors="pt", ) uncond_text_mask = uncond_input.attention_mask.bool().to(device) negative_prompt_embeds_prior_text_encoder_output = self.prior_text_encoder( uncond_input.input_ids.to(device) ) negative_prompt_embeds = negative_prompt_embeds_prior_text_encoder_output.text_embeds uncond_text_enc_hid_states = negative_prompt_embeds_prior_text_encoder_output.last_hidden_state # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len) seq_len = uncond_text_enc_hid_states.shape[1] uncond_text_enc_hid_states = uncond_text_enc_hid_states.repeat(1, num_images_per_prompt, 1) uncond_text_enc_hid_states = uncond_text_enc_hid_states.view( batch_size * num_images_per_prompt, seq_len, -1 ) uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0) # done duplicates # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) text_enc_hid_states = torch.cat([uncond_text_enc_hid_states, text_enc_hid_states]) text_mask = torch.cat([uncond_text_mask, text_mask]) return prompt_embeds, text_enc_hid_states, text_mask # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt def _encode_prompt( self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, lora_scale: Optional[float] = None, **kwargs, ): deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple." 
deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False) prompt_embeds_tuple = self.encode_prompt( prompt=prompt, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=do_classifier_free_guidance, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=lora_scale, **kwargs, ) # concatenate for backwards comp prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) return prompt_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt def encode_prompt( self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, lora_scale: Optional[float] = None, clip_skip: Optional[int] = None, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded device: (`torch.device`): torch device num_images_per_prompt (`int`): number of images that should be generated per prompt do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. lora_scale (`float`, *optional*): A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. 
""" # set lora scale so that monkey patched LoRA # function of text encoder can correctly access it if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): self._lora_scale = lora_scale # dynamically adjust the LoRA scale if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: # textual inversion: process multi-vector tokens if necessary if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, self.tokenizer) text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( text_input_ids, untruncated_ids ): removed_text = self.tokenizer.batch_decode( untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None if clip_skip is None: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) prompt_embeds = prompt_embeds[0] else: prompt_embeds = self.text_encoder( text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True ) # Access the `hidden_states` first, that contains a tuple of # all the hidden states from the encoder layers. Then index into # the tuple to access the hidden states from the desired layer. prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] # We also need to apply the final LayerNorm here to not mess with the # representations. The `last_hidden_states` that we typically use for # obtaining the final prompt representations passes through the LayerNorm # layer. prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) if self.text_encoder is not None: prompt_embeds_dtype = self.text_encoder.dtype elif self.unet is not None: prompt_embeds_dtype = self.unet.dtype else: prompt_embeds_dtype = prompt_embeds.dtype prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) bs_embed, seq_len, _ = prompt_embeds.shape # duplicate text embeddings for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." 
) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = negative_prompt # textual inversion: process multi-vector tokens if necessary if isinstance(self, TextualInversionLoaderMixin): uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder( uncond_input.input_ids.to(device), attention_mask=attention_mask, ) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) if self.text_encoder is not None: if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: # Retrieve the original scale by scaling back the LoRA layers unscale_lora_layers(self.text_encoder, lora_scale) return prompt_embeds, negative_prompt_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents def decode_latents(self, latents): deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead" deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.decode(latents, return_dict=False)[0] image = (image / 2 + 0.5).clamp(0, 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 image = image.cpu().permute(0, 2, 3, 1).float().numpy() return image # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs with prepare_extra_step_kwargs->prepare_prior_extra_step_kwargs, scheduler->prior_scheduler def prepare_prior_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the prior_scheduler step, since not all prior_schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other prior_schedulers. 
# eta corresponds to η in DDIM paper: https://huggingface.co/papers/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.prior_scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the prior_scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.prior_scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://huggingface.co/papers/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs def check_inputs( self, prompt, height, width, callback_steps, noise_level, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, ): if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if (callback_steps is None) or ( callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) if prompt is not None and prompt_embeds is not None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Please make sure to define only one of the two." ) if prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." ) if prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( "Provide either `negative_prompt` or `negative_prompt_embeds`. Cannot leave both `negative_prompt` and `negative_prompt_embeds` undefined." ) if prompt is not None and negative_prompt is not None: if type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." ) if noise_level < 0 or noise_level >= self.image_noising_scheduler.config.num_train_timesteps: raise ValueError( f"`noise_level` must be between 0 and {self.image_noising_scheduler.config.num_train_timesteps - 1}, inclusive." 
) # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: if latents.shape != shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") latents = latents.to(device) latents = latents * scheduler.init_noise_sigma return latents def noise_image_embeddings( self, image_embeds: torch.Tensor, noise_level: int, noise: Optional[torch.Tensor] = None, generator: Optional[torch.Generator] = None, ): """ Add noise to the image embeddings. The amount of noise is controlled by a `noise_level` input. A higher `noise_level` increases the variance in the final un-noised images. The noise is applied in two ways: 1. A noise schedule is applied directly to the embeddings. 2. A vector of sinusoidal time embeddings are appended to the output. In both cases, the amount of noise is controlled by the same `noise_level`. The embeddings are normalized before the noise is applied and un-normalized after the noise is applied. """ if noise is None: noise = randn_tensor( image_embeds.shape, generator=generator, device=image_embeds.device, dtype=image_embeds.dtype ) noise_level = torch.tensor([noise_level] * image_embeds.shape[0], device=image_embeds.device) self.image_normalizer.to(image_embeds.device) image_embeds = self.image_normalizer.scale(image_embeds) image_embeds = self.image_noising_scheduler.add_noise(image_embeds, timesteps=noise_level, noise=noise) image_embeds = self.image_normalizer.unscale(image_embeds) noise_level = get_timestep_embedding( timesteps=noise_level, embedding_dim=image_embeds.shape[-1], flip_sin_to_cos=True, downscale_freq_shift=0 ) # `get_timestep_embeddings` does not contain any weights and will always return f32 tensors, # but we might actually be running in fp16. so we need to cast here. # there might be better ways to encapsulate this. noise_level = noise_level.to(image_embeds.dtype) image_embeds = torch.cat((image_embeds, noise_level), 1) return image_embeds @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, # regular denoising process args prompt: Optional[Union[str, List[str]]] = None, height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 20, guidance_scale: float = 10.0, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.Tensor] = None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, callback_steps: int = 1, cross_attention_kwargs: Optional[Dict[str, Any]] = None, noise_level: int = 0, # prior args prior_num_inference_steps: int = 25, prior_guidance_scale: float = 4.0, prior_latents: Optional[torch.Tensor] = None, clip_skip: Optional[int] = None, ): """ The call function to the pipeline for generation. Args: prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): The height in pixels of the generated image. 
width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): The width in pixels of the generated image. num_inference_steps (`int`, *optional*, defaults to 20): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. guidance_scale (`float`, *optional*, defaults to 10.0): A higher guidance scale value encourages the model to generate images closely linked to the text `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide what to not include in image generation. If not defined, you need to pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) from the [DDIM](https://huggingface.co/papers/2010.02502) paper. Only applies to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.Tensor`, *optional*): Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor is generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, text embeddings are generated from the `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generated image. Choose between `PIL.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. callback (`Callable`, *optional*): A function that calls every `callback_steps` steps during inference. The function is called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function is called. If not specified, the callback is called at every step. cross_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). noise_level (`int`, *optional*, defaults to `0`): The amount of noise to add to the image embeddings. A higher `noise_level` increases the variance in the final un-noised images. See [`StableUnCLIPPipeline.noise_image_embeddings`] for more details. prior_num_inference_steps (`int`, *optional*, defaults to 25): The number of denoising steps in the prior denoising process. More denoising steps usually lead to a higher quality image at the expense of slower inference. 
prior_guidance_scale (`float`, *optional*, defaults to 4.0): A higher guidance scale value encourages the model to generate images closely linked to the text `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. prior_latents (`torch.Tensor`, *optional*): Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image embedding generation in the prior denoising process. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor is generated by sampling using the supplied random `generator`. clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. Examples: Returns: [`~pipelines.ImagePipelineOutput`] or `tuple`: [`~ pipeline_utils.ImagePipelineOutput`] if `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is a list with the generated images. """ # 0. Default height and width to unet height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor # 1. Check inputs. Raise error if not correct self.check_inputs( prompt=prompt, height=height, width=width, callback_steps=callback_steps, noise_level=noise_level, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, ) # 2. Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] batch_size = batch_size * num_images_per_prompt device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1` # corresponds to doing no classifier free guidance. prior_do_classifier_free_guidance = prior_guidance_scale > 1.0 # 3. Encode input prompt prior_prompt_embeds, prior_text_encoder_hidden_states, prior_text_mask = self._encode_prior_prompt( prompt=prompt, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=prior_do_classifier_free_guidance, ) # 4. Prepare prior timesteps self.prior_scheduler.set_timesteps(prior_num_inference_steps, device=device) prior_timesteps_tensor = self.prior_scheduler.timesteps # 5. Prepare prior latent variables embedding_dim = self.prior.config.embedding_dim prior_latents = self.prepare_latents( (batch_size, embedding_dim), prior_prompt_embeds.dtype, device, generator, prior_latents, self.prior_scheduler, ) # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline prior_extra_step_kwargs = self.prepare_prior_extra_step_kwargs(generator, eta) # 7. 
Prior denoising loop for i, t in enumerate(self.progress_bar(prior_timesteps_tensor)): # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([prior_latents] * 2) if prior_do_classifier_free_guidance else prior_latents latent_model_input = self.prior_scheduler.scale_model_input(latent_model_input, t) predicted_image_embedding = self.prior( latent_model_input, timestep=t, proj_embedding=prior_prompt_embeds, encoder_hidden_states=prior_text_encoder_hidden_states, attention_mask=prior_text_mask, ).predicted_image_embedding if prior_do_classifier_free_guidance: predicted_image_embedding_uncond, predicted_image_embedding_text = predicted_image_embedding.chunk(2) predicted_image_embedding = predicted_image_embedding_uncond + prior_guidance_scale * ( predicted_image_embedding_text - predicted_image_embedding_uncond ) prior_latents = self.prior_scheduler.step( predicted_image_embedding, timestep=t, sample=prior_latents, **prior_extra_step_kwargs, return_dict=False, )[0] if callback is not None and i % callback_steps == 0: callback(i, t, prior_latents) prior_latents = self.prior.post_process_latents(prior_latents) image_embeds = prior_latents # done prior # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1` # corresponds to doing no classifier free guidance. do_classifier_free_guidance = guidance_scale > 1.0 # 8. Encode input prompt text_encoder_lora_scale = ( cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None ) prompt_embeds, negative_prompt_embeds = self.encode_prompt( prompt=prompt, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=do_classifier_free_guidance, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=text_encoder_lora_scale, clip_skip=clip_skip, ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes if do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) # 9. Prepare image embeddings image_embeds = self.noise_image_embeddings( image_embeds=image_embeds, noise_level=noise_level, generator=generator, ) if do_classifier_free_guidance: negative_prompt_embeds = torch.zeros_like(image_embeds) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes image_embeds = torch.cat([negative_prompt_embeds, image_embeds]) # 10. Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps # 11. Prepare latent variables num_channels_latents = self.unet.config.in_channels shape = ( batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor, ) latents = self.prepare_latents( shape=shape, dtype=prompt_embeds.dtype, device=device, generator=generator, latents=latents, scheduler=self.scheduler, ) # 12. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # 13. 
Denoising loop for i, t in enumerate(self.progress_bar(timesteps)): latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) # predict the noise residual noise_pred = self.unet( latent_model_input, t, encoder_hidden_states=prompt_embeds, class_labels=image_embeds, cross_attention_kwargs=cross_attention_kwargs, return_dict=False, )[0] # perform guidance if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, "order", 1) callback(step_idx, t, latents) if XLA_AVAILABLE: xm.mark_step() if not output_type == "latent": image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] else: image = latents image = self.image_processor.postprocess(image, output_type=output_type) # Offload all models self.maybe_free_model_hooks() if not return_dict: return (image,) return ImagePipelineOutput(images=image)
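For orientation, here is a minimal sketch of how the stable unCLIP pipeline defined above might be driven end to end. The checkpoint id is a placeholder assumption (any repository that ships the prior, image normalizer, noising scheduler, text encoder, UNet and VAE expected by this pipeline would do); the keyword arguments are taken from the `__call__` signature in this file.

```py
import torch
from diffusers import StableUnCLIPPipeline

# Placeholder checkpoint id; substitute a repo laid out for StableUnCLIPPipeline.
pipe = StableUnCLIPPipeline.from_pretrained(
    "path/or/repo-with-stable-unclip", torch_dtype=torch.float16
)
pipe = pipe.to("cuda")

# `prior_num_inference_steps` / `prior_guidance_scale` steer the prior loop (steps 3-7 above);
# `noise_level` controls how much noise is added to the image embeddings before decoding.
image = pipe(
    prompt="a photo of an astronaut riding a horse on mars",
    num_inference_steps=20,
    guidance_scale=10.0,
    prior_num_inference_steps=25,
    prior_guidance_scale=4.0,
    noise_level=0,
).images[0]
image.save("stable_unclip_sample.png")
```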
# Source file: diffusers/src/diffusers/pipelines/stable_diffusion/pipeline_stable_unclip.py
# Copyright 2025 The GLIGEN Authors and HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import warnings from typing import Any, Callable, Dict, List, Optional, Union import PIL.Image import torch from transformers import ( CLIPImageProcessor, CLIPProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection, ) from ...image_processor import VaeImageProcessor from ...loaders import StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin from ...models import AutoencoderKL, UNet2DConditionModel from ...models.attention import GatedSelfAttentionDense from ...models.lora import adjust_lora_scale_text_encoder from ...schedulers import KarrasDiffusionSchedulers from ...utils import ( USE_PEFT_BACKEND, is_torch_xla_available, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers, ) from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DeprecatedPipelineMixin, DiffusionPipeline, StableDiffusionMixin from ..stable_diffusion import StableDiffusionPipelineOutput from ..stable_diffusion.clip_image_project_model import CLIPImageProjection from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py >>> import torch >>> from diffusers import StableDiffusionGLIGENTextImagePipeline >>> from diffusers.utils import load_image >>> # Insert objects described by image at the region defined by bounding boxes >>> pipe = StableDiffusionGLIGENTextImagePipeline.from_pretrained( ... "anhnct/Gligen_Inpainting_Text_Image", torch_dtype=torch.float16 ... ) >>> pipe = pipe.to("cuda") >>> input_image = load_image( ... "https://hf.co/datasets/huggingface/documentation-images/resolve/main/diffusers/gligen/livingroom_modern.png" ... ) >>> prompt = "a backpack" >>> boxes = [[0.2676, 0.4088, 0.4773, 0.7183]] >>> phrases = None >>> gligen_image = load_image( ... "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/gligen/backpack.jpeg" ... ) >>> images = pipe( ... prompt=prompt, ... gligen_phrases=phrases, ... gligen_inpaint_image=input_image, ... gligen_boxes=boxes, ... gligen_images=[gligen_image], ... gligen_scheduled_sampling_beta=1, ... output_type="pil", ... num_inference_steps=50, ... ).images >>> images[0].save("./gligen-inpainting-text-image-box.jpg") >>> # Generate an image described by the prompt and >>> # insert objects described by text and image at the region defined by bounding boxes >>> pipe = StableDiffusionGLIGENTextImagePipeline.from_pretrained( ... "anhnct/Gligen_Text_Image", torch_dtype=torch.float16 ... ) >>> pipe = pipe.to("cuda") >>> prompt = "a flower sitting on the beach" >>> boxes = [[0.0, 0.09, 0.53, 0.76]] >>> phrases = ["flower"] >>> gligen_image = load_image( ... 
"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/gligen/pexels-pixabay-60597.jpg" ... ) >>> images = pipe( ... prompt=prompt, ... gligen_phrases=phrases, ... gligen_images=[gligen_image], ... gligen_boxes=boxes, ... gligen_scheduled_sampling_beta=1, ... output_type="pil", ... num_inference_steps=50, ... ).images >>> images[0].save("./gligen-generation-text-image-box.jpg") >>> # Generate an image described by the prompt and >>> # transfer style described by image at the region defined by bounding boxes >>> pipe = StableDiffusionGLIGENTextImagePipeline.from_pretrained( ... "anhnct/Gligen_Text_Image", torch_dtype=torch.float16 ... ) >>> pipe = pipe.to("cuda") >>> prompt = "a dragon flying on the sky" >>> boxes = [[0.4, 0.2, 1.0, 0.8], [0.0, 1.0, 0.0, 1.0]] # Set `[0.0, 1.0, 0.0, 1.0]` for the style >>> gligen_image = load_image( ... "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/landscape.png" ... ) >>> gligen_placeholder = load_image( ... "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/landscape.png" ... ) >>> images = pipe( ... prompt=prompt, ... gligen_phrases=[ ... "dragon", ... "placeholder", ... ], # Can use any text instead of `placeholder` token, because we will use mask here ... gligen_images=[ ... gligen_placeholder, ... gligen_image, ... ], # Can use any image in gligen_placeholder, because we will use mask here ... input_phrases_mask=[1, 0], # Set 0 for the placeholder token ... input_images_mask=[0, 1], # Set 0 for the placeholder image ... gligen_boxes=boxes, ... gligen_scheduled_sampling_beta=1, ... output_type="pil", ... num_inference_steps=50, ... ).images >>> images[0].save("./gligen-generation-text-image-box-style-transfer.jpg") ``` """ class StableDiffusionGLIGENTextImagePipeline(DeprecatedPipelineMixin, DiffusionPipeline, StableDiffusionMixin): r""" Pipeline for text-to-image generation using Stable Diffusion with Grounded-Language-to-Image Generation (GLIGEN). This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.). Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. text_encoder ([`~transformers.CLIPTextModel`]): Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). tokenizer ([`~transformers.CLIPTokenizer`]): A `CLIPTokenizer` to tokenize text. processor ([`~transformers.CLIPProcessor`]): A `CLIPProcessor` to process reference image. image_encoder ([`~transformers.CLIPVisionModelWithProjection`]): Frozen image-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). image_project ([`CLIPImageProjection`]): A `CLIPImageProjection` to project image embedding into phrases embedding space. unet ([`UNet2DConditionModel`]): A `UNet2DConditionModel` to denoise the encoded image latents. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. safety_checker ([`StableDiffusionSafetyChecker`]): Classification module that estimates whether generated images could be considered offensive or harmful. 
Please refer to the [model card](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5) for more details about a model's potential harms. feature_extractor ([`~transformers.CLIPImageProcessor`]): A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. """ _last_supported_version = "0.33.1" model_cpu_offload_seq = "text_encoder->unet->vae" _optional_components = ["safety_checker", "feature_extractor"] _exclude_from_cpu_offload = ["safety_checker"] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, processor: CLIPProcessor, image_encoder: CLIPVisionModelWithProjection, image_project: CLIPImageProjection, unet: UNet2DConditionModel, scheduler: KarrasDiffusionSchedulers, safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, requires_safety_checker: bool = True, ): super().__init__() if safety_checker is None and requires_safety_checker: logger.warning( f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" " results in services or applications open to the public. Both the diffusers team and Hugging Face" " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" " it only for use-cases that involve analyzing network behavior or auditing its results. For more" " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." ) if safety_checker is not None and feature_extractor is None: raise ValueError( "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." ) self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, image_encoder=image_encoder, processor=processor, image_project=image_project, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8 self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True) self.register_to_config(requires_safety_checker=requires_safety_checker) # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt def encode_prompt( self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, lora_scale: Optional[float] = None, clip_skip: Optional[int] = None, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded device: (`torch.device`): torch device num_images_per_prompt (`int`): number of images that should be generated per prompt do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. 
Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. lora_scale (`float`, *optional*): A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. """ # set lora scale so that monkey patched LoRA # function of text encoder can correctly access it if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): self._lora_scale = lora_scale # dynamically adjust the LoRA scale if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: # textual inversion: process multi-vector tokens if necessary if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, self.tokenizer) text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( text_input_ids, untruncated_ids ): removed_text = self.tokenizer.batch_decode( untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None if clip_skip is None: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) prompt_embeds = prompt_embeds[0] else: prompt_embeds = self.text_encoder( text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True ) # Access the `hidden_states` first, that contains a tuple of # all the hidden states from the encoder layers. Then index into # the tuple to access the hidden states from the desired layer. prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] # We also need to apply the final LayerNorm here to not mess with the # representations. The `last_hidden_states` that we typically use for # obtaining the final prompt representations passes through the LayerNorm # layer. 
prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) if self.text_encoder is not None: prompt_embeds_dtype = self.text_encoder.dtype elif self.unet is not None: prompt_embeds_dtype = self.unet.dtype else: prompt_embeds_dtype = prompt_embeds.dtype prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) bs_embed, seq_len, _ = prompt_embeds.shape # duplicate text embeddings for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = negative_prompt # textual inversion: process multi-vector tokens if necessary if isinstance(self, TextualInversionLoaderMixin): uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder( uncond_input.input_ids.to(device), attention_mask=attention_mask, ) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) if self.text_encoder is not None: if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: # Retrieve the original scale by scaling back the LoRA layers unscale_lora_layers(self.text_encoder, lora_scale) return prompt_embeds, negative_prompt_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker def run_safety_checker(self, image, device, dtype): if self.safety_checker is None: has_nsfw_concept = None else: if torch.is_tensor(image): feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") else: feature_extractor_input = self.image_processor.numpy_to_pil(image) safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) image, has_nsfw_concept = self.safety_checker( images=image, clip_input=safety_checker_input.pixel_values.to(dtype) ) return image, has_nsfw_concept # Copied from 
diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://huggingface.co/papers/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs def check_inputs( self, prompt, height, width, callback_steps, gligen_images, gligen_phrases, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, callback_on_step_end_tensor_inputs=None, ): if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) if callback_on_step_end_tensor_inputs is not None and not all( k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs ): raise ValueError( f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" ) if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." ) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." ) if gligen_images is not None and gligen_phrases is not None: if len(gligen_images) != len(gligen_phrases): raise ValueError( "`gligen_images` and `gligen_phrases` must have the same length when both are provided, but" f" got: `gligen_images` with length {len(gligen_images)} != `gligen_phrases` with length {len(gligen_phrases)}." 
) # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = ( batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor, ) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents def enable_fuser(self, enabled=True): for module in self.unet.modules(): if type(module) is GatedSelfAttentionDense: module.enabled = enabled def draw_inpaint_mask_from_boxes(self, boxes, size): """ Create an inpainting mask based on given boxes. This function generates an inpainting mask using the provided boxes to mark regions that need to be inpainted. """ inpaint_mask = torch.ones(size[0], size[1]) for box in boxes: x0, x1 = box[0] * size[0], box[2] * size[0] y0, y1 = box[1] * size[1], box[3] * size[1] inpaint_mask[int(y0) : int(y1), int(x0) : int(x1)] = 0 return inpaint_mask def crop(self, im, new_width, new_height): """ Crop the input image to the specified dimensions. """ width, height = im.size left = (width - new_width) / 2 top = (height - new_height) / 2 right = (width + new_width) / 2 bottom = (height + new_height) / 2 return im.crop((left, top, right, bottom)) def target_size_center_crop(self, im, new_hw): """ Crop and resize the image to the target size while keeping the center. """ width, height = im.size if width != height: im = self.crop(im, min(height, width), min(height, width)) return im.resize((new_hw, new_hw), PIL.Image.LANCZOS) def complete_mask(self, has_mask, max_objs, device): """ Based on the input mask corresponding value `0 or 1` for each phrases and image, mask the features corresponding to phrases and images. """ mask = torch.ones(1, max_objs).type(self.text_encoder.dtype).to(device) if has_mask is None: return mask if isinstance(has_mask, int): return mask * has_mask else: for idx, value in enumerate(has_mask): mask[0, idx] = value return mask def get_clip_feature(self, input, normalize_constant, device, is_image=False): """ Get image and phrases embedding by using CLIP pretrain model. The image embedding is transformed into the phrases embedding space through a projection. 
""" if is_image: if input is None: return None inputs = self.processor(images=[input], return_tensors="pt").to(device) inputs["pixel_values"] = inputs["pixel_values"].to(self.image_encoder.dtype) outputs = self.image_encoder(**inputs) feature = outputs.image_embeds feature = self.image_project(feature).squeeze(0) feature = (feature / feature.norm()) * normalize_constant feature = feature.unsqueeze(0) else: if input is None: return None inputs = self.tokenizer(input, return_tensors="pt", padding=True).to(device) outputs = self.text_encoder(**inputs) feature = outputs.pooler_output return feature def get_cross_attention_kwargs_with_grounded( self, hidden_size, gligen_phrases, gligen_images, gligen_boxes, input_phrases_mask, input_images_mask, repeat_batch, normalize_constant, max_objs, device, ): """ Prepare the cross-attention kwargs containing information about the grounded input (boxes, mask, image embedding, phrases embedding). """ phrases, images = gligen_phrases, gligen_images images = [None] * len(phrases) if images is None else images phrases = [None] * len(images) if phrases is None else phrases boxes = torch.zeros(max_objs, 4, device=device, dtype=self.text_encoder.dtype) masks = torch.zeros(max_objs, device=device, dtype=self.text_encoder.dtype) phrases_masks = torch.zeros(max_objs, device=device, dtype=self.text_encoder.dtype) image_masks = torch.zeros(max_objs, device=device, dtype=self.text_encoder.dtype) phrases_embeddings = torch.zeros(max_objs, hidden_size, device=device, dtype=self.text_encoder.dtype) image_embeddings = torch.zeros(max_objs, hidden_size, device=device, dtype=self.text_encoder.dtype) text_features = [] image_features = [] for phrase, image in zip(phrases, images): text_features.append(self.get_clip_feature(phrase, normalize_constant, device, is_image=False)) image_features.append(self.get_clip_feature(image, normalize_constant, device, is_image=True)) for idx, (box, text_feature, image_feature) in enumerate(zip(gligen_boxes, text_features, image_features)): boxes[idx] = torch.tensor(box) masks[idx] = 1 if text_feature is not None: phrases_embeddings[idx] = text_feature phrases_masks[idx] = 1 if image_feature is not None: image_embeddings[idx] = image_feature image_masks[idx] = 1 input_phrases_mask = self.complete_mask(input_phrases_mask, max_objs, device) phrases_masks = phrases_masks.unsqueeze(0).repeat(repeat_batch, 1) * input_phrases_mask input_images_mask = self.complete_mask(input_images_mask, max_objs, device) image_masks = image_masks.unsqueeze(0).repeat(repeat_batch, 1) * input_images_mask boxes = boxes.unsqueeze(0).repeat(repeat_batch, 1, 1) masks = masks.unsqueeze(0).repeat(repeat_batch, 1) phrases_embeddings = phrases_embeddings.unsqueeze(0).repeat(repeat_batch, 1, 1) image_embeddings = image_embeddings.unsqueeze(0).repeat(repeat_batch, 1, 1) out = { "boxes": boxes, "masks": masks, "phrases_masks": phrases_masks, "image_masks": image_masks, "phrases_embeddings": phrases_embeddings, "image_embeddings": image_embeddings, } return out def get_cross_attention_kwargs_without_grounded(self, hidden_size, repeat_batch, max_objs, device): """ Prepare the cross-attention kwargs without information about the grounded input (boxes, mask, image embedding, phrases embedding) (All are zero tensor). 
""" boxes = torch.zeros(max_objs, 4, device=device, dtype=self.text_encoder.dtype) masks = torch.zeros(max_objs, device=device, dtype=self.text_encoder.dtype) phrases_masks = torch.zeros(max_objs, device=device, dtype=self.text_encoder.dtype) image_masks = torch.zeros(max_objs, device=device, dtype=self.text_encoder.dtype) phrases_embeddings = torch.zeros(max_objs, hidden_size, device=device, dtype=self.text_encoder.dtype) image_embeddings = torch.zeros(max_objs, hidden_size, device=device, dtype=self.text_encoder.dtype) out = { "boxes": boxes.unsqueeze(0).repeat(repeat_batch, 1, 1), "masks": masks.unsqueeze(0).repeat(repeat_batch, 1), "phrases_masks": phrases_masks.unsqueeze(0).repeat(repeat_batch, 1), "image_masks": image_masks.unsqueeze(0).repeat(repeat_batch, 1), "phrases_embeddings": phrases_embeddings.unsqueeze(0).repeat(repeat_batch, 1, 1), "image_embeddings": image_embeddings.unsqueeze(0).repeat(repeat_batch, 1, 1), } return out @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, prompt: Union[str, List[str]] = None, height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 50, guidance_scale: float = 7.5, gligen_scheduled_sampling_beta: float = 0.3, gligen_phrases: List[str] = None, gligen_images: List[PIL.Image.Image] = None, input_phrases_mask: Union[int, List[int]] = None, input_images_mask: Union[int, List[int]] = None, gligen_boxes: List[List[float]] = None, gligen_inpaint_image: Optional[PIL.Image.Image] = None, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, callback_steps: int = 1, cross_attention_kwargs: Optional[Dict[str, Any]] = None, gligen_normalize_constant: float = 28.7, clip_skip: int = None, ): r""" The call function to the pipeline for generation. Args: prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): The height in pixels of the generated image. width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): The width in pixels of the generated image. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. guidance_scale (`float`, *optional*, defaults to 7.5): A higher guidance scale value encourages the model to generate images closely linked to the text `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. gligen_phrases (`List[str]`): The phrases to guide what to include in each of the regions defined by the corresponding `gligen_boxes`. There should only be one phrase per bounding box. gligen_images (`List[PIL.Image.Image]`): The images to guide what to include in each of the regions defined by the corresponding `gligen_boxes`. 
There should only be one image per bounding box input_phrases_mask (`int` or `List[int]`): pre phrases mask input defined by the correspongding `input_phrases_mask` input_images_mask (`int` or `List[int]`): pre images mask input defined by the correspongding `input_images_mask` gligen_boxes (`List[List[float]]`): The bounding boxes that identify rectangular regions of the image that are going to be filled with the content described by the corresponding `gligen_phrases`. Each rectangular box is defined as a `List[float]` of 4 elements `[xmin, ymin, xmax, ymax]` where each value is between [0,1]. gligen_inpaint_image (`PIL.Image.Image`, *optional*): The input image, if provided, is inpainted with objects described by the `gligen_boxes` and `gligen_phrases`. Otherwise, it is treated as a generation task on a blank input image. gligen_scheduled_sampling_beta (`float`, defaults to 0.3): Scheduled Sampling factor from [GLIGEN: Open-Set Grounded Text-to-Image Generation](https://huggingface.co/papers/2301.07093). Scheduled Sampling factor is only varied for scheduled sampling during inference for improved quality and controllability. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide what to not include in image generation. If not defined, you need to pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) from the [DDIM](https://huggingface.co/papers/2010.02502) paper. Only applies to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.Tensor`, *optional*): Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor is generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, text embeddings are generated from the `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generated image. Choose between `PIL.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a plain tuple. callback (`Callable`, *optional*): A function that calls every `callback_steps` steps during inference. The function is called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function is called. If not specified, the callback is called at every step. 
cross_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). gligen_normalize_constant (`float`, *optional*, defaults to 28.7): The normalize value of the image embedding. clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. Examples: Returns: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated images and the second element is a list of `bool`s indicating whether the corresponding generated image contains "not-safe-for-work" (nsfw) content. """ # 0. Default height and width to unet height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor # 1. Check inputs. Raise error if not correct self.check_inputs( prompt, height, width, callback_steps, gligen_images, gligen_phrases, negative_prompt, prompt_embeds, negative_prompt_embeds, ) # 2. Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1` # corresponds to doing no classifier free guidance. do_classifier_free_guidance = guidance_scale > 1.0 # 3. Encode input prompt prompt_embeds, negative_prompt_embeds = self.encode_prompt( prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, clip_skip=clip_skip, ) if do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) # 4. Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps # 5. Prepare latent variables num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents( batch_size * num_images_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents, ) # 5.1 Prepare GLIGEN variables max_objs = 30 if len(gligen_boxes) > max_objs: warnings.warn( f"More that {max_objs} objects found. 
Only first {max_objs} objects will be processed.", FutureWarning, ) gligen_phrases = gligen_phrases[:max_objs] gligen_boxes = gligen_boxes[:max_objs] gligen_images = gligen_images[:max_objs] repeat_batch = batch_size * num_images_per_prompt if do_classifier_free_guidance: repeat_batch = repeat_batch * 2 if cross_attention_kwargs is None: cross_attention_kwargs = {} hidden_size = prompt_embeds.shape[2] cross_attention_kwargs["gligen"] = self.get_cross_attention_kwargs_with_grounded( hidden_size=hidden_size, gligen_phrases=gligen_phrases, gligen_images=gligen_images, gligen_boxes=gligen_boxes, input_phrases_mask=input_phrases_mask, input_images_mask=input_images_mask, repeat_batch=repeat_batch, normalize_constant=gligen_normalize_constant, max_objs=max_objs, device=device, ) cross_attention_kwargs_without_grounded = {} cross_attention_kwargs_without_grounded["gligen"] = self.get_cross_attention_kwargs_without_grounded( hidden_size=hidden_size, repeat_batch=repeat_batch, max_objs=max_objs, device=device ) # Prepare latent variables for GLIGEN inpainting if gligen_inpaint_image is not None: # if the given input image is not of the same size as expected by VAE # center crop and resize the input image to expected shape if gligen_inpaint_image.size != (self.vae.sample_size, self.vae.sample_size): gligen_inpaint_image = self.target_size_center_crop(gligen_inpaint_image, self.vae.sample_size) # Convert a single image into a batch of images with a batch size of 1 # The resulting shape becomes (1, C, H, W), where C is the number of channels, # and H and W are the height and width of the image. # scales the pixel values to a range [-1, 1] gligen_inpaint_image = self.image_processor.preprocess(gligen_inpaint_image) gligen_inpaint_image = gligen_inpaint_image.to(dtype=self.vae.dtype, device=self.vae.device) # Run AutoEncoder to get corresponding latents gligen_inpaint_latent = self.vae.encode(gligen_inpaint_image).latent_dist.sample() gligen_inpaint_latent = self.vae.config.scaling_factor * gligen_inpaint_latent # Generate an inpainting mask # pixel value = 0, where the object is present (defined by bounding boxes above) # 1, everywhere else gligen_inpaint_mask = self.draw_inpaint_mask_from_boxes(gligen_boxes, gligen_inpaint_latent.shape[2:]) gligen_inpaint_mask = gligen_inpaint_mask.to( dtype=gligen_inpaint_latent.dtype, device=gligen_inpaint_latent.device ) gligen_inpaint_mask = gligen_inpaint_mask[None, None] gligen_inpaint_mask_addition = torch.cat( (gligen_inpaint_latent * gligen_inpaint_mask, gligen_inpaint_mask), dim=1 ) # Convert a single mask into a batch of masks with a batch size of 1 gligen_inpaint_mask_addition = gligen_inpaint_mask_addition.expand(repeat_batch, -1, -1, -1).clone() int(gligen_scheduled_sampling_beta * len(timesteps)) self.enable_fuser(True) # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # 7. 
Denoising loop num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): if latents.shape[1] != 4: latents = torch.randn_like(latents[:, :4]) if gligen_inpaint_image is not None: gligen_inpaint_latent_with_noise = ( self.scheduler.add_noise( gligen_inpaint_latent, torch.randn_like(gligen_inpaint_latent), torch.tensor([t]) ) .expand(latents.shape[0], -1, -1, -1) .clone() ) latents = gligen_inpaint_latent_with_noise * gligen_inpaint_mask + latents * ( 1 - gligen_inpaint_mask ) # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) if gligen_inpaint_image is not None: latent_model_input = torch.cat((latent_model_input, gligen_inpaint_mask_addition), dim=1) # predict the noise residual with grounded information noise_pred_with_grounding = self.unet( latent_model_input, t, encoder_hidden_states=prompt_embeds, cross_attention_kwargs=cross_attention_kwargs, ).sample # predict the noise residual without grounded information noise_pred_without_grounding = self.unet( latent_model_input, t, encoder_hidden_states=prompt_embeds, cross_attention_kwargs=cross_attention_kwargs_without_grounded, ).sample # perform guidance if do_classifier_free_guidance: # Using noise_pred_text from noise residual with grounded information and noise_pred_uncond from noise residual without grounded information _, noise_pred_text = noise_pred_with_grounding.chunk(2) noise_pred_uncond, _ = noise_pred_without_grounding.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) else: noise_pred = noise_pred_with_grounding # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, "order", 1) callback(step_idx, t, latents) if XLA_AVAILABLE: xm.mark_step() if not output_type == "latent": image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) else: image = latents has_nsfw_concept = None if has_nsfw_concept is None: do_denormalize = [True] * image.shape[0] else: do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) # Offload all models self.maybe_free_model_hooks() if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
diffusers/src/diffusers/pipelines/stable_diffusion_gligen/pipeline_stable_diffusion_gligen_text_image.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/stable_diffusion_gligen/pipeline_stable_diffusion_gligen_text_image.py", "repo_id": "diffusers", "token_count": 23219 }
182
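A minimal usage sketch for the GLIGEN text-and-image pipeline above. The checkpoint id and grounding-image URL below are illustrative assumptions, not verified references; only the pipeline class and call arguments come from the source file.

```python
# Hypothetical usage sketch for StableDiffusionGLIGENTextImagePipeline (defined above).
# The repo id and the grounding-image URL are placeholders.
import torch
from diffusers import StableDiffusionGLIGENTextImagePipeline
from diffusers.utils import load_image

pipe = StableDiffusionGLIGENTextImagePipeline.from_pretrained(
    "anhnct/Gligen_Text_Image",  # assumed checkpoint id
    torch_dtype=torch.float16,
).to("cuda")

# One grounding image per bounding box; phrases, images and boxes are aligned by index.
backpack = load_image("https://example.com/backpack.png")  # placeholder URL

image = pipe(
    prompt="a backpack on a park bench",
    gligen_phrases=["a backpack"],
    gligen_images=[backpack],
    gligen_boxes=[[0.3, 0.4, 0.7, 0.8]],  # [xmin, ymin, xmax, ymax], values in [0, 1]
    gligen_scheduled_sampling_beta=0.3,
    num_inference_steps=50,
).images[0]
image.save("gligen_text_image.png")
```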
from dataclasses import dataclass
from typing import List, Union

import numpy as np
import PIL.Image

from ...utils import BaseOutput, is_flax_available


@dataclass
class StableDiffusionXLPipelineOutput(BaseOutput):
    """
    Output class for Stable Diffusion XL pipelines.

    Args:
        images (`List[PIL.Image.Image]` or `np.ndarray`)
            List of denoised PIL images of length `batch_size` or numpy array of shape `(batch_size, height, width,
            num_channels)`. PIL images or numpy array represent the denoised images of the diffusion pipeline.
    """

    images: Union[List[PIL.Image.Image], np.ndarray]


if is_flax_available():
    import flax

    @flax.struct.dataclass
    class FlaxStableDiffusionXLPipelineOutput(BaseOutput):
        """
        Output class for Flax Stable Diffusion XL pipelines.

        Args:
            images (`np.ndarray`)
                Array of shape `(batch_size, height, width, num_channels)` with images from the diffusion pipeline.
        """

        images: np.ndarray
diffusers/src/diffusers/pipelines/stable_diffusion_xl/pipeline_output.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/stable_diffusion_xl/pipeline_output.py", "repo_id": "diffusers", "token_count": 401 }
183
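Because these output classes derive from `BaseOutput`, fields can be read by attribute, by key, or as a plain tuple. A small sketch with made-up dummy data, illustrating that assumed behaviour:

```python
# Sketch of how StableDiffusionXLPipelineOutput is typically consumed.
import numpy as np
from diffusers.pipelines.stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput

dummy = np.zeros((2, 8, 8, 3), dtype=np.float32)  # stand-in for two decoded images
out = StableDiffusionXLPipelineOutput(images=dummy)

assert out.images is out["images"]       # attribute and key access return the same object
assert out.to_tuple()[0] is out.images   # tuple view, as returned when return_dict=False
```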
import copy import inspect from dataclasses import dataclass from typing import Any, Callable, Dict, List, Optional, Tuple, Union import numpy as np import PIL import torch import torch.nn.functional as F from torch.nn.functional import grid_sample from transformers import ( CLIPImageProcessor, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionModelWithProjection, ) from ...image_processor import VaeImageProcessor from ...loaders import StableDiffusionXLLoraLoaderMixin, TextualInversionLoaderMixin from ...models import AutoencoderKL, UNet2DConditionModel from ...models.attention_processor import ( AttnProcessor2_0, FusedAttnProcessor2_0, XFormersAttnProcessor, ) from ...models.lora import adjust_lora_scale_text_encoder from ...schedulers import KarrasDiffusionSchedulers from ...utils import ( USE_PEFT_BACKEND, BaseOutput, is_invisible_watermark_available, logging, scale_lora_layers, unscale_lora_layers, ) from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DeprecatedPipelineMixin, DiffusionPipeline, StableDiffusionMixin if is_invisible_watermark_available(): from ..stable_diffusion_xl.watermark import StableDiffusionXLWatermarker from ...utils import is_torch_xla_available if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) # pylint: disable=invalid-name # Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.rearrange_0 def rearrange_0(tensor, f): F, C, H, W = tensor.size() tensor = torch.permute(torch.reshape(tensor, (F // f, f, C, H, W)), (0, 2, 1, 3, 4)) return tensor # Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.rearrange_1 def rearrange_1(tensor): B, C, F, H, W = tensor.size() return torch.reshape(torch.permute(tensor, (0, 2, 1, 3, 4)), (B * F, C, H, W)) # Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.rearrange_3 def rearrange_3(tensor, f): F, D, C = tensor.size() return torch.reshape(tensor, (F // f, f, D, C)) # Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.rearrange_4 def rearrange_4(tensor): B, F, D, C = tensor.size() return torch.reshape(tensor, (B * F, D, C)) # Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.CrossFrameAttnProcessor class CrossFrameAttnProcessor: """ Cross frame attention processor. Each frame attends the first frame. Args: batch_size: The number that represents actual batch size, other than the frames. For example, calling unet with a single prompt and num_images_per_prompt=1, batch_size should be equal to 2, due to classifier-free guidance. 
""" def __init__(self, batch_size=2): self.batch_size = batch_size def __call__(self, attn, hidden_states, encoder_hidden_states=None, attention_mask=None): batch_size, sequence_length, _ = hidden_states.shape attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) query = attn.to_q(hidden_states) is_cross_attention = encoder_hidden_states is not None if encoder_hidden_states is None: encoder_hidden_states = hidden_states elif attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) # Cross Frame Attention if not is_cross_attention: video_length = key.size()[0] // self.batch_size first_frame_index = [0] * video_length # rearrange keys to have batch and frames in the 1st and 2nd dims respectively key = rearrange_3(key, video_length) key = key[:, first_frame_index] # rearrange values to have batch and frames in the 1st and 2nd dims respectively value = rearrange_3(value, video_length) value = value[:, first_frame_index] # rearrange back to original shape key = rearrange_4(key) value = rearrange_4(value) query = attn.head_to_batch_dim(query) key = attn.head_to_batch_dim(key) value = attn.head_to_batch_dim(value) attention_probs = attn.get_attention_scores(query, key, attention_mask) hidden_states = torch.bmm(attention_probs, value) hidden_states = attn.batch_to_head_dim(hidden_states) # linear proj hidden_states = attn.to_out[0](hidden_states) # dropout hidden_states = attn.to_out[1](hidden_states) return hidden_states # Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.CrossFrameAttnProcessor2_0 class CrossFrameAttnProcessor2_0: """ Cross frame attention processor with scaled_dot_product attention of Pytorch 2.0. Args: batch_size: The number that represents actual batch size, other than the frames. For example, calling unet with a single prompt and num_images_per_prompt=1, batch_size should be equal to 2, due to classifier-free guidance. 
""" def __init__(self, batch_size=2): if not hasattr(F, "scaled_dot_product_attention"): raise ImportError("AttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.") self.batch_size = batch_size def __call__(self, attn, hidden_states, encoder_hidden_states=None, attention_mask=None): batch_size, sequence_length, _ = ( hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape ) inner_dim = hidden_states.shape[-1] if attention_mask is not None: attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) # scaled_dot_product_attention expects attention_mask shape to be # (batch, heads, source_length, target_length) attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1]) query = attn.to_q(hidden_states) is_cross_attention = encoder_hidden_states is not None if encoder_hidden_states is None: encoder_hidden_states = hidden_states elif attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) # Cross Frame Attention if not is_cross_attention: video_length = max(1, key.size()[0] // self.batch_size) first_frame_index = [0] * video_length # rearrange keys to have batch and frames in the 1st and 2nd dims respectively key = rearrange_3(key, video_length) key = key[:, first_frame_index] # rearrange values to have batch and frames in the 1st and 2nd dims respectively value = rearrange_3(value, video_length) value = value[:, first_frame_index] # rearrange back to original shape key = rearrange_4(key) value = rearrange_4(value) head_dim = inner_dim // attn.heads query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) # the output of sdp = (batch, num_heads, seq_len, head_dim) # TODO: add support for attn.scale when we move to Torch 2.1 hidden_states = F.scaled_dot_product_attention( query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False ) hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) hidden_states = hidden_states.to(query.dtype) # linear proj hidden_states = attn.to_out[0](hidden_states) # dropout hidden_states = attn.to_out[1](hidden_states) return hidden_states @dataclass class TextToVideoSDXLPipelineOutput(BaseOutput): """ Output class for zero-shot text-to-video pipeline. Args: images (`List[PIL.Image.Image]` or `np.ndarray`) List of denoised PIL images of length `batch_size` or numpy array of shape `(batch_size, height, width, num_channels)`. PIL images or numpy array present the denoised images of the diffusion pipeline. 
""" images: Union[List[PIL.Image.Image], np.ndarray] # Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.coords_grid def coords_grid(batch, ht, wd, device): # Adapted from https://github.com/princeton-vl/RAFT/blob/master/core/utils/utils.py coords = torch.meshgrid(torch.arange(ht, device=device), torch.arange(wd, device=device)) coords = torch.stack(coords[::-1], dim=0).float() return coords[None].repeat(batch, 1, 1, 1) # Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.warp_single_latent def warp_single_latent(latent, reference_flow): """ Warp latent of a single frame with given flow Args: latent: latent code of a single frame reference_flow: flow which to warp the latent with Returns: warped: warped latent """ _, _, H, W = reference_flow.size() _, _, h, w = latent.size() coords0 = coords_grid(1, H, W, device=latent.device).to(latent.dtype) coords_t0 = coords0 + reference_flow coords_t0[:, 0] /= W coords_t0[:, 1] /= H coords_t0 = coords_t0 * 2.0 - 1.0 coords_t0 = F.interpolate(coords_t0, size=(h, w), mode="bilinear") coords_t0 = torch.permute(coords_t0, (0, 2, 3, 1)) warped = grid_sample(latent, coords_t0, mode="nearest", padding_mode="reflection") return warped # Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.create_motion_field def create_motion_field(motion_field_strength_x, motion_field_strength_y, frame_ids, device, dtype): """ Create translation motion field Args: motion_field_strength_x: motion strength along x-axis motion_field_strength_y: motion strength along y-axis frame_ids: indexes of the frames the latents of which are being processed. This is needed when we perform chunk-by-chunk inference device: device dtype: dtype Returns: """ seq_length = len(frame_ids) reference_flow = torch.zeros((seq_length, 2, 512, 512), device=device, dtype=dtype) for fr_idx in range(seq_length): reference_flow[fr_idx, 0, :, :] = motion_field_strength_x * (frame_ids[fr_idx]) reference_flow[fr_idx, 1, :, :] = motion_field_strength_y * (frame_ids[fr_idx]) return reference_flow # Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.create_motion_field_and_warp_latents def create_motion_field_and_warp_latents(motion_field_strength_x, motion_field_strength_y, frame_ids, latents): """ Creates translation motion and warps the latents accordingly Args: motion_field_strength_x: motion strength along x-axis motion_field_strength_y: motion strength along y-axis frame_ids: indexes of the frames the latents of which are being processed. This is needed when we perform chunk-by-chunk inference latents: latent codes of frames Returns: warped_latents: warped latents """ motion_field = create_motion_field( motion_field_strength_x=motion_field_strength_x, motion_field_strength_y=motion_field_strength_y, frame_ids=frame_ids, device=latents.device, dtype=latents.dtype, ) warped_latents = latents.clone().detach() for i in range(len(warped_latents)): warped_latents[i] = warp_single_latent(latents[i][None], motion_field[i][None]) return warped_latents # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): r""" Rescales `noise_cfg` tensor based on `guidance_rescale` to improve image quality and fix overexposure. Based on Section 3.4 from [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://huggingface.co/papers/2305.08891). 
Args: noise_cfg (`torch.Tensor`): The predicted noise tensor for the guided diffusion process. noise_pred_text (`torch.Tensor`): The predicted noise tensor for the text-guided diffusion process. guidance_rescale (`float`, *optional*, defaults to 0.0): A rescale factor applied to the noise predictions. Returns: noise_cfg (`torch.Tensor`): The rescaled noise prediction tensor. """ std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) # rescale the results from guidance (fixes overexposure) noise_pred_rescaled = noise_cfg * (std_text / std_cfg) # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg return noise_cfg class TextToVideoZeroSDXLPipeline( DeprecatedPipelineMixin, DiffusionPipeline, StableDiffusionMixin, StableDiffusionXLLoraLoaderMixin, TextualInversionLoaderMixin, ): _last_supported_version = "0.33.1" r""" Pipeline for zero-shot text-to-video generation using Stable Diffusion XL. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. text_encoder ([`CLIPTextModel`]): Frozen text-encoder. Stable Diffusion XL uses the text portion of [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. text_encoder_2 ([` CLIPTextModelWithProjection`]): Second frozen text-encoder. Stable Diffusion XL uses the text and pool portion of [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection), specifically the [laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k) variant. tokenizer (`CLIPTokenizer`): Tokenizer of class [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). tokenizer_2 (`CLIPTokenizer`): Second Tokenizer of class [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. 
""" model_cpu_offload_seq = "text_encoder->text_encoder_2->unet->vae" _optional_components = [ "tokenizer", "tokenizer_2", "text_encoder", "text_encoder_2", "image_encoder", "feature_extractor", ] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, text_encoder_2: CLIPTextModelWithProjection, tokenizer: CLIPTokenizer, tokenizer_2: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: KarrasDiffusionSchedulers, image_encoder: CLIPVisionModelWithProjection = None, feature_extractor: CLIPImageProcessor = None, force_zeros_for_empty_prompt: bool = True, add_watermarker: Optional[bool] = None, ): super().__init__() self.register_modules( vae=vae, text_encoder=text_encoder, text_encoder_2=text_encoder_2, tokenizer=tokenizer, tokenizer_2=tokenizer_2, unet=unet, scheduler=scheduler, image_encoder=image_encoder, feature_extractor=feature_extractor, ) self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8 self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.default_sample_size = ( self.unet.config.sample_size if hasattr(self, "unet") and self.unet is not None and hasattr(self.unet.config, "sample_size") else 128 ) add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available() if add_watermarker: self.watermark = StableDiffusionXLWatermarker() else: self.watermark = None # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://huggingface.co/papers/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.upcast_vae def upcast_vae(self): dtype = self.vae.dtype self.vae.to(dtype=torch.float32) use_torch_2_0_or_xformers = isinstance( self.vae.decoder.mid_block.attentions[0].processor, ( AttnProcessor2_0, XFormersAttnProcessor, FusedAttnProcessor2_0, ), ) # if xformers or torch_2_0 is used attention block does not need # to be in float32 which can save lots of memory if use_torch_2_0_or_xformers: self.vae.post_quant_conv.to(dtype) self.vae.decoder.conv_in.to(dtype) self.vae.decoder.mid_block.to(dtype) # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline._get_add_time_ids def _get_add_time_ids( self, original_size, crops_coords_top_left, target_size, dtype, text_encoder_projection_dim=None ): add_time_ids = list(original_size + crops_coords_top_left + target_size) passed_add_embed_dim = ( self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim ) expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features if expected_add_embed_dim != passed_add_embed_dim: raise ValueError( f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`." ) add_time_ids = torch.tensor([add_time_ids], dtype=dtype) return add_time_ids # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = ( batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor, ) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
) if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents def check_inputs( self, prompt, prompt_2, height, width, callback_steps, negative_prompt=None, negative_prompt_2=None, prompt_embeds=None, negative_prompt_embeds=None, pooled_prompt_embeds=None, negative_pooled_prompt_embeds=None, callback_on_step_end_tensor_inputs=None, ): if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) if callback_on_step_end_tensor_inputs is not None and not all( k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs ): raise ValueError( f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" ) if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt_2 is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." ) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)): raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}") if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) elif negative_prompt_2 is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." ) if prompt_embeds is not None and pooled_prompt_embeds is None: raise ValueError( "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`." 
) if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None: raise ValueError( "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`." ) # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.encode_prompt def encode_prompt( self, prompt: str, prompt_2: Optional[str] = None, device: Optional[torch.device] = None, num_images_per_prompt: int = 1, do_classifier_free_guidance: bool = True, negative_prompt: Optional[str] = None, negative_prompt_2: Optional[str] = None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, pooled_prompt_embeds: Optional[torch.Tensor] = None, negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, lora_scale: Optional[float] = None, clip_skip: Optional[int] = None, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded prompt_2 (`str` or `List[str]`, *optional*): The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is used in both text-encoders device: (`torch.device`): torch device num_images_per_prompt (`int`): number of images that should be generated per prompt do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). negative_prompt_2 (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. pooled_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, pooled text embeddings will be generated from `prompt` input argument. negative_pooled_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` input argument. lora_scale (`float`, *optional*): A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. 
""" device = device or self._execution_device # set lora scale so that monkey patched LoRA # function of text encoder can correctly access it if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin): self._lora_scale = lora_scale # dynamically adjust the LoRA scale if self.text_encoder is not None: if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if self.text_encoder_2 is not None: if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale) else: scale_lora_layers(self.text_encoder_2, lora_scale) prompt = [prompt] if isinstance(prompt, str) else prompt if prompt is not None: batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] # Define tokenizers and text encoders tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2] text_encoders = ( [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2] ) if prompt_embeds is None: prompt_2 = prompt_2 or prompt prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 # textual inversion: process multi-vector tokens if necessary prompt_embeds_list = [] prompts = [prompt, prompt_2] for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders): if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, tokenizer) text_inputs = tokenizer( prompt, padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( text_input_ids, untruncated_ids ): removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1]) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {tokenizer.model_max_length} tokens: {removed_text}" ) prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True) # We are only ALWAYS interested in the pooled output of the final text encoder if pooled_prompt_embeds is None and prompt_embeds[0].ndim == 2: pooled_prompt_embeds = prompt_embeds[0] if clip_skip is None: prompt_embeds = prompt_embeds.hidden_states[-2] else: # "2" because SDXL always indexes from the penultimate layer. 
prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)] prompt_embeds_list.append(prompt_embeds) prompt_embeds = torch.concat(prompt_embeds_list, dim=-1) # get unconditional embeddings for classifier free guidance zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt: negative_prompt_embeds = torch.zeros_like(prompt_embeds) negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds) elif do_classifier_free_guidance and negative_prompt_embeds is None: negative_prompt = negative_prompt or "" negative_prompt_2 = negative_prompt_2 or negative_prompt # normalize str to list negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt negative_prompt_2 = ( batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2 ) uncond_tokens: List[str] if prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = [negative_prompt, negative_prompt_2] negative_prompt_embeds_list = [] for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders): if isinstance(self, TextualInversionLoaderMixin): negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer) max_length = prompt_embeds.shape[1] uncond_input = tokenizer( negative_prompt, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) negative_prompt_embeds = text_encoder( uncond_input.input_ids.to(device), output_hidden_states=True, ) # We are only ALWAYS interested in the pooled output of the final text encoder if negative_pooled_prompt_embeds is None and negative_prompt_embeds[0].ndim == 2: negative_pooled_prompt_embeds = negative_prompt_embeds[0] negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2] negative_prompt_embeds_list.append(negative_prompt_embeds) negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1) if self.text_encoder_2 is not None: prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) else: prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=device) bs_embed, seq_len, _ = prompt_embeds.shape # duplicate text embeddings for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance: # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] if self.text_encoder_2 is not None: negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) else: negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.unet.dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) pooled_prompt_embeds = 
pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( bs_embed * num_images_per_prompt, -1 ) if do_classifier_free_guidance: negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( bs_embed * num_images_per_prompt, -1 ) if self.text_encoder is not None: if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: # Retrieve the original scale by scaling back the LoRA layers unscale_lora_layers(self.text_encoder, lora_scale) if self.text_encoder_2 is not None: if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: # Retrieve the original scale by scaling back the LoRA layers unscale_lora_layers(self.text_encoder_2, lora_scale) return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds # Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.TextToVideoZeroPipeline.forward_loop def forward_loop(self, x_t0, t0, t1, generator): """ Perform DDPM forward process from time t0 to t1. This is the same as adding noise with corresponding variance. Args: x_t0: Latent code at time t0. t0: Timestep at t0. t1: Timestamp at t1. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. Returns: x_t1: Forward process applied to x_t0 from time t0 to t1. """ eps = randn_tensor(x_t0.size(), generator=generator, dtype=x_t0.dtype, device=x_t0.device) alpha_vec = torch.prod(self.scheduler.alphas[t0:t1]) x_t1 = torch.sqrt(alpha_vec) * x_t0 + torch.sqrt(1 - alpha_vec) * eps return x_t1 def backward_loop( self, latents, timesteps, prompt_embeds, guidance_scale, callback, callback_steps, num_warmup_steps, extra_step_kwargs, add_text_embeds, add_time_ids, cross_attention_kwargs=None, guidance_rescale: float = 0.0, ): """ Perform backward process given list of time steps Args: latents: Latents at time timesteps[0]. timesteps: Time steps along which to perform backward process. prompt_embeds: Pre-generated text embeddings. guidance_scale: A higher guidance scale value encourages the model to generate images closely linked to the text `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. callback (`Callable`, *optional*): A function that calls every `callback_steps` steps during inference. The function is called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function is called. If not specified, the callback is called at every step. extra_step_kwargs: Extra_step_kwargs. cross_attention_kwargs: A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). num_warmup_steps: number of warmup steps. 
Returns: latents: latents of backward process output at time timesteps[-1] """ do_classifier_free_guidance = guidance_scale > 1.0 num_steps = (len(timesteps) - num_warmup_steps) // self.scheduler.order with self.progress_bar(total=num_steps) as progress_bar: for i, t in enumerate(timesteps): # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) # predict the noise residual added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids} noise_pred = self.unet( latent_model_input, t, encoder_hidden_states=prompt_embeds, cross_attention_kwargs=cross_attention_kwargs, added_cond_kwargs=added_cond_kwargs, return_dict=False, )[0] # perform guidance if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) if do_classifier_free_guidance and guidance_rescale > 0.0: # Based on 3.4. in https://huggingface.co/papers/2305.08891 noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: callback(i, t, latents) if XLA_AVAILABLE: xm.mark_step() return latents.clone().detach() @torch.no_grad() def __call__( self, prompt: Union[str, List[str]], prompt_2: Optional[Union[str, List[str]]] = None, video_length: Optional[int] = 8, height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 50, denoising_end: Optional[float] = None, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, negative_prompt_2: Optional[Union[str, List[str]]] = None, num_videos_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, frame_ids: Optional[List[int]] = None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, pooled_prompt_embeds: Optional[torch.Tensor] = None, negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, latents: Optional[torch.Tensor] = None, motion_field_strength_x: float = 12, motion_field_strength_y: float = 12, output_type: Optional[str] = "tensor", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, callback_steps: int = 1, cross_attention_kwargs: Optional[Dict[str, Any]] = None, guidance_rescale: float = 0.0, original_size: Optional[Tuple[int, int]] = None, crops_coords_top_left: Tuple[int, int] = (0, 0), target_size: Optional[Tuple[int, int]] = None, t0: int = 44, t1: int = 47, ): """ Function invoked when calling the pipeline for generation. Args: prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. instead. prompt_2 (`str` or `List[str]`, *optional*): The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is used in both text-encoders video_length (`int`, *optional*, defaults to 8): The number of generated video frames. 
height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): The height in pixels of the generated image. width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): The width in pixels of the generated image. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. denoising_end (`float`, *optional*): When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be completed before it is intentionally prematurely terminated. As a result, the returned sample will still retain a substantial amount of noise as determined by the discrete timesteps selected by the scheduler. The denoising_end parameter should ideally be utilized when this pipeline forms a part of a "Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output) guidance_scale (`float`, *optional*, defaults to 7.5): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). negative_prompt_2 (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders num_videos_per_prompt (`int`, *optional*, defaults to 1): The number of videos to generate per prompt. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) in the DDIM paper: https://huggingface.co/papers/2010.02502. Only applies to [`schedulers.DDIMScheduler`], will be ignored for others. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. frame_ids (`List[int]`, *optional*): Indexes of the frames that are being generated. This is used when generating longer videos chunk-by-chunk. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. pooled_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, pooled text embeddings will be generated from `prompt` input argument. 
negative_pooled_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` input argument. latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor will be generated by sampling using the supplied random `generator`. motion_field_strength_x (`float`, *optional*, defaults to 12): Strength of motion in generated video along x-axis. See the [paper](https://huggingface.co/papers/2303.13439), Sect. 3.3.1. motion_field_strength_y (`float`, *optional*, defaults to 12): Strength of motion in generated video along y-axis. See the [paper](https://huggingface.co/papers/2303.13439), Sect. 3.3.1. output_type (`str`, *optional*, defaults to `"tensor"`): The output format of the generated image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] instead of a plain tuple. callback (`Callable`, *optional*): A function that will be called every `callback_steps` steps during inference. The function will be called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function will be called. If not specified, the callback will be called at every step. cross_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py). guidance_rescale (`float`, *optional*, defaults to 0.0): Guidance rescale factor proposed by [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://huggingface.co/papers/2305.08891). `guidance_rescale` is defined as `φ` in equation 16 of [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://huggingface.co/papers/2305.08891). Guidance rescale factor should fix overexposure when using zero terminal SNR. original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled. `original_size` defaults to `(width, height)` if not specified. Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): For most cases, `target_size` should be set to the desired height and width of the generated image. If not specified it will default to `(width, height)`. 
Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). t0 (`int`, *optional*, defaults to 44): Timestep t0. Should be in the range [0, num_inference_steps - 1]. See the [paper](https://huggingface.co/papers/2303.13439), Sect. 3.3.1. t1 (`int`, *optional*, defaults to 47): Timestep t0. Should be in the range [t0 + 1, num_inference_steps - 1]. See the [paper](https://huggingface.co/papers/2303.13439), Sect. 3.3.1. Returns: [`~pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.TextToVideoSDXLPipelineOutput`] or `tuple`: [`~pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.TextToVideoSDXLPipelineOutput`] if `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is a list with the generated images. """ assert video_length > 0 if frame_ids is None: frame_ids = list(range(video_length)) assert len(frame_ids) == video_length assert num_videos_per_prompt == 1 # set the processor original_attn_proc = self.unet.attn_processors processor = ( CrossFrameAttnProcessor2_0(batch_size=2) if hasattr(F, "scaled_dot_product_attention") else CrossFrameAttnProcessor(batch_size=2) ) self.unet.set_attn_processor(processor) if isinstance(prompt, str): prompt = [prompt] if isinstance(negative_prompt, str): negative_prompt = [negative_prompt] # 0. Default height and width to unet height = height or self.default_sample_size * self.vae_scale_factor width = width or self.default_sample_size * self.vae_scale_factor original_size = original_size or (height, width) target_size = target_size or (height, width) # 1. Check inputs. Raise error if not correct self.check_inputs( prompt, prompt_2, height, width, callback_steps, negative_prompt, negative_prompt_2, prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds, ) # 2. Define call parameters batch_size = ( 1 if isinstance(prompt, str) else len(prompt) if isinstance(prompt, list) else prompt_embeds.shape[0] ) device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1` # corresponds to doing no classifier free guidance. do_classifier_free_guidance = guidance_scale > 1.0 # 3. Encode input prompt text_encoder_lora_scale = ( cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None ) ( prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds, ) = self.encode_prompt( prompt=prompt, prompt_2=prompt_2, device=device, num_images_per_prompt=num_videos_per_prompt, do_classifier_free_guidance=do_classifier_free_guidance, negative_prompt=negative_prompt, negative_prompt_2=negative_prompt_2, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, lora_scale=text_encoder_lora_scale, ) # 4. Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps # 5. Prepare latent variables num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents( batch_size * num_videos_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents, ) # 6. Prepare extra step kwargs. 
TODO: Logic should ideally just be moved out of the pipeline extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # 7. Prepare added time ids & embeddings add_text_embeds = pooled_prompt_embeds if self.text_encoder_2 is None: text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1]) else: text_encoder_projection_dim = self.text_encoder_2.config.projection_dim add_time_ids = self._get_add_time_ids( original_size, crops_coords_top_left, target_size, dtype=prompt_embeds.dtype, text_encoder_projection_dim=text_encoder_projection_dim, ) if do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0) add_time_ids = torch.cat([add_time_ids, add_time_ids], dim=0) prompt_embeds = prompt_embeds.to(device) add_text_embeds = add_text_embeds.to(device) add_time_ids = add_time_ids.to(device).repeat(batch_size * num_videos_per_prompt, 1) num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order # Perform the first backward process up to time T_1 x_1_t1 = self.backward_loop( timesteps=timesteps[: -t1 - 1], prompt_embeds=prompt_embeds, latents=latents, guidance_scale=guidance_scale, callback=callback, callback_steps=callback_steps, extra_step_kwargs=extra_step_kwargs, num_warmup_steps=num_warmup_steps, add_text_embeds=add_text_embeds, add_time_ids=add_time_ids, ) scheduler_copy = copy.deepcopy(self.scheduler) # Perform the second backward process up to time T_0 x_1_t0 = self.backward_loop( timesteps=timesteps[-t1 - 1 : -t0 - 1], prompt_embeds=prompt_embeds, latents=x_1_t1, guidance_scale=guidance_scale, callback=callback, callback_steps=callback_steps, extra_step_kwargs=extra_step_kwargs, num_warmup_steps=0, add_text_embeds=add_text_embeds, add_time_ids=add_time_ids, ) # Propagate first frame latents at time T_0 to remaining frames x_2k_t0 = x_1_t0.repeat(video_length - 1, 1, 1, 1) # Add motion in latents at time T_0 x_2k_t0 = create_motion_field_and_warp_latents( motion_field_strength_x=motion_field_strength_x, motion_field_strength_y=motion_field_strength_y, latents=x_2k_t0, frame_ids=frame_ids[1:], ) # Perform forward process up to time T_1 x_2k_t1 = self.forward_loop( x_t0=x_2k_t0, t0=timesteps[-t0 - 1].to(torch.long), t1=timesteps[-t1 - 1].to(torch.long), generator=generator, ) # Perform backward process from time T_1 to 0 latents = torch.cat([x_1_t1, x_2k_t1]) self.scheduler = scheduler_copy timesteps = timesteps[-t1 - 1 :] b, l, d = prompt_embeds.size() prompt_embeds = prompt_embeds[:, None].repeat(1, video_length, 1, 1).reshape(b * video_length, l, d) b, k = add_text_embeds.size() add_text_embeds = add_text_embeds[:, None].repeat(1, video_length, 1).reshape(b * video_length, k) b, k = add_time_ids.size() add_time_ids = add_time_ids[:, None].repeat(1, video_length, 1).reshape(b * video_length, k) # 7.1 Apply denoising_end if denoising_end is not None and isinstance(denoising_end, float) and denoising_end > 0 and denoising_end < 1: discrete_timestep_cutoff = int( round( self.scheduler.config.num_train_timesteps - (denoising_end * self.scheduler.config.num_train_timesteps) ) ) num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps))) timesteps = timesteps[:num_inference_steps] x_1k_0 = self.backward_loop( timesteps=timesteps, prompt_embeds=prompt_embeds, latents=latents, guidance_scale=guidance_scale, callback=callback, callback_steps=callback_steps, extra_step_kwargs=extra_step_kwargs, 
num_warmup_steps=0, add_text_embeds=add_text_embeds, add_time_ids=add_time_ids, ) latents = x_1k_0 if not output_type == "latent": # make sure the VAE is in float32 mode, as it overflows in float16 needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast if needs_upcasting: self.upcast_vae() latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] # cast back to fp16 if needed if needs_upcasting: self.vae.to(dtype=torch.float16) else: image = latents return TextToVideoSDXLPipelineOutput(images=image) # apply watermark if available if self.watermark is not None: image = self.watermark.apply_watermark(image) image = self.image_processor.postprocess(image, output_type=output_type) self.maybe_free_model_hooks() # make sure to set the original attention processors back self.unet.set_attn_processor(original_attn_proc) if not return_dict: return (image,) return TextToVideoSDXLPipelineOutput(images=image)
diffusers/src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero_sdxl.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero_sdxl.py", "repo_id": "diffusers", "token_count": 28984 }
184
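The `__call__` body above implements the Text2Video-Zero scheme on top of SDXL: a backward loop to time T_1, a shorter backward loop to T_0 for the first frame, latent warping with the motion field, a forward (noising) loop back to T_1, and a final joint backward pass with cross-frame attention. A minimal usage sketch, assuming the public `TextToVideoZeroSDXLPipeline` entry point and an SDXL base checkpoint (neither name appears in this excerpt, so treat both as assumptions; any SDXL text-to-image checkpoint should work):

```python
import torch

from diffusers import TextToVideoZeroSDXLPipeline

# assumed model id; any SDXL base checkpoint should work here
pipe = TextToVideoZeroSDXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")

result = pipe(
    prompt="a panda surfing a wave, cinematic",
    video_length=8,              # number of generated frames
    t0=44,                       # forward/backward split, see Sect. 3.3.1 of the paper
    t1=47,
    motion_field_strength_x=12,  # strength of the simulated camera motion
    motion_field_strength_y=12,
)
frames = result.images  # one image per frame
```

Note that `num_videos_per_prompt` must stay at 1 (asserted at the top of the call) and that the original attention processors are restored before the pipeline returns.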
from .bnb_quantizer import BnB4BitDiffusersQuantizer, BnB8BitDiffusersQuantizer from .utils import dequantize_and_replace, dequantize_bnb_weight, replace_with_bnb_linear
diffusers/src/diffusers/quantizers/bitsandbytes/__init__.py/0
{ "file_path": "diffusers/src/diffusers/quantizers/bitsandbytes/__init__.py", "repo_id": "diffusers", "token_count": 55 }
185
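These bitsandbytes helpers are normally driven indirectly through a quantization config rather than called by hand. A hedged sketch of the usual entry point, assuming diffusers' `BitsAndBytesConfig` and the SD3.5 transformer as the quantized module (the repository id is illustrative):

```python
import torch

from diffusers import BitsAndBytesConfig, SD3Transformer2DModel

# 4-bit NF4 quantization; when this config is passed, the 4-bit quantizer
# swaps the model's nn.Linear layers via replace_with_bnb_linear at load time.
quant_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)

transformer = SD3Transformer2DModel.from_pretrained(
    "stabilityai/stable-diffusion-3.5-large",
    subfolder="transformer",
    quantization_config=quant_config,
    torch_dtype=torch.bfloat16,
)
```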
# Copyright 2025 NVIDIA and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch from ...configuration_utils import ConfigMixin, register_to_config from ...utils import BaseOutput from ...utils.torch_utils import randn_tensor from ..scheduling_utils import SchedulerMixin @dataclass class KarrasVeOutput(BaseOutput): """ Output class for the scheduler's step function output. Args: prev_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images): Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the denoising loop. derivative (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images): Derivative of predicted original image sample (x_0). pred_original_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images): The predicted denoised sample (x_{0}) based on the model output from the current timestep. `pred_original_sample` can be used to preview progress or for guidance. """ prev_sample: torch.Tensor derivative: torch.Tensor pred_original_sample: Optional[torch.Tensor] = None class KarrasVeScheduler(SchedulerMixin, ConfigMixin): """ A stochastic scheduler tailored to variance-expanding models. This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic methods the library implements for all schedulers such as loading and saving. <Tip> For more details on the parameters, see [Appendix E](https://huggingface.co/papers/2206.00364). The grid search values used to find the optimal `{s_noise, s_churn, s_min, s_max}` for a specific model are described in Table 5 of the paper. </Tip> Args: sigma_min (`float`, defaults to 0.02): The minimum noise magnitude. sigma_max (`float`, defaults to 100): The maximum noise magnitude. s_noise (`float`, defaults to 1.007): The amount of additional noise to counteract loss of detail during sampling. A reasonable range is [1.000, 1.011]. s_churn (`float`, defaults to 80): The parameter controlling the overall amount of stochasticity. A reasonable range is [0, 100]. s_min (`float`, defaults to 0.05): The start value of the sigma range to add noise (enable stochasticity). A reasonable range is [0, 10]. s_max (`float`, defaults to 50): The end value of the sigma range to add noise. A reasonable range is [0.2, 80]. 
""" order = 2 @register_to_config def __init__( self, sigma_min: float = 0.02, sigma_max: float = 100, s_noise: float = 1.007, s_churn: float = 80, s_min: float = 0.05, s_max: float = 50, ): # standard deviation of the initial noise distribution self.init_noise_sigma = sigma_max # setable values self.num_inference_steps: int = None self.timesteps: np.IntTensor = None self.schedule: torch.Tensor = None # sigma(t_i) def scale_model_input(self, sample: torch.Tensor, timestep: Optional[int] = None) -> torch.Tensor: """ Ensures interchangeability with schedulers that need to scale the denoising model input depending on the current timestep. Args: sample (`torch.Tensor`): The input sample. timestep (`int`, *optional*): The current timestep in the diffusion chain. Returns: `torch.Tensor`: A scaled input sample. """ return sample def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None): """ Sets the discrete timesteps used for the diffusion chain (to be run before inference). Args: num_inference_steps (`int`): The number of diffusion steps used when generating samples with a pre-trained model. device (`str` or `torch.device`, *optional*): The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. """ self.num_inference_steps = num_inference_steps timesteps = np.arange(0, self.num_inference_steps)[::-1].copy() self.timesteps = torch.from_numpy(timesteps).to(device) schedule = [ ( self.config.sigma_max**2 * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1)) ) for i in self.timesteps ] self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device) def add_noise_to_input( self, sample: torch.Tensor, sigma: float, generator: Optional[torch.Generator] = None ) -> Tuple[torch.Tensor, float]: """ Explicit Langevin-like "churn" step of adding noise to the sample according to a `gamma_i ≥ 0` to reach a higher noise level `sigma_hat = sigma_i + gamma_i*sigma_i`. Args: sample (`torch.Tensor`): The input sample. sigma (`float`): generator (`torch.Generator`, *optional*): A random number generator. """ if self.config.s_min <= sigma <= self.config.s_max: gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1) else: gamma = 0 # sample eps ~ N(0, S_noise^2 * I) eps = self.config.s_noise * randn_tensor(sample.shape, generator=generator).to(sample.device) sigma_hat = sigma + gamma * sigma sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps) return sample_hat, sigma_hat def step( self, model_output: torch.Tensor, sigma_hat: float, sigma_prev: float, sample_hat: torch.Tensor, return_dict: bool = True, ) -> Union[KarrasVeOutput, Tuple]: """ Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion process from the learned model outputs (most often the predicted noise). Args: model_output (`torch.Tensor`): The direct output from learned diffusion model. sigma_hat (`float`): sigma_prev (`float`): sample_hat (`torch.Tensor`): return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~schedulers.scheduling_karras_ve.KarrasVESchedulerOutput`] or `tuple`. Returns: [`~schedulers.scheduling_karras_ve.KarrasVESchedulerOutput`] or `tuple`: If return_dict is `True`, [`~schedulers.scheduling_karras_ve.KarrasVESchedulerOutput`] is returned, otherwise a tuple is returned where the first element is the sample tensor. 
""" pred_original_sample = sample_hat + sigma_hat * model_output derivative = (sample_hat - pred_original_sample) / sigma_hat sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative if not return_dict: return (sample_prev, derivative) return KarrasVeOutput( prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample ) def step_correct( self, model_output: torch.Tensor, sigma_hat: float, sigma_prev: float, sample_hat: torch.Tensor, sample_prev: torch.Tensor, derivative: torch.Tensor, return_dict: bool = True, ) -> Union[KarrasVeOutput, Tuple]: """ Corrects the predicted sample based on the `model_output` of the network. Args: model_output (`torch.Tensor`): The direct output from learned diffusion model. sigma_hat (`float`): TODO sigma_prev (`float`): TODO sample_hat (`torch.Tensor`): TODO sample_prev (`torch.Tensor`): TODO derivative (`torch.Tensor`): TODO return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~schedulers.scheduling_ddpm.DDPMSchedulerOutput`] or `tuple`. Returns: prev_sample (TODO): updated sample in the diffusion chain. derivative (TODO): TODO """ pred_original_sample = sample_prev + sigma_prev * model_output derivative_corr = (sample_prev - pred_original_sample) / sigma_prev sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr) if not return_dict: return (sample_prev, derivative) return KarrasVeOutput( prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample ) def add_noise(self, original_samples, noise, timesteps): raise NotImplementedError()
diffusers/src/diffusers/schedulers/deprecated/scheduling_karras_ve.py/0
{ "file_path": "diffusers/src/diffusers/schedulers/deprecated/scheduling_karras_ve.py", "repo_id": "diffusers", "token_count": 4068 }
186
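Taken together, `add_noise_to_input`, `step`, and `step_correct` implement the stochastic second-order sampler from Karras et al.: churn the sample up to `sigma_hat`, take an Euler step down to `sigma_prev`, then correct with the average of the two derivatives. A minimal sketch of that loop, with a toy denoiser standing in for a trained network (the denoiser and its input scaling are placeholders, not the real pipeline's conditioning):

```python
import torch

from diffusers import KarrasVeScheduler


def toy_denoiser(x, sigma):
    # placeholder for a trained model; returns a derivative-like estimate
    return -x / (sigma**2 + 1.0) ** 0.5


scheduler = KarrasVeScheduler()
scheduler.set_timesteps(50)

sample = torch.randn(1, 3, 64, 64) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    t = int(t)
    sigma = scheduler.schedule[t].item()
    sigma_prev = scheduler.schedule[t - 1].item() if t > 0 else 0.0

    # 1. stochastic "churn": raise the noise level from sigma to sigma_hat
    sample_hat, sigma_hat = scheduler.add_noise_to_input(sample, sigma)

    # 2. Euler step from sigma_hat down to sigma_prev
    out = scheduler.step(toy_denoiser(sample_hat, sigma_hat), sigma_hat, sigma_prev, sample_hat)

    # 3. second-order correction, skipped on the final step (sigma_prev == 0)
    if sigma_prev != 0.0:
        out = scheduler.step_correct(
            toy_denoiser(out.prev_sample, sigma_prev),
            sigma_hat,
            sigma_prev,
            sample_hat,
            out.prev_sample,
            out.derivative,
        )
    sample = out.prev_sample
```

The indexing mirrors how the scheduler is meant to be driven: `schedule` is indexed by the timestep value, so iterating `timesteps` from high to low walks the noise levels from `sigma_max` down to `sigma_min`, with `sigma_prev = 0` on the last step.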
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import importlib import os from dataclasses import dataclass from enum import Enum from typing import Optional, Union import torch from huggingface_hub.utils import validate_hf_hub_args from typing_extensions import Self from ..utils import BaseOutput, PushToHubMixin SCHEDULER_CONFIG_NAME = "scheduler_config.json" # NOTE: We make this type an enum because it simplifies usage in docs and prevents # circular imports when used for `_compatibles` within the schedulers module. # When it's used as a type in pipelines, it really is a Union because the actual # scheduler instance is passed in. class KarrasDiffusionSchedulers(Enum): DDIMScheduler = 1 DDPMScheduler = 2 PNDMScheduler = 3 LMSDiscreteScheduler = 4 EulerDiscreteScheduler = 5 HeunDiscreteScheduler = 6 EulerAncestralDiscreteScheduler = 7 DPMSolverMultistepScheduler = 8 DPMSolverSinglestepScheduler = 9 KDPM2DiscreteScheduler = 10 KDPM2AncestralDiscreteScheduler = 11 DEISMultistepScheduler = 12 UniPCMultistepScheduler = 13 DPMSolverSDEScheduler = 14 EDMEulerScheduler = 15 AysSchedules = { "StableDiffusionTimesteps": [999, 850, 736, 645, 545, 455, 343, 233, 124, 24], "StableDiffusionSigmas": [14.615, 6.475, 3.861, 2.697, 1.886, 1.396, 0.963, 0.652, 0.399, 0.152, 0.0], "StableDiffusionXLTimesteps": [999, 845, 730, 587, 443, 310, 193, 116, 53, 13], "StableDiffusionXLSigmas": [14.615, 6.315, 3.771, 2.181, 1.342, 0.862, 0.555, 0.380, 0.234, 0.113, 0.0], "StableDiffusionVideoSigmas": [700.00, 54.5, 15.886, 7.977, 4.248, 1.789, 0.981, 0.403, 0.173, 0.034, 0.0], } @dataclass class SchedulerOutput(BaseOutput): """ Base class for the output of a scheduler's `step` function. Args: prev_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images): Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the denoising loop. """ prev_sample: torch.Tensor class SchedulerMixin(PushToHubMixin): """ Base class for all schedulers. [`SchedulerMixin`] contains common functions shared by all schedulers such as general loading and saving functionalities. [`ConfigMixin`] takes care of storing the configuration attributes (like `num_train_timesteps`) that are passed to the scheduler's `__init__` function, and the attributes can be accessed by `scheduler.config.num_train_timesteps`. Class attributes: - **_compatibles** (`List[str]`) -- A list of scheduler classes that are compatible with the parent scheduler class. Use [`~ConfigMixin.from_config`] to load a different compatible scheduler class (should be overridden by parent class). 
""" config_name = SCHEDULER_CONFIG_NAME _compatibles = [] has_compatibles = True @classmethod @validate_hf_hub_args def from_pretrained( cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]] = None, subfolder: Optional[str] = None, return_unused_kwargs=False, **kwargs, ) -> Self: r""" Instantiate a scheduler from a pre-defined JSON configuration file in a local directory or Hub repository. Parameters: pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*): Can be either: - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on the Hub. - A path to a *directory* (for example `./my_model_directory`) containing the scheduler configuration saved with [`~SchedulerMixin.save_pretrained`]. subfolder (`str`, *optional*): The subfolder location of a model file within a larger model repository on the Hub or locally. return_unused_kwargs (`bool`, *optional*, defaults to `False`): Whether kwargs that are not consumed by the Python class should be returned or not. cache_dir (`Union[str, os.PathLike]`, *optional*): Path to a directory where a downloaded pretrained model configuration is cached if the standard cache is not used. force_download (`bool`, *optional*, defaults to `False`): Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. output_loading_info(`bool`, *optional*, defaults to `False`): Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages. local_files_only(`bool`, *optional*, defaults to `False`): Whether to only load local model weights and configuration files or not. If set to `True`, the model won't be downloaded from the Hub. token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from `diffusers-cli login` (stored in `~/.huggingface`) is used. revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier allowed by Git. <Tip> To use private or [gated models](https://huggingface.co/docs/hub/models-gated#gated-models), log-in with `hf auth login`. You can also activate the special ["offline-mode"](https://huggingface.co/diffusers/installation.html#offline-mode) to use this method in a firewalled environment. </Tip> """ config, kwargs, commit_hash = cls.load_config( pretrained_model_name_or_path=pretrained_model_name_or_path, subfolder=subfolder, return_unused_kwargs=True, return_commit_hash=True, **kwargs, ) return cls.from_config(config, return_unused_kwargs=return_unused_kwargs, **kwargs) def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs): """ Save a scheduler configuration object to a directory so that it can be reloaded using the [`~SchedulerMixin.from_pretrained`] class method. Args: save_directory (`str` or `os.PathLike`): Directory where the configuration JSON file will be saved (will be created if it does not exist). push_to_hub (`bool`, *optional*, defaults to `False`): Whether or not to push your model to the Hugging Face Hub after saving it. 
You can specify the repository you want to push to with `repo_id` (will default to the name of `save_directory` in your namespace). kwargs (`Dict[str, Any]`, *optional*): Additional keyword arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method. """ self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs) @property def compatibles(self): """ Returns all schedulers that are compatible with this scheduler Returns: `List[SchedulerMixin]`: List of compatible schedulers """ return self._get_compatibles() @classmethod def _get_compatibles(cls): compatible_classes_str = list(set([cls.__name__] + cls._compatibles)) diffusers_library = importlib.import_module(__name__.split(".")[0]) compatible_classes = [ getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c) ] return compatible_classes
diffusers/src/diffusers/schedulers/scheduling_utils.py/0
{ "file_path": "diffusers/src/diffusers/schedulers/scheduling_utils.py", "repo_id": "diffusers", "token_count": 3413 }
187
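In practice the mixin above is exercised through three entry points: loading a scheduler config from the Hub, swapping it for a compatible scheduler class via `from_config`, and saving it back to disk. A short sketch (the repository id and output directory are illustrative):

```python
from diffusers import DDIMScheduler, DPMSolverMultistepScheduler

# load a scheduler config from a Hub repository (the subfolder holds scheduler_config.json)
scheduler = DDIMScheduler.from_pretrained(
    "runwayml/stable-diffusion-v1-5", subfolder="scheduler"
)
print(scheduler.config.num_train_timesteps)

# list compatible scheduler classes and instantiate one from the same config
print([cls.__name__ for cls in scheduler.compatibles])
swapped = DPMSolverMultistepScheduler.from_config(scheduler.config)

# round-trip: save the configuration locally and reload it
scheduler.save_pretrained("./my_scheduler")
reloaded = DDIMScheduler.from_pretrained("./my_scheduler")
```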
# This file is autogenerated by the command `make fix-copies`, do not edit. from ..utils import DummyObject, requires_backends class AdaptiveProjectedGuidance(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class AutoGuidance(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class ClassifierFreeGuidance(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class ClassifierFreeZeroStarGuidance(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class FrequencyDecoupledGuidance(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class PerturbedAttentionGuidance(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class SkipLayerGuidance(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class SmoothedEnergyGuidance(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class TangentialClassifierFreeGuidance(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class FasterCacheConfig(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class FirstBlockCacheConfig(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, 
**kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class HookRegistry(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class LayerSkipConfig(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class PyramidAttentionBroadcastConfig(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class SmoothedEnergyGuidanceConfig(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) def apply_faster_cache(*args, **kwargs): requires_backends(apply_faster_cache, ["torch"]) def apply_first_block_cache(*args, **kwargs): requires_backends(apply_first_block_cache, ["torch"]) def apply_layer_skip(*args, **kwargs): requires_backends(apply_layer_skip, ["torch"]) def apply_pyramid_attention_broadcast(*args, **kwargs): requires_backends(apply_pyramid_attention_broadcast, ["torch"]) class AllegroTransformer3DModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class AsymmetricAutoencoderKL(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class AttentionBackendName(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class AuraFlowTransformer2DModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class AutoencoderDC(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class AutoencoderKL(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): 
requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class AutoencoderKLAllegro(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class AutoencoderKLCogVideoX(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class AutoencoderKLCosmos(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class AutoencoderKLHunyuanVideo(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class AutoencoderKLLTXVideo(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class AutoencoderKLMagvit(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class AutoencoderKLMochi(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class AutoencoderKLQwenImage(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class AutoencoderKLTemporalDecoder(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class AutoencoderKLWan(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class AutoencoderOobleck(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): 
requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class AutoencoderTiny(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class AutoModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class BriaTransformer2DModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class CacheMixin(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class ChromaTransformer2DModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class CogVideoXTransformer3DModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class CogView3PlusTransformer2DModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class CogView4Transformer2DModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class ConsisIDTransformer3DModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class ConsistencyDecoderVAE(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class ControlNetModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): 
requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class ControlNetUnionModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class ControlNetXSAdapter(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class CosmosTransformer3DModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class DiTTransformer2DModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class EasyAnimateTransformer3DModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class FluxControlNetModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class FluxMultiControlNetModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class FluxTransformer2DModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class HiDreamImageTransformer2DModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class HunyuanDiT2DControlNetModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class HunyuanDiT2DModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, 
**kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class HunyuanDiT2DMultiControlNetModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class HunyuanVideoFramepackTransformer3DModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class HunyuanVideoTransformer3DModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class I2VGenXLUNet(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class Kandinsky3UNet(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class LatteTransformer3DModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class LTXVideoTransformer3DModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class Lumina2Transformer2DModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class LuminaNextDiT2DModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class MochiTransformer3DModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class ModelMixin(metaclass=DummyObject): _backends = ["torch"] def 
__init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class MotionAdapter(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class MultiAdapter(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class MultiControlNetModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class OmniGenTransformer2DModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class PixArtTransformer2DModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class PriorTransformer(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class QwenImageControlNetModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class QwenImageMultiControlNetModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class QwenImageTransformer2DModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class SanaControlNetModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class SanaTransformer2DModel(metaclass=DummyObject): _backends = ["torch"] def 
__init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class SD3ControlNetModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class SD3MultiControlNetModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class SD3Transformer2DModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class SkyReelsV2Transformer3DModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class SparseControlNetModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class StableAudioDiTModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class T2IAdapter(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class T5FilmDecoder(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class Transformer2DModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class TransformerTemporalModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class UNet1DModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): 
requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class UNet2DConditionModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class UNet2DModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class UNet3DConditionModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class UNetControlNetXSModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class UNetMotionModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class UNetSpatioTemporalConditionModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class UVit2DModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class VQModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class WanTransformer3DModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class WanVACETransformer3DModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) def attention_backend(*args, **kwargs): requires_backends(attention_backend, ["torch"]) class ComponentsManager(metaclass=DummyObject): _backends = 
["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class ComponentSpec(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class ModularPipeline(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class ModularPipelineBlocks(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) def get_constant_schedule(*args, **kwargs): requires_backends(get_constant_schedule, ["torch"]) def get_constant_schedule_with_warmup(*args, **kwargs): requires_backends(get_constant_schedule_with_warmup, ["torch"]) def get_cosine_schedule_with_warmup(*args, **kwargs): requires_backends(get_cosine_schedule_with_warmup, ["torch"]) def get_cosine_with_hard_restarts_schedule_with_warmup(*args, **kwargs): requires_backends(get_cosine_with_hard_restarts_schedule_with_warmup, ["torch"]) def get_linear_schedule_with_warmup(*args, **kwargs): requires_backends(get_linear_schedule_with_warmup, ["torch"]) def get_polynomial_decay_schedule_with_warmup(*args, **kwargs): requires_backends(get_polynomial_decay_schedule_with_warmup, ["torch"]) def get_scheduler(*args, **kwargs): requires_backends(get_scheduler, ["torch"]) class AudioPipelineOutput(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class AutoPipelineForImage2Image(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class AutoPipelineForInpainting(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class AutoPipelineForText2Image(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class BlipDiffusionControlNetPipeline(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): 
requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class BlipDiffusionPipeline(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class CLIPImageProjection(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class ConsistencyModelPipeline(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class DanceDiffusionPipeline(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class DDIMPipeline(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class DDPMPipeline(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class DiffusionPipeline(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class DiTPipeline(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class ImagePipelineOutput(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class KarrasVePipeline(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class LDMPipeline(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def 
from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class LDMSuperResolutionPipeline(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class PNDMPipeline(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class RePaintPipeline(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class ScoreSdeVePipeline(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class StableDiffusionMixin(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class DiffusersQuantizer(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class AmusedScheduler(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class CMStochasticIterativeScheduler(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class CogVideoXDDIMScheduler(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class CogVideoXDPMScheduler(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class DDIMInverseScheduler(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, 
*args, **kwargs): requires_backends(cls, ["torch"]) class DDIMParallelScheduler(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class DDIMScheduler(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class DDPMParallelScheduler(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class DDPMScheduler(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class DDPMWuerstchenScheduler(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class DEISMultistepScheduler(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class DPMSolverMultistepInverseScheduler(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class DPMSolverMultistepScheduler(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class DPMSolverSinglestepScheduler(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class EDMDPMSolverMultistepScheduler(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class EDMEulerScheduler(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def 
from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class EulerAncestralDiscreteScheduler(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class EulerDiscreteScheduler(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class FlowMatchEulerDiscreteScheduler(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class FlowMatchHeunDiscreteScheduler(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class FlowMatchLCMScheduler(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class HeunDiscreteScheduler(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class IPNDMScheduler(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class KarrasVeScheduler(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class KDPM2AncestralDiscreteScheduler(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class KDPM2DiscreteScheduler(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class LCMScheduler(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) 
@classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class PNDMScheduler(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class RePaintScheduler(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class SASolverScheduler(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class SchedulerMixin(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class SCMScheduler(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class ScoreSdeVeScheduler(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class TCDScheduler(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class UnCLIPScheduler(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class UniPCMultistepScheduler(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class VQDiffusionScheduler(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) class EMAModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): 
requires_backends(cls, ["torch"])
diffusers/src/diffusers/utils/dummy_pt_objects.py/0
{ "file_path": "diffusers/src/diffusers/utils/dummy_pt_objects.py", "repo_id": "diffusers", "token_count": 26886 }
188
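The generated dummy module above exists so that `import diffusers` keeps working when PyTorch is not installed: every placeholder class only fails, loudly, at the point of use. Below is a minimal, self-contained sketch of that `DummyObject` / `requires_backends` pattern. The `requires_backends` helper here is a simplified stand-in written for illustration; it is not the actual `diffusers.utils` implementation.

```python
import importlib.util


def requires_backends(obj, backends):
    """Raise an informative ImportError when a required backend is not importable (simplified stand-in)."""
    missing = [b for b in backends if importlib.util.find_spec(b) is None]
    if missing:
        name = obj.__name__ if isinstance(obj, type) else type(obj).__name__
        raise ImportError(f"{name} requires the following backends, which are not installed: {missing}")


class DummyObject(type):
    """Metaclass so that undefined class-level attribute access also fails with the same message."""

    def __getattr__(cls, key):
        if key.startswith("_"):
            raise AttributeError(key)
        requires_backends(cls, cls._backends)


class DDPMPipeline(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


if __name__ == "__main__":
    try:
        DDPMPipeline.from_pretrained("some/repo")  # silent no-op if torch is available
        print("torch is installed, so the placeholder call did nothing")
    except ImportError as err:
        print(err)  # clear message telling the user which backend to install
```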
# coding=utf-8 # Copyright 2025 Optuna, Hugging Face # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Logging utilities.""" import logging import os import sys import threading from logging import ( CRITICAL, # NOQA DEBUG, # NOQA ERROR, # NOQA FATAL, # NOQA INFO, # NOQA NOTSET, # NOQA WARN, # NOQA WARNING, # NOQA ) from typing import Dict, Optional from tqdm import auto as tqdm_lib _lock = threading.Lock() _default_handler: Optional[logging.Handler] = None log_levels = { "debug": logging.DEBUG, "info": logging.INFO, "warning": logging.WARNING, "error": logging.ERROR, "critical": logging.CRITICAL, } _default_log_level = logging.WARNING _tqdm_active = True def _get_default_logging_level() -> int: """ If DIFFUSERS_VERBOSITY env var is set to one of the valid choices return that as the new default level. If it is not - fall back to `_default_log_level` """ env_level_str = os.getenv("DIFFUSERS_VERBOSITY", None) if env_level_str: if env_level_str in log_levels: return log_levels[env_level_str] else: logging.getLogger().warning( f"Unknown option DIFFUSERS_VERBOSITY={env_level_str}, has to be one of: {', '.join(log_levels.keys())}" ) return _default_log_level def _get_library_name() -> str: return __name__.split(".")[0] def _get_library_root_logger() -> logging.Logger: return logging.getLogger(_get_library_name()) def _configure_library_root_logger() -> None: global _default_handler with _lock: if _default_handler: # This library has already configured the library root logger. return _default_handler = logging.StreamHandler() # Set sys.stderr as stream. if sys.stderr: # only if sys.stderr exists, e.g. when not using pythonw in windows _default_handler.flush = sys.stderr.flush # Apply our default configuration to the library root logger. library_root_logger = _get_library_root_logger() library_root_logger.addHandler(_default_handler) library_root_logger.setLevel(_get_default_logging_level()) library_root_logger.propagate = False def _reset_library_root_logger() -> None: global _default_handler with _lock: if not _default_handler: return library_root_logger = _get_library_root_logger() library_root_logger.removeHandler(_default_handler) library_root_logger.setLevel(logging.NOTSET) _default_handler = None def get_log_levels_dict() -> Dict[str, int]: return log_levels def get_logger(name: Optional[str] = None) -> logging.Logger: """ Return a logger with the specified name. This function is not supposed to be directly accessed unless you are writing a custom diffusers module. """ if name is None: name = _get_library_name() _configure_library_root_logger() return logging.getLogger(name) def get_verbosity() -> int: """ Return the current level for the 🤗 Diffusers' root logger as an `int`. 
Returns: `int`: Logging level integers which can be one of: - `50`: `diffusers.logging.CRITICAL` or `diffusers.logging.FATAL` - `40`: `diffusers.logging.ERROR` - `30`: `diffusers.logging.WARNING` or `diffusers.logging.WARN` - `20`: `diffusers.logging.INFO` - `10`: `diffusers.logging.DEBUG` """ _configure_library_root_logger() return _get_library_root_logger().getEffectiveLevel() def set_verbosity(verbosity: int) -> None: """ Set the verbosity level for the 🤗 Diffusers' root logger. Args: verbosity (`int`): Logging level which can be one of: - `diffusers.logging.CRITICAL` or `diffusers.logging.FATAL` - `diffusers.logging.ERROR` - `diffusers.logging.WARNING` or `diffusers.logging.WARN` - `diffusers.logging.INFO` - `diffusers.logging.DEBUG` """ _configure_library_root_logger() _get_library_root_logger().setLevel(verbosity) def set_verbosity_info() -> None: """Set the verbosity to the `INFO` level.""" return set_verbosity(INFO) def set_verbosity_warning() -> None: """Set the verbosity to the `WARNING` level.""" return set_verbosity(WARNING) def set_verbosity_debug() -> None: """Set the verbosity to the `DEBUG` level.""" return set_verbosity(DEBUG) def set_verbosity_error() -> None: """Set the verbosity to the `ERROR` level.""" return set_verbosity(ERROR) def disable_default_handler() -> None: """Disable the default handler of the 🤗 Diffusers' root logger.""" _configure_library_root_logger() assert _default_handler is not None _get_library_root_logger().removeHandler(_default_handler) def enable_default_handler() -> None: """Enable the default handler of the 🤗 Diffusers' root logger.""" _configure_library_root_logger() assert _default_handler is not None _get_library_root_logger().addHandler(_default_handler) def add_handler(handler: logging.Handler) -> None: """adds a handler to the HuggingFace Diffusers' root logger.""" _configure_library_root_logger() assert handler is not None _get_library_root_logger().addHandler(handler) def remove_handler(handler: logging.Handler) -> None: """removes given handler from the HuggingFace Diffusers' root logger.""" _configure_library_root_logger() assert handler is not None and handler in _get_library_root_logger().handlers _get_library_root_logger().removeHandler(handler) def disable_propagation() -> None: """ Disable propagation of the library log outputs. Note that log propagation is disabled by default. """ _configure_library_root_logger() _get_library_root_logger().propagate = False def enable_propagation() -> None: """ Enable propagation of the library log outputs. Please disable the HuggingFace Diffusers' default handler to prevent double logging if the root logger has been configured. """ _configure_library_root_logger() _get_library_root_logger().propagate = True def enable_explicit_format() -> None: """ Enable explicit formatting for every 🤗 Diffusers' logger. The explicit formatter is as follows: ``` [LEVELNAME|FILENAME|LINE NUMBER] TIME >> MESSAGE ``` All handlers currently bound to the root logger are affected by this method. """ handlers = _get_library_root_logger().handlers for handler in handlers: formatter = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s") handler.setFormatter(formatter) def reset_format() -> None: """ Resets the formatting for 🤗 Diffusers' loggers. All handlers currently bound to the root logger are affected by this method. 
""" handlers = _get_library_root_logger().handlers for handler in handlers: handler.setFormatter(None) def warning_advice(self, *args, **kwargs) -> None: """ This method is identical to `logger.warning()`, but if env var DIFFUSERS_NO_ADVISORY_WARNINGS=1 is set, this warning will not be printed """ no_advisory_warnings = os.getenv("DIFFUSERS_NO_ADVISORY_WARNINGS", False) if no_advisory_warnings: return self.warning(*args, **kwargs) logging.Logger.warning_advice = warning_advice class EmptyTqdm: """Dummy tqdm which doesn't do anything.""" def __init__(self, *args, **kwargs): # pylint: disable=unused-argument self._iterator = args[0] if args else None def __iter__(self): return iter(self._iterator) def __getattr__(self, _): """Return empty function.""" def empty_fn(*args, **kwargs): # pylint: disable=unused-argument return return empty_fn def __enter__(self): return self def __exit__(self, type_, value, traceback): return class _tqdm_cls: def __call__(self, *args, **kwargs): if _tqdm_active: return tqdm_lib.tqdm(*args, **kwargs) else: return EmptyTqdm(*args, **kwargs) def set_lock(self, *args, **kwargs): self._lock = None if _tqdm_active: return tqdm_lib.tqdm.set_lock(*args, **kwargs) def get_lock(self): if _tqdm_active: return tqdm_lib.tqdm.get_lock() tqdm = _tqdm_cls() def is_progress_bar_enabled() -> bool: """Return a boolean indicating whether tqdm progress bars are enabled.""" global _tqdm_active return bool(_tqdm_active) def enable_progress_bar() -> None: """Enable tqdm progress bar.""" global _tqdm_active _tqdm_active = True def disable_progress_bar() -> None: """Disable tqdm progress bar.""" global _tqdm_active _tqdm_active = False
diffusers/src/diffusers/utils/logging.py/0
{ "file_path": "diffusers/src/diffusers/utils/logging.py", "repo_id": "diffusers", "token_count": 3634 }
189
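A short usage sketch for the logging utilities defined above, assuming `diffusers` is installed. The same verbosity can be configured up front through the `DIFFUSERS_VERBOSITY` environment variable instead of calling the setters.

```python
from diffusers.utils import logging

# Equivalent to launching the process with DIFFUSERS_VERBOSITY=info.
logging.set_verbosity_info()

logger = logging.get_logger("diffusers")  # the library root logger configured by the module above
logger.info("visible, because the root logger is now at INFO")
logger.warning_advice("advisory warning; silenced when DIFFUSERS_NO_ADVISORY_WARNINGS=1 is set")

print(logging.get_verbosity())  # 20, i.e. logging.INFO

# tqdm progress bars used by pipelines can be toggled globally as well.
logging.disable_progress_bar()
logging.enable_progress_bar()
```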
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import gc import importlib import sys import time import unittest import numpy as np import torch from packaging import version from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( ControlNetModel, EulerDiscreteScheduler, LCMScheduler, StableDiffusionXLAdapterPipeline, StableDiffusionXLControlNetPipeline, StableDiffusionXLPipeline, T2IAdapter, ) from diffusers.utils import logging from diffusers.utils.import_utils import is_accelerate_available from diffusers.utils.testing_utils import ( CaptureLogger, backend_empty_cache, is_flaky, load_image, nightly, numpy_cosine_similarity_distance, require_peft_backend, require_torch_accelerator, slow, torch_device, ) sys.path.append(".") from utils import PeftLoraLoaderMixinTests, check_if_lora_correctly_set, state_dicts_almost_equal # noqa: E402 if is_accelerate_available(): from accelerate.utils import release_memory class StableDiffusionXLLoRATests(PeftLoraLoaderMixinTests, unittest.TestCase): has_two_text_encoders = True pipeline_class = StableDiffusionXLPipeline scheduler_cls = EulerDiscreteScheduler scheduler_kwargs = { "beta_start": 0.00085, "beta_end": 0.012, "beta_schedule": "scaled_linear", "timestep_spacing": "leading", "steps_offset": 1, } unet_kwargs = { "block_out_channels": (32, 64), "layers_per_block": 2, "sample_size": 32, "in_channels": 4, "out_channels": 4, "down_block_types": ("DownBlock2D", "CrossAttnDownBlock2D"), "up_block_types": ("CrossAttnUpBlock2D", "UpBlock2D"), "attention_head_dim": (2, 4), "use_linear_projection": True, "addition_embed_type": "text_time", "addition_time_embed_dim": 8, "transformer_layers_per_block": (1, 2), "projection_class_embeddings_input_dim": 80, # 6 * 8 + 32 "cross_attention_dim": 64, } vae_kwargs = { "block_out_channels": [32, 64], "in_channels": 3, "out_channels": 3, "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"], "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"], "latent_channels": 4, "sample_size": 128, } text_encoder_cls, text_encoder_id = CLIPTextModel, "peft-internal-testing/tiny-clip-text-2" tokenizer_cls, tokenizer_id = CLIPTokenizer, "peft-internal-testing/tiny-clip-text-2" text_encoder_2_cls, text_encoder_2_id = CLIPTextModelWithProjection, "peft-internal-testing/tiny-clip-text-2" tokenizer_2_cls, tokenizer_2_id = CLIPTokenizer, "peft-internal-testing/tiny-clip-text-2" @property def output_shape(self): return (1, 64, 64, 3) def setUp(self): super().setUp() gc.collect() backend_empty_cache(torch_device) def tearDown(self): super().tearDown() gc.collect() backend_empty_cache(torch_device) @is_flaky def test_multiple_wrong_adapter_name_raises_error(self): super().test_multiple_wrong_adapter_name_raises_error() def test_simple_inference_with_text_denoiser_lora_unfused(self): if torch.cuda.is_available(): expected_atol = 9e-2 expected_rtol = 9e-2 else: expected_atol = 1e-3 expected_rtol = 1e-3 
super().test_simple_inference_with_text_denoiser_lora_unfused( expected_atol=expected_atol, expected_rtol=expected_rtol ) def test_simple_inference_with_text_lora_denoiser_fused_multi(self): if torch.cuda.is_available(): expected_atol = 9e-2 expected_rtol = 9e-2 else: expected_atol = 1e-3 expected_rtol = 1e-3 super().test_simple_inference_with_text_lora_denoiser_fused_multi( expected_atol=expected_atol, expected_rtol=expected_rtol ) def test_lora_scale_kwargs_match_fusion(self): if torch.cuda.is_available(): expected_atol = 9e-2 expected_rtol = 9e-2 else: expected_atol = 1e-3 expected_rtol = 1e-3 super().test_lora_scale_kwargs_match_fusion(expected_atol=expected_atol, expected_rtol=expected_rtol) @slow @nightly @require_torch_accelerator @require_peft_backend class LoraSDXLIntegrationTests(unittest.TestCase): def setUp(self): super().setUp() gc.collect() backend_empty_cache(torch_device) def tearDown(self): super().tearDown() gc.collect() backend_empty_cache(torch_device) def test_sdxl_1_0_lora(self): generator = torch.Generator("cpu").manual_seed(0) pipe = StableDiffusionXLPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0") pipe.enable_model_cpu_offload() lora_model_id = "hf-internal-testing/sdxl-1.0-lora" lora_filename = "sd_xl_offset_example-lora_1.0.safetensors" pipe.load_lora_weights(lora_model_id, weight_name=lora_filename) images = pipe( "masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=2 ).images images = images[0, -3:, -3:, -1].flatten() expected = np.array([0.4468, 0.4061, 0.4134, 0.3637, 0.3202, 0.365, 0.3786, 0.3725, 0.3535]) max_diff = numpy_cosine_similarity_distance(expected, images) assert max_diff < 1e-4 pipe.unload_lora_weights() release_memory(pipe) def test_sdxl_1_0_blockwise_lora(self): generator = torch.Generator("cpu").manual_seed(0) pipe = StableDiffusionXLPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0") pipe.enable_model_cpu_offload() lora_model_id = "hf-internal-testing/sdxl-1.0-lora" lora_filename = "sd_xl_offset_example-lora_1.0.safetensors" pipe.load_lora_weights(lora_model_id, weight_name=lora_filename, adapter_name="offset") scales = { "unet": { "down": {"block_1": [1.0, 1.0], "block_2": [1.0, 1.0]}, "mid": 1.0, "up": {"block_0": [1.0, 1.0, 1.0], "block_1": [1.0, 1.0, 1.0]}, }, } pipe.set_adapters(["offset"], [scales]) images = pipe( "masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=2 ).images images = images[0, -3:, -3:, -1].flatten() expected = np.array([00.4468, 0.4061, 0.4134, 0.3637, 0.3202, 0.365, 0.3786, 0.3725, 0.3535]) max_diff = numpy_cosine_similarity_distance(expected, images) assert max_diff < 1e-4 pipe.unload_lora_weights() release_memory(pipe) def test_sdxl_lcm_lora(self): pipe = StableDiffusionXLPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16 ) pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config) pipe.enable_model_cpu_offload() generator = torch.Generator("cpu").manual_seed(0) lora_model_id = "latent-consistency/lcm-lora-sdxl" pipe.load_lora_weights(lora_model_id) image = pipe( "masterpiece, best quality, mountain", generator=generator, num_inference_steps=4, guidance_scale=0.5 ).images[0] expected_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/lcm_lora/sdxl_lcm_lora.png" ) image_np = pipe.image_processor.pil_to_numpy(image) expected_image_np = pipe.image_processor.pil_to_numpy(expected_image) 
max_diff = numpy_cosine_similarity_distance(image_np.flatten(), expected_image_np.flatten()) assert max_diff < 1e-4 pipe.unload_lora_weights() release_memory(pipe) def test_sdxl_1_0_lora_fusion(self): generator = torch.Generator().manual_seed(0) pipe = StableDiffusionXLPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0") lora_model_id = "hf-internal-testing/sdxl-1.0-lora" lora_filename = "sd_xl_offset_example-lora_1.0.safetensors" pipe.load_lora_weights(lora_model_id, weight_name=lora_filename) pipe.fuse_lora() # We need to unload the lora weights since in the previous API `fuse_lora` led to lora weights being # silently deleted - otherwise this will CPU OOM pipe.unload_lora_weights() pipe.enable_model_cpu_offload() images = pipe( "masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=2 ).images images = images[0, -3:, -3:, -1].flatten() # This way we also test equivalence between LoRA fusion and the non-fusion behaviour. expected = np.array([0.4468, 0.4061, 0.4134, 0.3637, 0.3202, 0.365, 0.3786, 0.3725, 0.3535]) max_diff = numpy_cosine_similarity_distance(expected, images) assert max_diff < 1e-4 release_memory(pipe) def test_sdxl_1_0_lora_unfusion(self): generator = torch.Generator("cpu").manual_seed(0) pipe = StableDiffusionXLPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0") lora_model_id = "hf-internal-testing/sdxl-1.0-lora" lora_filename = "sd_xl_offset_example-lora_1.0.safetensors" pipe.load_lora_weights(lora_model_id, weight_name=lora_filename) pipe.fuse_lora() pipe.enable_model_cpu_offload() images = pipe( "masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=3 ).images images_with_fusion = images.flatten() pipe.unfuse_lora() generator = torch.Generator("cpu").manual_seed(0) images = pipe( "masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=3 ).images images_without_fusion = images.flatten() max_diff = numpy_cosine_similarity_distance(images_with_fusion, images_without_fusion) assert max_diff < 1e-4 release_memory(pipe) def test_sdxl_1_0_lora_unfusion_effectivity(self): pipe = StableDiffusionXLPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0") pipe.enable_model_cpu_offload() generator = torch.Generator().manual_seed(0) images = pipe( "masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=2 ).images original_image_slice = images[0, -3:, -3:, -1].flatten() lora_model_id = "hf-internal-testing/sdxl-1.0-lora" lora_filename = "sd_xl_offset_example-lora_1.0.safetensors" pipe.load_lora_weights(lora_model_id, weight_name=lora_filename) pipe.fuse_lora() generator = torch.Generator().manual_seed(0) _ = pipe( "masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=2 ).images pipe.unfuse_lora() # We need to unload the lora weights - in the old API unfuse led to unloading the adapter weights pipe.unload_lora_weights() generator = torch.Generator().manual_seed(0) images = pipe( "masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=2 ).images images_without_fusion_slice = images[0, -3:, -3:, -1].flatten() max_diff = numpy_cosine_similarity_distance(images_without_fusion_slice, original_image_slice) assert max_diff < 1e-3 release_memory(pipe) def test_sdxl_1_0_lora_fusion_efficiency(self): generator = torch.Generator().manual_seed(0) lora_model_id = "hf-internal-testing/sdxl-1.0-lora" 
lora_filename = "sd_xl_offset_example-lora_1.0.safetensors" pipe = StableDiffusionXLPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16 ) pipe.load_lora_weights(lora_model_id, weight_name=lora_filename, torch_dtype=torch.float16) pipe.enable_model_cpu_offload() start_time = time.time() for _ in range(3): pipe( "masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=2 ).images end_time = time.time() elapsed_time_non_fusion = end_time - start_time del pipe pipe = StableDiffusionXLPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16 ) pipe.load_lora_weights(lora_model_id, weight_name=lora_filename, torch_dtype=torch.float16) pipe.fuse_lora() # We need to unload the lora weights since in the previous API `fuse_lora` led to lora weights being # silently deleted - otherwise this will CPU OOM pipe.unload_lora_weights() pipe.enable_model_cpu_offload() generator = torch.Generator().manual_seed(0) start_time = time.time() for _ in range(3): pipe( "masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=2 ).images end_time = time.time() elapsed_time_fusion = end_time - start_time self.assertTrue(elapsed_time_fusion < elapsed_time_non_fusion) release_memory(pipe) def test_sdxl_1_0_last_ben(self): generator = torch.Generator().manual_seed(0) pipe = StableDiffusionXLPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0") pipe.enable_model_cpu_offload() lora_model_id = "TheLastBen/Papercut_SDXL" lora_filename = "papercut.safetensors" pipe.load_lora_weights(lora_model_id, weight_name=lora_filename) images = pipe("papercut.safetensors", output_type="np", generator=generator, num_inference_steps=2).images images = images[0, -3:, -3:, -1].flatten() expected = np.array([0.5244, 0.4347, 0.4312, 0.4246, 0.4398, 0.4409, 0.4884, 0.4938, 0.4094]) max_diff = numpy_cosine_similarity_distance(expected, images) assert max_diff < 1e-3 pipe.unload_lora_weights() release_memory(pipe) def test_sdxl_1_0_fuse_unfuse_all(self): pipe = StableDiffusionXLPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16 ) text_encoder_1_sd = copy.deepcopy(pipe.text_encoder.state_dict()) text_encoder_2_sd = copy.deepcopy(pipe.text_encoder_2.state_dict()) unet_sd = copy.deepcopy(pipe.unet.state_dict()) pipe.load_lora_weights( "davizca87/sun-flower", weight_name="snfw3rXL-000004.safetensors", torch_dtype=torch.float16 ) fused_te_state_dict = pipe.text_encoder.state_dict() fused_te_2_state_dict = pipe.text_encoder_2.state_dict() unet_state_dict = pipe.unet.state_dict() peft_ge_070 = version.parse(importlib.metadata.version("peft")) >= version.parse("0.7.0") def remap_key(key, sd): # some keys have moved around for PEFT >= 0.7.0, but they should still be loaded correctly if (key in sd) or (not peft_ge_070): return key # instead of linear.weight, we now have linear.base_layer.weight, etc. 
if key.endswith(".weight"): key = key[:-7] + ".base_layer.weight" elif key.endswith(".bias"): key = key[:-5] + ".base_layer.bias" return key for key, value in text_encoder_1_sd.items(): key = remap_key(key, fused_te_state_dict) self.assertTrue(torch.allclose(fused_te_state_dict[key], value)) for key, value in text_encoder_2_sd.items(): key = remap_key(key, fused_te_2_state_dict) self.assertTrue(torch.allclose(fused_te_2_state_dict[key], value)) for key, value in unet_state_dict.items(): self.assertTrue(torch.allclose(unet_state_dict[key], value)) pipe.fuse_lora() pipe.unload_lora_weights() assert not state_dicts_almost_equal(text_encoder_1_sd, pipe.text_encoder.state_dict()) assert not state_dicts_almost_equal(text_encoder_2_sd, pipe.text_encoder_2.state_dict()) assert not state_dicts_almost_equal(unet_sd, pipe.unet.state_dict()) release_memory(pipe) del unet_sd, text_encoder_1_sd, text_encoder_2_sd def test_sdxl_1_0_lora_with_sequential_cpu_offloading(self): generator = torch.Generator().manual_seed(0) pipe = StableDiffusionXLPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0") pipe.enable_sequential_cpu_offload() lora_model_id = "hf-internal-testing/sdxl-1.0-lora" lora_filename = "sd_xl_offset_example-lora_1.0.safetensors" pipe.load_lora_weights(lora_model_id, weight_name=lora_filename) images = pipe( "masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=2 ).images images = images[0, -3:, -3:, -1].flatten() expected = np.array([0.4468, 0.4087, 0.4134, 0.366, 0.3202, 0.3505, 0.3786, 0.387, 0.3535]) max_diff = numpy_cosine_similarity_distance(expected, images) assert max_diff < 1e-3 pipe.unload_lora_weights() release_memory(pipe) def test_controlnet_canny_lora(self): controlnet = ControlNetModel.from_pretrained("diffusers/controlnet-canny-sdxl-1.0") pipe = StableDiffusionXLControlNetPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", controlnet=controlnet ) pipe.load_lora_weights("nerijs/pixel-art-xl", weight_name="pixel-art-xl.safetensors") pipe.enable_sequential_cpu_offload() generator = torch.Generator(device="cpu").manual_seed(0) prompt = "corgi" image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" ) images = pipe(prompt, image=image, generator=generator, output_type="np", num_inference_steps=3).images assert images[0].shape == (768, 512, 3) original_image = images[0, -3:, -3:, -1].flatten() expected_image = np.array([0.4574, 0.4487, 0.4435, 0.5163, 0.4396, 0.4411, 0.518, 0.4465, 0.4333]) max_diff = numpy_cosine_similarity_distance(expected_image, original_image) assert max_diff < 1e-4 pipe.unload_lora_weights() release_memory(pipe) def test_sdxl_t2i_adapter_canny_lora(self): adapter = T2IAdapter.from_pretrained("TencentARC/t2i-adapter-lineart-sdxl-1.0", torch_dtype=torch.float16).to( "cpu" ) pipe = StableDiffusionXLAdapterPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", adapter=adapter, torch_dtype=torch.float16, variant="fp16", ) pipe.load_lora_weights("CiroN2022/toy-face", weight_name="toy_face_sdxl.safetensors") pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) prompt = "toy" image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/toy_canny.png" ) images = pipe(prompt, image=image, generator=generator, output_type="np", num_inference_steps=3).images assert 
images[0].shape == (768, 512, 3) image_slice = images[0, -3:, -3:, -1].flatten() expected_slice = np.array([0.4284, 0.4337, 0.4319, 0.4255, 0.4329, 0.4280, 0.4338, 0.4420, 0.4226]) assert numpy_cosine_similarity_distance(image_slice, expected_slice) < 1e-4 @nightly def test_sequential_fuse_unfuse(self): pipe = StableDiffusionXLPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16 ) # 1. round pipe.load_lora_weights("Pclanglais/TintinIA", torch_dtype=torch.float16) pipe.to(torch_device) pipe.fuse_lora() generator = torch.Generator().manual_seed(0) images = pipe( "masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=2 ).images image_slice = images[0, -3:, -3:, -1].flatten() pipe.unfuse_lora() # 2. round pipe.load_lora_weights("ProomptEngineer/pe-balloon-diffusion-style", torch_dtype=torch.float16) pipe.fuse_lora() pipe.unfuse_lora() # 3. round pipe.load_lora_weights("ostris/crayon_style_lora_sdxl", torch_dtype=torch.float16) pipe.fuse_lora() pipe.unfuse_lora() # 4. back to 1st round pipe.load_lora_weights("Pclanglais/TintinIA", torch_dtype=torch.float16) pipe.fuse_lora() generator = torch.Generator().manual_seed(0) images_2 = pipe( "masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=2 ).images image_slice_2 = images_2[0, -3:, -3:, -1].flatten() max_diff = numpy_cosine_similarity_distance(image_slice, image_slice_2) assert max_diff < 1e-3 pipe.unload_lora_weights() release_memory(pipe) @nightly def test_integration_logits_multi_adapter(self): path = "stabilityai/stable-diffusion-xl-base-1.0" lora_id = "CiroN2022/toy-face" pipe = StableDiffusionXLPipeline.from_pretrained(path, torch_dtype=torch.float16) pipe.load_lora_weights(lora_id, weight_name="toy_face_sdxl.safetensors", adapter_name="toy") pipe = pipe.to(torch_device) self.assertTrue(check_if_lora_correctly_set(pipe.unet), "Lora not correctly set in Unet") prompt = "toy_face of a hacker with a hoodie" lora_scale = 0.9 images = pipe( prompt=prompt, num_inference_steps=30, generator=torch.manual_seed(0), cross_attention_kwargs={"scale": lora_scale}, output_type="np", ).images expected_slice_scale = np.array([0.538, 0.539, 0.540, 0.540, 0.542, 0.539, 0.538, 0.541, 0.539]) predicted_slice = images[0, -3:, -3:, -1].flatten() max_diff = numpy_cosine_similarity_distance(expected_slice_scale, predicted_slice) assert max_diff < 1e-3 pipe.load_lora_weights("nerijs/pixel-art-xl", weight_name="pixel-art-xl.safetensors", adapter_name="pixel") pipe.set_adapters("pixel") prompt = "pixel art, a hacker with a hoodie, simple, flat colors" images = pipe( prompt, num_inference_steps=30, guidance_scale=7.5, cross_attention_kwargs={"scale": lora_scale}, generator=torch.manual_seed(0), output_type="np", ).images predicted_slice = images[0, -3:, -3:, -1].flatten() expected_slice_scale = np.array( [0.61973065, 0.62018543, 0.62181497, 0.61933696, 0.6208608, 0.620576, 0.6200281, 0.62258327, 0.6259889] ) max_diff = numpy_cosine_similarity_distance(expected_slice_scale, predicted_slice) assert max_diff < 1e-3 # multi-adapter inference pipe.set_adapters(["pixel", "toy"], adapter_weights=[0.5, 1.0]) images = pipe( prompt, num_inference_steps=30, guidance_scale=7.5, cross_attention_kwargs={"scale": 1.0}, generator=torch.manual_seed(0), output_type="np", ).images predicted_slice = images[0, -3:, -3:, -1].flatten() expected_slice_scale = np.array([0.5888, 0.5897, 0.5946, 0.5888, 0.5935, 0.5946, 0.5857, 0.5891, 0.5909]) max_diff = 
numpy_cosine_similarity_distance(expected_slice_scale, predicted_slice) assert max_diff < 1e-3 # Lora disabled pipe.disable_lora() images = pipe( prompt, num_inference_steps=30, guidance_scale=7.5, cross_attention_kwargs={"scale": lora_scale}, generator=torch.manual_seed(0), output_type="np", ).images predicted_slice = images[0, -3:, -3:, -1].flatten() expected_slice_scale = np.array([0.5456, 0.5466, 0.5487, 0.5458, 0.5469, 0.5454, 0.5446, 0.5479, 0.5487]) max_diff = numpy_cosine_similarity_distance(expected_slice_scale, predicted_slice) assert max_diff < 1e-3 @nightly def test_integration_logits_for_dora_lora(self): pipeline = StableDiffusionXLPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0") logger = logging.get_logger("diffusers.loaders.lora_pipeline") logger.setLevel(30) with CaptureLogger(logger) as cap_logger: pipeline.load_lora_weights("hf-internal-testing/dora-trained-on-kohya") pipeline.enable_model_cpu_offload() images = pipeline( "photo of ohwx dog", num_inference_steps=10, generator=torch.manual_seed(0), output_type="np", ).images assert "It seems like you are using a DoRA checkpoint" in cap_logger.out predicted_slice = images[0, -3:, -3:, -1].flatten() expected_slice_scale = np.array([0.1817, 0.0697, 0.2346, 0.0900, 0.1261, 0.2279, 0.1767, 0.1991, 0.2886]) max_diff = numpy_cosine_similarity_distance(expected_slice_scale, predicted_slice) assert max_diff < 1e-3
diffusers/tests/lora/test_lora_layers_sdxl.py/0
{ "file_path": "diffusers/tests/lora/test_lora_layers_sdxl.py", "repo_id": "diffusers", "token_count": 12339 }
190
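The SDXL integration tests above all follow the same load / infer / fuse / unload LoRA workflow. A condensed sketch of that workflow follows; the repository and weight-file names are the ones used in the tests, and running it downloads the full SDXL base checkpoint.

```python
import torch
from diffusers import StableDiffusionXLPipeline

pipe = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
)
pipe.enable_model_cpu_offload()  # needs accelerate; keeps VRAM usage manageable

# Load a LoRA adapter and run inference with it active.
pipe.load_lora_weights(
    "hf-internal-testing/sdxl-1.0-lora", weight_name="sd_xl_offset_example-lora_1.0.safetensors"
)
image = pipe(
    "masterpiece, best quality, mountain",
    generator=torch.Generator("cpu").manual_seed(0),
    num_inference_steps=2,
).images[0]

# Optionally fuse the adapter into the base weights for faster repeated inference,
# then undo everything to recover the original pipeline.
pipe.fuse_lora()
pipe.unfuse_lora()
pipe.unload_lora_weights()
```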
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest import torch from datasets import load_dataset from parameterized import parameterized from diffusers import AutoencoderOobleck from diffusers.utils.testing_utils import ( backend_empty_cache, enable_full_determinism, floats_tensor, slow, torch_all_close, torch_device, ) from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class AutoencoderOobleckTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase): model_class = AutoencoderOobleck main_input_name = "sample" base_precision = 1e-2 def get_autoencoder_oobleck_config(self, block_out_channels=None): init_dict = { "encoder_hidden_size": 12, "decoder_channels": 12, "decoder_input_channels": 6, "audio_channels": 2, "downsampling_ratios": [2, 4], "channel_multiples": [1, 2], } return init_dict @property def dummy_input(self): batch_size = 4 num_channels = 2 seq_len = 24 waveform = floats_tensor((batch_size, num_channels, seq_len)).to(torch_device) return {"sample": waveform, "sample_posterior": False} @property def input_shape(self): return (2, 24) @property def output_shape(self): return (2, 24) def prepare_init_args_and_inputs_for_common(self): init_dict = self.get_autoencoder_oobleck_config() inputs_dict = self.dummy_input return init_dict, inputs_dict def test_enable_disable_slicing(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() torch.manual_seed(0) model = self.model_class(**init_dict).to(torch_device) inputs_dict.update({"return_dict": False}) torch.manual_seed(0) output_without_slicing = model(**inputs_dict, generator=torch.manual_seed(0))[0] torch.manual_seed(0) model.enable_slicing() output_with_slicing = model(**inputs_dict, generator=torch.manual_seed(0))[0] self.assertLess( (output_without_slicing.detach().cpu().numpy() - output_with_slicing.detach().cpu().numpy()).max(), 0.5, "VAE slicing should not affect the inference results", ) torch.manual_seed(0) model.disable_slicing() output_without_slicing_2 = model(**inputs_dict, generator=torch.manual_seed(0))[0] self.assertEqual( output_without_slicing.detach().cpu().numpy().all(), output_without_slicing_2.detach().cpu().numpy().all(), "Without slicing outputs should match with the outputs when slicing is manually disabled.", ) @unittest.skip("Test unsupported.") def test_forward_with_norm_groups(self): pass @unittest.skip("No attention module used in this model") def test_set_attn_processor_for_determinism(self): return @unittest.skip( "Test not supported because of 'weight_norm_fwd_first_dim_kernel' not implemented for 'Float8_e4m3fn'" ) def test_layerwise_casting_training(self): return super().test_layerwise_casting_training() @unittest.skip( "The convolution layers of AutoencoderOobleck are wrapped with torch.nn.utils.weight_norm. This causes the hook's pre_forward to not " "cast the module weights to compute_dtype (as required by forward pass). As a result, forward pass errors out. To fix:\n" "1. 
Make sure `nn::Module::to` works with `torch.nn.utils.weight_norm` wrapped convolution layer.\n" "2. Unskip this test." ) def test_layerwise_casting_inference(self): pass @unittest.skip( "The convolution layers of AutoencoderOobleck are wrapped with torch.nn.utils.weight_norm. This causes the hook's pre_forward to not " "cast the module weights to compute_dtype (as required by forward pass). As a result, forward pass errors out. To fix:\n" "1. Make sure `nn::Module::to` works with `torch.nn.utils.weight_norm` wrapped convolution layer.\n" "2. Unskip this test." ) def test_layerwise_casting_memory(self): pass @slow class AutoencoderOobleckIntegrationTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() backend_empty_cache(torch_device) def _load_datasamples(self, num_samples): ds = load_dataset( "hf-internal-testing/librispeech_asr_dummy", "clean", split="validation", trust_remote_code=True ) # automatic decoding with librispeech speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"] return torch.nn.utils.rnn.pad_sequence( [torch.from_numpy(x["array"]) for x in speech_samples], batch_first=True ) def get_audio(self, audio_sample_size=2097152, fp16=False): dtype = torch.float16 if fp16 else torch.float32 audio = self._load_datasamples(2).to(torch_device).to(dtype) # pad / crop to audio_sample_size audio = torch.nn.functional.pad(audio[:, :audio_sample_size], pad=(0, audio_sample_size - audio.shape[-1])) # todo channel audio = audio.unsqueeze(1).repeat(1, 2, 1).to(torch_device) return audio def get_oobleck_vae_model(self, model_id="stabilityai/stable-audio-open-1.0", fp16=False): torch_dtype = torch.float16 if fp16 else torch.float32 model = AutoencoderOobleck.from_pretrained( model_id, subfolder="vae", torch_dtype=torch_dtype, ) model.to(torch_device) return model def get_generator(self, seed=0): generator_device = "cpu" if not torch_device.startswith(torch_device) else torch_device if torch_device != "mps": return torch.Generator(device=generator_device).manual_seed(seed) return torch.manual_seed(seed) @parameterized.expand( [ # fmt: off [33, [1.193e-4, 6.56e-05, 1.314e-4, 3.80e-05, -4.01e-06], 0.001192], [44, [2.77e-05, -2.65e-05, 1.18e-05, -6.94e-05, -9.57e-05], 0.001196], # fmt: on ] ) def test_stable_diffusion(self, seed, expected_slice, expected_mean_absolute_diff): model = self.get_oobleck_vae_model() audio = self.get_audio() generator = self.get_generator(seed) with torch.no_grad(): sample = model(audio, generator=generator, sample_posterior=True).sample assert sample.shape == audio.shape assert ((sample - audio).abs().mean() - expected_mean_absolute_diff).abs() <= 1e-6 output_slice = sample[-1, 1, 5:10].cpu() expected_output_slice = torch.tensor(expected_slice) assert torch_all_close(output_slice, expected_output_slice, atol=1e-5) def test_stable_diffusion_mode(self): model = self.get_oobleck_vae_model() audio = self.get_audio() with torch.no_grad(): sample = model(audio, sample_posterior=False).sample assert sample.shape == audio.shape @parameterized.expand( [ # fmt: off [33, [1.193e-4, 6.56e-05, 1.314e-4, 3.80e-05, -4.01e-06], 0.001192], [44, [2.77e-05, -2.65e-05, 1.18e-05, -6.94e-05, -9.57e-05], 0.001196], # fmt: on ] ) def test_stable_diffusion_encode_decode(self, seed, expected_slice, expected_mean_absolute_diff): model = self.get_oobleck_vae_model() audio = self.get_audio() generator = self.get_generator(seed) with torch.no_grad(): x = audio posterior = model.encode(x).latent_dist z = 
posterior.sample(generator=generator) sample = model.decode(z).sample # (batch_size, latent_dim, sequence_length) assert posterior.mean.shape == (audio.shape[0], model.config.decoder_input_channels, 1024) assert sample.shape == audio.shape assert ((sample - audio).abs().mean() - expected_mean_absolute_diff).abs() <= 1e-6 output_slice = sample[-1, 1, 5:10].cpu() expected_output_slice = torch.tensor(expected_slice) assert torch_all_close(output_slice, expected_output_slice, atol=1e-5)
diffusers/tests/models/autoencoders/test_models_autoencoder_oobleck.py/0
{ "file_path": "diffusers/tests/models/autoencoders/test_models_autoencoder_oobleck.py", "repo_id": "diffusers", "token_count": 3865 }
191
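For reference, this is the encode/decode round trip that `test_stable_diffusion_encode_decode` above exercises, reduced to a standalone sketch. It loads the Stable Audio Open VAE used by the integration tests (a gated repository) and feeds it a random stereo waveform instead of the LibriSpeech samples.

```python
import torch
from diffusers import AutoencoderOobleck

vae = AutoencoderOobleck.from_pretrained(
    "stabilityai/stable-audio-open-1.0", subfolder="vae", torch_dtype=torch.float32
)
vae.eval()

audio = torch.randn(1, 2, 2097152)  # (batch, stereo channels, samples), as in get_audio() above

with torch.no_grad():
    posterior = vae.encode(audio).latent_dist
    latents = posterior.sample(generator=torch.manual_seed(0))
    reconstruction = vae.decode(latents).sample

print(latents.shape)         # (batch, latent channels, compressed sequence length)
print(reconstruction.shape)  # same shape as the input waveform
```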
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import inspect import unittest import torch from parameterized import parameterized from diffusers import PriorTransformer from diffusers.utils.testing_utils import ( backend_empty_cache, enable_full_determinism, floats_tensor, slow, torch_all_close, torch_device, ) from ..test_modeling_common import ModelTesterMixin enable_full_determinism() class PriorTransformerTests(ModelTesterMixin, unittest.TestCase): model_class = PriorTransformer main_input_name = "hidden_states" @property def dummy_input(self): batch_size = 4 embedding_dim = 8 num_embeddings = 7 hidden_states = floats_tensor((batch_size, embedding_dim)).to(torch_device) proj_embedding = floats_tensor((batch_size, embedding_dim)).to(torch_device) encoder_hidden_states = floats_tensor((batch_size, num_embeddings, embedding_dim)).to(torch_device) return { "hidden_states": hidden_states, "timestep": 2, "proj_embedding": proj_embedding, "encoder_hidden_states": encoder_hidden_states, } def get_dummy_seed_input(self, seed=0): torch.manual_seed(seed) batch_size = 4 embedding_dim = 8 num_embeddings = 7 hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device) proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device) encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device) return { "hidden_states": hidden_states, "timestep": 2, "proj_embedding": proj_embedding, "encoder_hidden_states": encoder_hidden_states, } @property def input_shape(self): return (4, 8) @property def output_shape(self): return (4, 8) def prepare_init_args_and_inputs_for_common(self): init_dict = { "num_attention_heads": 2, "attention_head_dim": 4, "num_layers": 2, "embedding_dim": 8, "num_embeddings": 7, "additional_embeddings": 4, } inputs_dict = self.dummy_input return init_dict, inputs_dict def test_from_pretrained_hub(self): model, loading_info = PriorTransformer.from_pretrained( "hf-internal-testing/prior-dummy", output_loading_info=True ) self.assertIsNotNone(model) self.assertEqual(len(loading_info["missing_keys"]), 0) model.to(torch_device) hidden_states = model(**self.dummy_input)[0] assert hidden_states is not None, "Make sure output is not None" def test_forward_signature(self): init_dict, _ = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["hidden_states", "timestep"] self.assertListEqual(arg_names[:2], expected_arg_names) def test_output_pretrained(self): model = PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy") model = model.to(torch_device) if hasattr(model, "set_default_attn_processor"): model.set_default_attn_processor() input = self.get_dummy_seed_input() with torch.no_grad(): output = model(**input)[0] output_slice = output[0, :5].flatten().cpu() # 
Since the VAE Gaussian prior's generator is seeded on the appropriate device, # the expected output slices are not the same for CPU and GPU. expected_output_slice = torch.tensor([-1.3436, -0.2870, 0.7538, 0.4368, -0.0239]) self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2)) @slow class PriorTransformerIntegrationTests(unittest.TestCase): def get_dummy_seed_input(self, batch_size=1, embedding_dim=768, num_embeddings=77, seed=0): torch.manual_seed(seed) hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device) proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device) encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device) return { "hidden_states": hidden_states, "timestep": 2, "proj_embedding": proj_embedding, "encoder_hidden_states": encoder_hidden_states, } def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() backend_empty_cache(torch_device) @parameterized.expand( [ # fmt: off [13, [-0.5861, 0.1283, -0.0931, 0.0882, 0.4476, 0.1329, -0.0498, 0.0640]], [37, [-0.4913, 0.0110, -0.0483, 0.0541, 0.4954, -0.0170, 0.0354, 0.1651]], # fmt: on ] ) def test_kandinsky_prior(self, seed, expected_slice): model = PriorTransformer.from_pretrained("kandinsky-community/kandinsky-2-1-prior", subfolder="prior") model.to(torch_device) input = self.get_dummy_seed_input(seed=seed) with torch.no_grad(): sample = model(**input)[0] assert list(sample.shape) == [1, 768] output_slice = sample[0, :8].flatten().cpu() expected_output_slice = torch.tensor(expected_slice) assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)
diffusers/tests/models/transformers/test_models_prior.py/0
{ "file_path": "diffusers/tests/models/transformers/test_models_prior.py", "repo_id": "diffusers", "token_count": 2687 }
192
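The unit test above builds a tiny, randomly initialised `PriorTransformer`; the same configuration doubles as a cheap CPU smoke test. A minimal sketch reusing the test's init arguments and dummy input shapes:

```python
import torch
from diffusers import PriorTransformer

model = PriorTransformer(
    num_attention_heads=2,
    attention_head_dim=4,
    num_layers=2,
    embedding_dim=8,
    num_embeddings=7,
    additional_embeddings=4,
)

batch_size, embedding_dim, num_embeddings = 4, 8, 7
with torch.no_grad():
    out = model(
        hidden_states=torch.randn(batch_size, embedding_dim),
        timestep=2,
        proj_embedding=torch.randn(batch_size, embedding_dim),
        encoder_hidden_states=torch.randn(batch_size, num_embeddings, embedding_dim),
    )[0]
print(out.shape)  # torch.Size([4, 8]); the prior predicts an embedding of the same shape as its input
```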
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import torch from diffusers import LatteTransformer3DModel from diffusers.utils.testing_utils import ( enable_full_determinism, torch_device, ) from ..test_modeling_common import ModelTesterMixin enable_full_determinism() class LatteTransformerTests(ModelTesterMixin, unittest.TestCase): model_class = LatteTransformer3DModel main_input_name = "hidden_states" @property def dummy_input(self): batch_size = 2 num_channels = 4 num_frames = 1 height = width = 8 embedding_dim = 8 sequence_length = 8 hidden_states = torch.randn((batch_size, num_channels, num_frames, height, width)).to(torch_device) encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device) timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device) return { "hidden_states": hidden_states, "encoder_hidden_states": encoder_hidden_states, "timestep": timestep, "enable_temporal_attentions": True, } @property def input_shape(self): return (4, 1, 8, 8) @property def output_shape(self): return (8, 1, 8, 8) def prepare_init_args_and_inputs_for_common(self): init_dict = { "sample_size": 8, "num_layers": 1, "patch_size": 2, "attention_head_dim": 4, "num_attention_heads": 2, "caption_channels": 8, "in_channels": 4, "cross_attention_dim": 8, "out_channels": 8, "attention_bias": True, "activation_fn": "gelu-approximate", "num_embeds_ada_norm": 1000, "norm_type": "ada_norm_single", "norm_elementwise_affine": False, "norm_eps": 1e-6, } inputs_dict = self.dummy_input return init_dict, inputs_dict def test_output(self): super().test_output( expected_output_shape=(self.dummy_input[self.main_input_name].shape[0],) + self.output_shape ) def test_gradient_checkpointing_is_applied(self): expected_set = {"LatteTransformer3DModel"} super().test_gradient_checkpointing_is_applied(expected_set=expected_set)
diffusers/tests/models/transformers/test_models_transformer_latte.py/0
{ "file_path": "diffusers/tests/models/transformers/test_models_transformer_latte.py", "repo_id": "diffusers", "token_count": 1241 }
193
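`test_output` in the Latte test builds its `expected_output_shape` from the batch dimension of the dummy input plus the model-level `output_shape` property. A short sketch of that composition, using the same illustrative shapes as the test rather than a real LatteTransformer3DModel:

```python
import torch

# Values mirror the dummy_input/output_shape style of the test above.
batch_size = 2
output_shape = (8, 1, 8, 8)                      # (channels, frames, height, width)
hidden_states = torch.randn((batch_size, 4, 1, 8, 8))

# The full expected shape prepends the batch dimension of the main input.
expected_output_shape = (hidden_states.shape[0],) + output_shape
assert expected_output_shape == (2, 8, 1, 8, 8)
```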
import gc import unittest from parameterized import parameterized from diffusers import FlaxUNet2DConditionModel from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow if is_flax_available(): import jax import jax.numpy as jnp @slow @require_flax class FlaxUNet2DConditionModelIntegrationTests(unittest.TestCase): def get_file_format(self, seed, shape): return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy" def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False): dtype = jnp.bfloat16 if fp16 else jnp.float32 image = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype) return image def get_unet_model(self, fp16=False, model_id="CompVis/stable-diffusion-v1-4"): dtype = jnp.bfloat16 if fp16 else jnp.float32 revision = "bf16" if fp16 else None model, params = FlaxUNet2DConditionModel.from_pretrained( model_id, subfolder="unet", dtype=dtype, revision=revision ) return model, params def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fp16=False): dtype = jnp.bfloat16 if fp16 else jnp.float32 hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype) return hidden_states @parameterized.expand( [ # fmt: off [83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]], [17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]], [8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]], [3, 1000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]], # fmt: on ] ) def test_compvis_sd_v1_4_flax_vs_torch_fp16(self, seed, timestep, expected_slice): model, params = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4", fp16=True) latents = self.get_latents(seed, fp16=True) encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True) sample = model.apply( {"params": params}, latents, jnp.array(timestep, dtype=jnp.int32), encoder_hidden_states=encoder_hidden_states, ).sample assert sample.shape == latents.shape output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())), dtype=jnp.float32) expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2) @parameterized.expand( [ # fmt: off [83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]], [17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]], [8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]], [3, 1000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]], # fmt: on ] ) def test_stabilityai_sd_v2_flax_vs_torch_fp16(self, seed, timestep, expected_slice): model, params = self.get_unet_model(model_id="stabilityai/stable-diffusion-2", fp16=True) latents = self.get_latents(seed, shape=(4, 4, 96, 96), fp16=True) encoder_hidden_states = self.get_encoder_hidden_states(seed, shape=(4, 77, 1024), fp16=True) sample = model.apply( {"params": params}, latents, jnp.array(timestep, dtype=jnp.int32), encoder_hidden_states=encoder_hidden_states, ).sample assert sample.shape == latents.shape output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())), dtype=jnp.float32) expected_output_slice = jnp.array(expected_slice, 
            dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
diffusers/tests/models/unets/test_models_unet_2d_flax.py/0
{ "file_path": "diffusers/tests/models/unets/test_models_unet_2d_flax.py", "repo_id": "diffusers", "token_count": 2141 }
194
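The Flax UNet tests fetch pre-generated noise arrays by a seed-and-shape naming scheme and then compare a flattened output slice in float32 with a loose tolerance, since the bfloat16 (Flax) and float16 (Torch) reference runs only agree to about 1e-2. A sketch of both pieces, using local random data instead of `load_hf_numpy`:

```python
import numpy as np

def noise_file_name(seed, shape):
    # Mirrors the naming convention of get_file_format above.
    return f"gaussian_noise_s={seed}_shape={'_'.join(str(s) for s in shape)}.npy"

assert noise_file_name(83, (4, 4, 64, 64)) == "gaussian_noise_s=83_shape=4_4_64_64.npy"

# Loose elementwise comparison in float32, as done for the fp16/bf16 cross-check.
rng = np.random.default_rng(0)
output_slice = rng.standard_normal(8).astype(np.float32)
expected_slice = output_slice + rng.normal(scale=1e-3, size=8).astype(np.float32)
assert np.allclose(output_slice, expected_slice, atol=1e-2)
```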
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tempfile import unittest import torch from diffusers import UNet2DConditionModel from diffusers.training_utils import EMAModel from diffusers.utils.testing_utils import enable_full_determinism, skip_mps, torch_device enable_full_determinism() class EMAModelTests(unittest.TestCase): model_id = "hf-internal-testing/tiny-stable-diffusion-pipe" batch_size = 1 prompt_length = 77 text_encoder_hidden_dim = 32 num_in_channels = 4 latent_height = latent_width = 64 generator = torch.manual_seed(0) def get_models(self, decay=0.9999): unet = UNet2DConditionModel.from_pretrained(self.model_id, subfolder="unet") unet = unet.to(torch_device) ema_unet = EMAModel(unet.parameters(), decay=decay, model_cls=UNet2DConditionModel, model_config=unet.config) return unet, ema_unet def get_dummy_inputs(self): noisy_latents = torch.randn( self.batch_size, self.num_in_channels, self.latent_height, self.latent_width, generator=self.generator ).to(torch_device) timesteps = torch.randint(0, 1000, size=(self.batch_size,), generator=self.generator).to(torch_device) encoder_hidden_states = torch.randn( self.batch_size, self.prompt_length, self.text_encoder_hidden_dim, generator=self.generator ).to(torch_device) return noisy_latents, timesteps, encoder_hidden_states def simulate_backprop(self, unet): updated_state_dict = {} for k, param in unet.state_dict().items(): updated_param = torch.randn_like(param) + (param * torch.randn_like(param)) updated_state_dict.update({k: updated_param}) unet.load_state_dict(updated_state_dict) return unet def test_from_pretrained(self): # Save the model parameters to a temporary directory unet, ema_unet = self.get_models() with tempfile.TemporaryDirectory() as tmpdir: ema_unet.save_pretrained(tmpdir) # Load the EMA model from the saved directory loaded_ema_unet = EMAModel.from_pretrained(tmpdir, model_cls=UNet2DConditionModel, foreach=False) loaded_ema_unet.to(torch_device) # Check that the shadow parameters of the loaded model match the original EMA model for original_param, loaded_param in zip(ema_unet.shadow_params, loaded_ema_unet.shadow_params): assert torch.allclose(original_param, loaded_param, atol=1e-4) # Verify that the optimization step is also preserved assert loaded_ema_unet.optimization_step == ema_unet.optimization_step # Check the decay value assert loaded_ema_unet.decay == ema_unet.decay def test_optimization_steps_updated(self): unet, ema_unet = self.get_models() # Take the first (hypothetical) EMA step. ema_unet.step(unet.parameters()) assert ema_unet.optimization_step == 1 # Take two more. for _ in range(2): ema_unet.step(unet.parameters()) assert ema_unet.optimization_step == 3 def test_shadow_params_not_updated(self): unet, ema_unet = self.get_models() # Since the `unet` is not being updated (i.e., backprop'd) # there won't be any difference between the `params` of `unet` # and `ema_unet` even if we call `ema_unet.step(unet.parameters())`. 
ema_unet.step(unet.parameters()) orig_params = list(unet.parameters()) for s_param, param in zip(ema_unet.shadow_params, orig_params): assert torch.allclose(s_param, param) # The above holds true even if we call `ema.step()` multiple times since # `unet` params are still not being updated. for _ in range(4): ema_unet.step(unet.parameters()) for s_param, param in zip(ema_unet.shadow_params, orig_params): assert torch.allclose(s_param, param) def test_shadow_params_updated(self): unet, ema_unet = self.get_models() # Here we simulate the parameter updates for `unet`. Since there might # be some parameters which are initialized to zero we take extra care to # initialize their values to something non-zero before the multiplication. unet_pseudo_updated_step_one = self.simulate_backprop(unet) # Take the EMA step. ema_unet.step(unet_pseudo_updated_step_one.parameters()) # Now the EMA'd parameters won't be equal to the original model parameters. orig_params = list(unet_pseudo_updated_step_one.parameters()) for s_param, param in zip(ema_unet.shadow_params, orig_params): assert ~torch.allclose(s_param, param) # Ensure this is the case when we take multiple EMA steps. for _ in range(4): ema_unet.step(unet.parameters()) for s_param, param in zip(ema_unet.shadow_params, orig_params): assert ~torch.allclose(s_param, param) def test_consecutive_shadow_params_updated(self): # If we call EMA step after a backpropagation consecutively for two times, # the shadow params from those two steps should be different. unet, ema_unet = self.get_models() # First backprop + EMA unet_step_one = self.simulate_backprop(unet) ema_unet.step(unet_step_one.parameters()) step_one_shadow_params = ema_unet.shadow_params # Second backprop + EMA unet_step_two = self.simulate_backprop(unet_step_one) ema_unet.step(unet_step_two.parameters()) step_two_shadow_params = ema_unet.shadow_params for step_one, step_two in zip(step_one_shadow_params, step_two_shadow_params): assert ~torch.allclose(step_one, step_two) def test_zero_decay(self): # If there's no decay even if there are backprops, EMA steps # won't take any effect i.e., the shadow params would remain the # same. unet, ema_unet = self.get_models(decay=0.0) unet_step_one = self.simulate_backprop(unet) ema_unet.step(unet_step_one.parameters()) step_one_shadow_params = ema_unet.shadow_params unet_step_two = self.simulate_backprop(unet_step_one) ema_unet.step(unet_step_two.parameters()) step_two_shadow_params = ema_unet.shadow_params for step_one, step_two in zip(step_one_shadow_params, step_two_shadow_params): assert torch.allclose(step_one, step_two) @skip_mps def test_serialization(self): unet, ema_unet = self.get_models() noisy_latents, timesteps, encoder_hidden_states = self.get_dummy_inputs() with tempfile.TemporaryDirectory() as tmpdir: ema_unet.save_pretrained(tmpdir) loaded_unet = UNet2DConditionModel.from_pretrained(tmpdir, model_cls=UNet2DConditionModel) loaded_unet = loaded_unet.to(unet.device) # Since no EMA step has been performed the outputs should match. 
output = unet(noisy_latents, timesteps, encoder_hidden_states).sample output_loaded = loaded_unet(noisy_latents, timesteps, encoder_hidden_states).sample assert torch.allclose(output, output_loaded, atol=1e-4) class EMAModelTestsForeach(unittest.TestCase): model_id = "hf-internal-testing/tiny-stable-diffusion-pipe" batch_size = 1 prompt_length = 77 text_encoder_hidden_dim = 32 num_in_channels = 4 latent_height = latent_width = 64 generator = torch.manual_seed(0) def get_models(self, decay=0.9999): unet = UNet2DConditionModel.from_pretrained(self.model_id, subfolder="unet") unet = unet.to(torch_device) ema_unet = EMAModel( unet.parameters(), decay=decay, model_cls=UNet2DConditionModel, model_config=unet.config, foreach=True ) return unet, ema_unet def get_dummy_inputs(self): noisy_latents = torch.randn( self.batch_size, self.num_in_channels, self.latent_height, self.latent_width, generator=self.generator ).to(torch_device) timesteps = torch.randint(0, 1000, size=(self.batch_size,), generator=self.generator).to(torch_device) encoder_hidden_states = torch.randn( self.batch_size, self.prompt_length, self.text_encoder_hidden_dim, generator=self.generator ).to(torch_device) return noisy_latents, timesteps, encoder_hidden_states def simulate_backprop(self, unet): updated_state_dict = {} for k, param in unet.state_dict().items(): updated_param = torch.randn_like(param) + (param * torch.randn_like(param)) updated_state_dict.update({k: updated_param}) unet.load_state_dict(updated_state_dict) return unet def test_from_pretrained(self): # Save the model parameters to a temporary directory unet, ema_unet = self.get_models() with tempfile.TemporaryDirectory() as tmpdir: ema_unet.save_pretrained(tmpdir) # Load the EMA model from the saved directory loaded_ema_unet = EMAModel.from_pretrained(tmpdir, model_cls=UNet2DConditionModel, foreach=True) loaded_ema_unet.to(torch_device) # Check that the shadow parameters of the loaded model match the original EMA model for original_param, loaded_param in zip(ema_unet.shadow_params, loaded_ema_unet.shadow_params): assert torch.allclose(original_param, loaded_param, atol=1e-4) # Verify that the optimization step is also preserved assert loaded_ema_unet.optimization_step == ema_unet.optimization_step # Check the decay value assert loaded_ema_unet.decay == ema_unet.decay def test_optimization_steps_updated(self): unet, ema_unet = self.get_models() # Take the first (hypothetical) EMA step. ema_unet.step(unet.parameters()) assert ema_unet.optimization_step == 1 # Take two more. for _ in range(2): ema_unet.step(unet.parameters()) assert ema_unet.optimization_step == 3 def test_shadow_params_not_updated(self): unet, ema_unet = self.get_models() # Since the `unet` is not being updated (i.e., backprop'd) # there won't be any difference between the `params` of `unet` # and `ema_unet` even if we call `ema_unet.step(unet.parameters())`. ema_unet.step(unet.parameters()) orig_params = list(unet.parameters()) for s_param, param in zip(ema_unet.shadow_params, orig_params): assert torch.allclose(s_param, param) # The above holds true even if we call `ema.step()` multiple times since # `unet` params are still not being updated. for _ in range(4): ema_unet.step(unet.parameters()) for s_param, param in zip(ema_unet.shadow_params, orig_params): assert torch.allclose(s_param, param) def test_shadow_params_updated(self): unet, ema_unet = self.get_models() # Here we simulate the parameter updates for `unet`. 
Since there might # be some parameters which are initialized to zero we take extra care to # initialize their values to something non-zero before the multiplication. unet_pseudo_updated_step_one = self.simulate_backprop(unet) # Take the EMA step. ema_unet.step(unet_pseudo_updated_step_one.parameters()) # Now the EMA'd parameters won't be equal to the original model parameters. orig_params = list(unet_pseudo_updated_step_one.parameters()) for s_param, param in zip(ema_unet.shadow_params, orig_params): assert ~torch.allclose(s_param, param) # Ensure this is the case when we take multiple EMA steps. for _ in range(4): ema_unet.step(unet.parameters()) for s_param, param in zip(ema_unet.shadow_params, orig_params): assert ~torch.allclose(s_param, param) def test_consecutive_shadow_params_updated(self): # If we call EMA step after a backpropagation consecutively for two times, # the shadow params from those two steps should be different. unet, ema_unet = self.get_models() # First backprop + EMA unet_step_one = self.simulate_backprop(unet) ema_unet.step(unet_step_one.parameters()) step_one_shadow_params = ema_unet.shadow_params # Second backprop + EMA unet_step_two = self.simulate_backprop(unet_step_one) ema_unet.step(unet_step_two.parameters()) step_two_shadow_params = ema_unet.shadow_params for step_one, step_two in zip(step_one_shadow_params, step_two_shadow_params): assert ~torch.allclose(step_one, step_two) def test_zero_decay(self): # If there's no decay even if there are backprops, EMA steps # won't take any effect i.e., the shadow params would remain the # same. unet, ema_unet = self.get_models(decay=0.0) unet_step_one = self.simulate_backprop(unet) ema_unet.step(unet_step_one.parameters()) step_one_shadow_params = ema_unet.shadow_params unet_step_two = self.simulate_backprop(unet_step_one) ema_unet.step(unet_step_two.parameters()) step_two_shadow_params = ema_unet.shadow_params for step_one, step_two in zip(step_one_shadow_params, step_two_shadow_params): assert torch.allclose(step_one, step_two) @skip_mps def test_serialization(self): unet, ema_unet = self.get_models() noisy_latents, timesteps, encoder_hidden_states = self.get_dummy_inputs() with tempfile.TemporaryDirectory() as tmpdir: ema_unet.save_pretrained(tmpdir) loaded_unet = UNet2DConditionModel.from_pretrained(tmpdir, model_cls=UNet2DConditionModel) loaded_unet = loaded_unet.to(unet.device) # Since no EMA step has been performed the outputs should match. output = unet(noisy_latents, timesteps, encoder_hidden_states).sample output_loaded = loaded_unet(noisy_latents, timesteps, encoder_hidden_states).sample assert torch.allclose(output, output_loaded, atol=1e-4)
diffusers/tests/others/test_ema.py/0
{ "file_path": "diffusers/tests/others/test_ema.py", "repo_id": "diffusers", "token_count": 6188 }
195
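Two details of the EMA tests above are easy to miss. First, the shadow parameters follow the standard exponential-moving-average rule (EMAModel may additionally schedule the effective decay per step, which the sketch below omits). Second, the tests negate `torch.allclose` with the bitwise operator, as in `assert ~torch.allclose(...)`; because `allclose` returns a plain Python bool, `~True` is -2 and `~False` is -1, both truthy, so that assertion can never fail, and `assert not torch.allclose(...)` is the safer way to assert that two tensors differ. A minimal illustration of both points, not using the EMAModel class itself:

```python
import torch

def ema_update(shadow, param, decay):
    # Standard EMA rule: new_shadow = decay * shadow + (1 - decay) * param.
    return decay * shadow + (1.0 - decay) * param

param = torch.randn(4)
shadow = torch.zeros(4)

# With decay=0.0 the shadow simply copies the current parameters after one step.
assert torch.allclose(ema_update(shadow, param, decay=0.0), param)

# Negating allclose: `not` is correct; `~` on a Python bool is always truthy.
a, b = torch.zeros(3), torch.ones(3)
assert not torch.allclose(a, b)   # asserts the tensors really differ
assert ~torch.allclose(a, b)      # passes, but would also pass if they were equal
```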
# Copyright 2025 The HuggingFace Team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import inspect import unittest import numpy as np import torch from transformers import AutoTokenizer, T5EncoderModel from diffusers import AutoencoderKL, CogVideoXDDIMScheduler, CogView3PlusPipeline, CogView3PlusTransformer2DModel from diffusers.utils.testing_utils import ( backend_empty_cache, enable_full_determinism, numpy_cosine_similarity_distance, require_torch_accelerator, slow, torch_device, ) from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import ( PipelineTesterMixin, to_np, ) enable_full_determinism() class CogView3PlusPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = CogView3PlusPipeline params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs"} batch_params = TEXT_TO_IMAGE_BATCH_PARAMS image_params = TEXT_TO_IMAGE_IMAGE_PARAMS image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS required_optional_params = frozenset( [ "num_inference_steps", "generator", "latents", "return_dict", "callback_on_step_end", "callback_on_step_end_tensor_inputs", ] ) test_xformers_attention = False test_layerwise_casting = True test_group_offloading = True def get_dummy_components(self): torch.manual_seed(0) transformer = CogView3PlusTransformer2DModel( patch_size=2, in_channels=4, num_layers=1, attention_head_dim=4, num_attention_heads=2, out_channels=4, text_embed_dim=32, # Must match with tiny-random-t5 time_embed_dim=8, condition_dim=2, pos_embed_max_size=8, sample_size=8, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, ) torch.manual_seed(0) scheduler = CogVideoXDDIMScheduler() text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") components = { "transformer": transformer, "vae": vae, "scheduler": scheduler, "text_encoder": text_encoder, "tokenizer": tokenizer, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "dance monkey", "negative_prompt": "", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "height": 16, "width": 16, "max_sequence_length": 16, "output_type": "pt", } return inputs def test_inference(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = pipe(**inputs)[0] generated_image = image[0] self.assertEqual(generated_image.shape, (3, 16, 16)) expected_image = torch.randn(3, 16, 16) max_diff = np.abs(generated_image - 
expected_image).max() self.assertLessEqual(max_diff, 1e10) def test_callback_inputs(self): sig = inspect.signature(self.pipeline_class.__call__) has_callback_tensor_inputs = "callback_on_step_end_tensor_inputs" in sig.parameters has_callback_step_end = "callback_on_step_end" in sig.parameters if not (has_callback_tensor_inputs and has_callback_step_end): return components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) self.assertTrue( hasattr(pipe, "_callback_tensor_inputs"), f" {self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs", ) def callback_inputs_subset(pipe, i, t, callback_kwargs): # iterate over callback args for tensor_name, tensor_value in callback_kwargs.items(): # check that we're only passing in allowed tensor inputs assert tensor_name in pipe._callback_tensor_inputs return callback_kwargs def callback_inputs_all(pipe, i, t, callback_kwargs): for tensor_name in pipe._callback_tensor_inputs: assert tensor_name in callback_kwargs # iterate over callback args for tensor_name, tensor_value in callback_kwargs.items(): # check that we're only passing in allowed tensor inputs assert tensor_name in pipe._callback_tensor_inputs return callback_kwargs inputs = self.get_dummy_inputs(torch_device) # Test passing in a subset inputs["callback_on_step_end"] = callback_inputs_subset inputs["callback_on_step_end_tensor_inputs"] = ["latents"] output = pipe(**inputs)[0] # Test passing in a everything inputs["callback_on_step_end"] = callback_inputs_all inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs output = pipe(**inputs)[0] def callback_inputs_change_tensor(pipe, i, t, callback_kwargs): is_last = i == (pipe.num_timesteps - 1) if is_last: callback_kwargs["latents"] = torch.zeros_like(callback_kwargs["latents"]) return callback_kwargs inputs["callback_on_step_end"] = callback_inputs_change_tensor inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs output = pipe(**inputs)[0] assert output.abs().sum() < 1e10 def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(batch_size=3, expected_max_diff=1e-3) def test_attention_slicing_forward_pass( self, test_max_difference=True, test_mean_pixel_difference=True, expected_max_diff=1e-3 ): if not self.test_attention_slicing: return components = self.get_dummy_components() pipe = self.pipeline_class(**components) for component in pipe.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) generator_device = "cpu" inputs = self.get_dummy_inputs(generator_device) output_without_slicing = pipe(**inputs)[0] pipe.enable_attention_slicing(slice_size=1) inputs = self.get_dummy_inputs(generator_device) output_with_slicing1 = pipe(**inputs)[0] pipe.enable_attention_slicing(slice_size=2) inputs = self.get_dummy_inputs(generator_device) output_with_slicing2 = pipe(**inputs)[0] if test_max_difference: max_diff1 = np.abs(to_np(output_with_slicing1) - to_np(output_without_slicing)).max() max_diff2 = np.abs(to_np(output_with_slicing2) - to_np(output_without_slicing)).max() self.assertLess( max(max_diff1, max_diff2), expected_max_diff, "Attention slicing should not affect the inference results", ) def test_encode_prompt_works_in_isolation(self): return 
super().test_encode_prompt_works_in_isolation(atol=1e-3, rtol=1e-3) @slow @require_torch_accelerator class CogView3PlusPipelineIntegrationTests(unittest.TestCase): prompt = "A painting of a squirrel eating a burger." def setUp(self): super().setUp() gc.collect() backend_empty_cache(torch_device) def tearDown(self): super().tearDown() gc.collect() backend_empty_cache(torch_device) def test_cogview3plus(self): generator = torch.Generator("cpu").manual_seed(0) pipe = CogView3PlusPipeline.from_pretrained("THUDM/CogView3Plus-3b", torch_dtype=torch.float16) pipe.enable_model_cpu_offload(device=torch_device) prompt = self.prompt images = pipe( prompt=prompt, height=1024, width=1024, generator=generator, num_inference_steps=2, output_type="np", )[0] image = images[0] expected_image = torch.randn(1, 1024, 1024, 3).numpy() max_diff = numpy_cosine_similarity_distance(image, expected_image) assert max_diff < 1e-3, f"Max diff is too high. got {image}"
diffusers/tests/pipelines/cogview3/test_cogview3plus.py/0
{ "file_path": "diffusers/tests/pipelines/cogview3/test_cogview3plus.py", "repo_id": "diffusers", "token_count": 4485 }
196
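The CogView3Plus callback test exercises the `callback_on_step_end` hook: the callback receives the step index, the timestep, and a dict of whitelisted tensors, and the dict it returns is written back into the denoising loop. Below is a minimal sketch of a callback that zeroes the latents on the final step; `pipe` is a hypothetical loaded pipeline, not a concrete checkpoint.

```python
import torch

def zero_latents_on_last_step(pipe, step_index, timestep, callback_kwargs):
    # Only tensors listed in pipe._callback_tensor_inputs are passed in here.
    if step_index == pipe.num_timesteps - 1:
        callback_kwargs["latents"] = torch.zeros_like(callback_kwargs["latents"])
    # The returned dict overrides the corresponding tensors inside the loop.
    return callback_kwargs

# Hypothetical usage; `pipe` would be a loaded diffusers pipeline.
# output = pipe(
#     prompt="dance monkey",
#     callback_on_step_end=zero_latents_on_last_step,
#     callback_on_step_end_tensor_inputs=["latents"],
# )
```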
# coding=utf-8 # Copyright 2025 HuggingFace Inc and The InstantX Team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer, T5EncoderModel, T5TokenizerFast from diffusers import ( AutoencoderKL, FlowMatchEulerDiscreteScheduler, FluxControlNetPipeline, FluxTransformer2DModel, ) from diffusers.models import FluxControlNetModel from diffusers.utils import load_image from diffusers.utils.testing_utils import ( backend_empty_cache, enable_full_determinism, nightly, numpy_cosine_similarity_distance, require_big_accelerator, torch_device, ) from diffusers.utils.torch_utils import randn_tensor from ..test_pipelines_common import FluxIPAdapterTesterMixin, PipelineTesterMixin enable_full_determinism() class FluxControlNetPipelineFastTests(unittest.TestCase, PipelineTesterMixin, FluxIPAdapterTesterMixin): pipeline_class = FluxControlNetPipeline params = frozenset(["prompt", "height", "width", "guidance_scale", "prompt_embeds", "pooled_prompt_embeds"]) batch_params = frozenset(["prompt"]) test_layerwise_casting = True test_group_offloading = True def get_dummy_components(self): torch.manual_seed(0) transformer = FluxTransformer2DModel( patch_size=1, in_channels=16, num_layers=1, num_single_layers=1, attention_head_dim=16, num_attention_heads=2, joint_attention_dim=32, pooled_projection_dim=32, axes_dims_rope=[4, 4, 8], ) torch.manual_seed(0) controlnet = FluxControlNetModel( patch_size=1, in_channels=16, num_layers=1, num_single_layers=1, attention_head_dim=16, num_attention_heads=2, joint_attention_dim=32, pooled_projection_dim=32, axes_dims_rope=[4, 4, 8], ) clip_text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=32, ) torch.manual_seed(0) text_encoder = CLIPTextModel(clip_text_encoder_config) torch.manual_seed(0) text_encoder_2 = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") tokenizer_2 = T5TokenizerFast.from_pretrained("hf-internal-testing/tiny-random-t5") torch.manual_seed(0) vae = AutoencoderKL( sample_size=32, in_channels=3, out_channels=3, block_out_channels=(4,), layers_per_block=1, latent_channels=4, norm_num_groups=1, use_quant_conv=False, use_post_quant_conv=False, shift_factor=0.0609, scaling_factor=1.5035, ) scheduler = FlowMatchEulerDiscreteScheduler() return { "scheduler": scheduler, "text_encoder": text_encoder, "text_encoder_2": text_encoder_2, "tokenizer": tokenizer, "tokenizer_2": tokenizer_2, "transformer": transformer, "vae": vae, "controlnet": controlnet, "image_encoder": None, "feature_extractor": None, } def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = 
torch.Generator(device="cpu").manual_seed(seed) control_image = randn_tensor( (1, 3, 32, 32), generator=generator, device=torch.device(device), dtype=torch.float16, ) controlnet_conditioning_scale = 0.5 inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 3.5, "output_type": "np", "control_image": control_image, "controlnet_conditioning_scale": controlnet_conditioning_scale, } return inputs def test_controlnet_flux(self): components = self.get_dummy_components() flux_pipe = FluxControlNetPipeline(**components) flux_pipe = flux_pipe.to(torch_device, dtype=torch.float16) flux_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) output = flux_pipe(**inputs) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) expected_slice = np.array( [0.47387695, 0.63134766, 0.5605469, 0.61621094, 0.7207031, 0.7089844, 0.70410156, 0.6113281, 0.64160156] ) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2, ( f"Expected: {expected_slice}, got: {image_slice.flatten()}" ) @unittest.skip("xFormersAttnProcessor does not work with SD3 Joint Attention") def test_xformers_attention_forwardGenerator_pass(self): pass def test_flux_image_output_shape(self): pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device) inputs = self.get_dummy_inputs(torch_device) height_width_pairs = [(32, 32), (72, 56)] for height, width in height_width_pairs: expected_height = height - height % (pipe.vae_scale_factor * 2) expected_width = width - width % (pipe.vae_scale_factor * 2) inputs.update( { "control_image": randn_tensor( (1, 3, height, width), device=torch_device, dtype=torch.float16, ) } ) image = pipe(**inputs).images[0] output_height, output_width, _ = image.shape assert (output_height, output_width) == (expected_height, expected_width) @nightly @require_big_accelerator class FluxControlNetPipelineSlowTests(unittest.TestCase): pipeline_class = FluxControlNetPipeline def setUp(self): super().setUp() gc.collect() backend_empty_cache(torch_device) def tearDown(self): super().tearDown() gc.collect() backend_empty_cache(torch_device) def test_canny(self): controlnet = FluxControlNetModel.from_pretrained( "InstantX/FLUX.1-dev-Controlnet-Canny-alpha", torch_dtype=torch.bfloat16 ) pipe = FluxControlNetPipeline.from_pretrained( "black-forest-labs/FLUX.1-dev", text_encoder=None, text_encoder_2=None, controlnet=controlnet, torch_dtype=torch.bfloat16, ).to(torch_device) pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) control_image = load_image( "https://huggingface.co/InstantX/FLUX.1-dev-Controlnet-Canny-alpha/resolve/main/canny.jpg" ).resize((512, 512)) prompt_embeds = torch.load( hf_hub_download(repo_id="diffusers/test-slices", repo_type="dataset", filename="flux/prompt_embeds.pt") ).to(torch_device) pooled_prompt_embeds = torch.load( hf_hub_download( repo_id="diffusers/test-slices", repo_type="dataset", filename="flux/pooled_prompt_embeds.pt" ) ).to(torch_device) output = pipe( prompt_embeds=prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, control_image=control_image, controlnet_conditioning_scale=0.6, num_inference_steps=2, guidance_scale=3.5, max_sequence_length=256, output_type="np", height=512, width=512, generator=generator, ) image = output.images[0] assert image.shape == (512, 512, 3) original_image = image[-3:, -3:, -1].flatten() expected_image = np.array([0.2734, 0.2852, 0.2852, 
            0.2734, 0.2754, 0.2891, 0.2617, 0.2637, 0.2773])

        assert numpy_cosine_similarity_distance(original_image.flatten(), expected_image) < 1e-2
diffusers/tests/pipelines/controlnet_flux/test_controlnet_flux.py/0
{ "file_path": "diffusers/tests/pipelines/controlnet_flux/test_controlnet_flux.py", "repo_id": "diffusers", "token_count": 4464 }
197
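Several of the integration tests above and below (Flux ControlNet, CogView3Plus, HunyuanDiT) score outputs with `numpy_cosine_similarity_distance` rather than an elementwise max difference, which makes the check robust to small uniform shifts in the slice. The helper lives in `diffusers.utils.testing_utils`; the sketch below is a simplified re-derivation of the idea and not necessarily the exact implementation.

```python
import numpy as np

def cosine_similarity_distance(a, b):
    # 1 - cosine similarity of the flattened arrays; 0 means identical direction.
    a = np.asarray(a, dtype=np.float64).flatten()
    b = np.asarray(b, dtype=np.float64).flatten()
    similarity = np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))
    return 1.0 - similarity

slice_a = np.array([0.2734, 0.2852, 0.2852, 0.2734, 0.2754, 0.2891])
slice_b = slice_a + 1e-4  # nearly identical direction
assert cosine_similarity_distance(slice_a, slice_b) < 1e-2
```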
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import tempfile import unittest import numpy as np import torch from transformers import AutoTokenizer, BertModel, T5EncoderModel from diffusers import AutoencoderKL, DDPMScheduler, HunyuanDiT2DModel, HunyuanDiTPipeline from diffusers.utils.testing_utils import ( backend_empty_cache, enable_full_determinism, numpy_cosine_similarity_distance, require_torch_accelerator, slow, torch_device, ) from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import ( PipelineTesterMixin, check_qkv_fusion_matches_attn_procs_length, check_qkv_fusion_processors_exist, to_np, ) enable_full_determinism() class HunyuanDiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = HunyuanDiTPipeline params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs"} batch_params = TEXT_TO_IMAGE_BATCH_PARAMS image_params = TEXT_TO_IMAGE_IMAGE_PARAMS image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS required_optional_params = PipelineTesterMixin.required_optional_params test_layerwise_casting = True def get_dummy_components(self): torch.manual_seed(0) transformer = HunyuanDiT2DModel( sample_size=16, num_layers=2, patch_size=2, attention_head_dim=8, num_attention_heads=3, in_channels=4, cross_attention_dim=32, cross_attention_dim_t5=32, pooled_projection_dim=16, hidden_size=24, activation_fn="gelu-approximate", ) torch.manual_seed(0) vae = AutoencoderKL() scheduler = DDPMScheduler() text_encoder = BertModel.from_pretrained("hf-internal-testing/tiny-random-BertModel") tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel") text_encoder_2 = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") tokenizer_2 = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") components = { "transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler, "text_encoder": text_encoder, "tokenizer": tokenizer, "text_encoder_2": text_encoder_2, "tokenizer_2": tokenizer_2, "safety_checker": None, "feature_extractor": None, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 5.0, "output_type": "np", "use_resolution_binning": False, } return inputs def test_inference(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] self.assertEqual(image.shape, (1, 16, 16, 3)) expected_slice = np.array( [0.56939435, 0.34541583, 0.35915792, 0.46489206, 0.38775963, 0.45004836, 0.5957267, 0.59481275, 0.33287364] ) 
max_diff = np.abs(image_slice.flatten() - expected_slice).max() self.assertLessEqual(max_diff, 1e-3) @unittest.skip("The HunyuanDiT Attention pooling layer does not support sequential CPU offloading.") def test_sequential_cpu_offload_forward_pass(self): # TODO(YiYi) need to fix later # This is because it instantiates it's attention layer from torch.nn.MultiheadAttention, which calls to # `torch.nn.functional.multi_head_attention_forward` with the weights and bias. Since the hook is never # triggered with a forward pass call, the weights stay on the CPU. There are more examples where we skip # this test because of MHA (example: HunyuanVideo Framepack) pass @unittest.skip("The HunyuanDiT Attention pooling layer does not support sequential CPU offloading.") def test_sequential_offload_forward_pass_twice(self): # TODO(YiYi) need to fix later # This is because it instantiates it's attention layer from torch.nn.MultiheadAttention, which calls to # `torch.nn.functional.multi_head_attention_forward` with the weights and bias. Since the hook is never # triggered with a forward pass call, the weights stay on the CPU. There are more examples where we skip # this test because of MHA (example: HunyuanVideo Framepack) pass def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical( expected_max_diff=1e-3, ) def test_feed_forward_chunking(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = pipe(**inputs).images image_slice_no_chunking = image[0, -3:, -3:, -1] pipe.transformer.enable_forward_chunking(chunk_size=1, dim=0) inputs = self.get_dummy_inputs(device) image = pipe(**inputs).images image_slice_chunking = image[0, -3:, -3:, -1] max_diff = np.abs(to_np(image_slice_no_chunking) - to_np(image_slice_chunking)).max() self.assertLess(max_diff, 1e-4) def test_fused_qkv_projections(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) inputs["return_dict"] = False image = pipe(**inputs)[0] original_image_slice = image[0, -3:, -3:, -1] pipe.transformer.fuse_qkv_projections() # TODO (sayakpaul): will refactor this once `fuse_qkv_projections()` has been added # to the pipeline level. pipe.transformer.fuse_qkv_projections() assert check_qkv_fusion_processors_exist(pipe.transformer), ( "Something wrong with the fused attention processors. Expected all the attention processors to be fused." ) assert check_qkv_fusion_matches_attn_procs_length( pipe.transformer, pipe.transformer.original_attn_processors ), "Something wrong with the attention processors concerning the fused QKV projections." inputs = self.get_dummy_inputs(device) inputs["return_dict"] = False image_fused = pipe(**inputs)[0] image_slice_fused = image_fused[0, -3:, -3:, -1] pipe.transformer.unfuse_qkv_projections() inputs = self.get_dummy_inputs(device) inputs["return_dict"] = False image_disabled = pipe(**inputs)[0] image_slice_disabled = image_disabled[0, -3:, -3:, -1] assert np.allclose(original_image_slice, image_slice_fused, atol=1e-2, rtol=1e-2), ( "Fusion of QKV projections shouldn't affect the outputs." 
) assert np.allclose(image_slice_fused, image_slice_disabled, atol=1e-2, rtol=1e-2), ( "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled." ) assert np.allclose(original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2), ( "Original outputs should match when fused QKV projections are disabled." ) @unittest.skip( "Test not supported as `encode_prompt` is called two times separately which deivates from about 99% of the pipelines we have." ) def test_encode_prompt_works_in_isolation(self): pass def test_save_load_optional_components(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) prompt = inputs["prompt"] generator = inputs["generator"] num_inference_steps = inputs["num_inference_steps"] output_type = inputs["output_type"] ( prompt_embeds, negative_prompt_embeds, prompt_attention_mask, negative_prompt_attention_mask, ) = pipe.encode_prompt(prompt, device=torch_device, dtype=torch.float32, text_encoder_index=0) ( prompt_embeds_2, negative_prompt_embeds_2, prompt_attention_mask_2, negative_prompt_attention_mask_2, ) = pipe.encode_prompt( prompt, device=torch_device, dtype=torch.float32, text_encoder_index=1, ) # inputs with prompt converted to embeddings inputs = { "prompt_embeds": prompt_embeds, "prompt_attention_mask": prompt_attention_mask, "negative_prompt_embeds": negative_prompt_embeds, "negative_prompt_attention_mask": negative_prompt_attention_mask, "prompt_embeds_2": prompt_embeds_2, "prompt_attention_mask_2": prompt_attention_mask_2, "negative_prompt_embeds_2": negative_prompt_embeds_2, "negative_prompt_attention_mask_2": negative_prompt_attention_mask_2, "generator": generator, "num_inference_steps": num_inference_steps, "output_type": output_type, "use_resolution_binning": False, } # set all optional components to None for optional_component in pipe._optional_components: setattr(pipe, optional_component, None) output = pipe(**inputs)[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(tmpdir) pipe_loaded = self.pipeline_class.from_pretrained(tmpdir) pipe_loaded.to(torch_device) pipe_loaded.set_progress_bar_config(disable=None) for optional_component in pipe._optional_components: self.assertTrue( getattr(pipe_loaded, optional_component) is None, f"`{optional_component}` did not stay set to None after loading.", ) inputs = self.get_dummy_inputs(torch_device) generator = inputs["generator"] num_inference_steps = inputs["num_inference_steps"] output_type = inputs["output_type"] # inputs with prompt converted to embeddings inputs = { "prompt_embeds": prompt_embeds, "prompt_attention_mask": prompt_attention_mask, "negative_prompt_embeds": negative_prompt_embeds, "negative_prompt_attention_mask": negative_prompt_attention_mask, "prompt_embeds_2": prompt_embeds_2, "prompt_attention_mask_2": prompt_attention_mask_2, "negative_prompt_embeds_2": negative_prompt_embeds_2, "negative_prompt_attention_mask_2": negative_prompt_attention_mask_2, "generator": generator, "num_inference_steps": num_inference_steps, "output_type": output_type, "use_resolution_binning": False, } output_loaded = pipe_loaded(**inputs)[0] max_diff = np.abs(to_np(output) - to_np(output_loaded)).max() self.assertLess(max_diff, 1e-4) @slow @require_torch_accelerator class HunyuanDiTPipelineIntegrationTests(unittest.TestCase): prompt = "一个宇航员在骑马" def setUp(self): super().setUp() gc.collect() 
backend_empty_cache(torch_device) def tearDown(self): super().tearDown() gc.collect() backend_empty_cache(torch_device) def test_hunyuan_dit_1024(self): generator = torch.Generator("cpu").manual_seed(0) pipe = HunyuanDiTPipeline.from_pretrained( "XCLiu/HunyuanDiT-0523", revision="refs/pr/2", torch_dtype=torch.float16 ) pipe.enable_model_cpu_offload(device=torch_device) prompt = self.prompt image = pipe( prompt=prompt, height=1024, width=1024, generator=generator, num_inference_steps=2, output_type="np" ).images image_slice = image[0, -3:, -3:, -1] expected_slice = np.array( [0.48388672, 0.33789062, 0.30737305, 0.47875977, 0.25097656, 0.30029297, 0.4440918, 0.26953125, 0.30078125] ) max_diff = numpy_cosine_similarity_distance(image_slice.flatten(), expected_slice) assert max_diff < 1e-3, f"Max diff is too high. got {image_slice.flatten()}"
diffusers/tests/pipelines/hunyuandit/test_hunyuan_dit.py/0
{ "file_path": "diffusers/tests/pipelines/hunyuandit/test_hunyuan_dit.py", "repo_id": "diffusers", "token_count": 6062 }
198
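`test_fused_qkv_projections` checks that fusing the query/key/value projections of each attention layer into a single matmul leaves the pipeline output unchanged. The sketch below shows the linear-algebra reason with plain `nn.Linear` layers: concatenating the three weight matrices and splitting the result of one matmul is equivalent to three separate matmuls. It illustrates the idea only and is not the diffusers attention-processor code.

```python
import torch
import torch.nn as nn

dim = 16
torch.manual_seed(0)
to_q, to_k, to_v = (nn.Linear(dim, dim) for _ in range(3))

# Build one fused projection from the three separate ones.
fused = nn.Linear(dim, 3 * dim)
fused.weight.data = torch.cat([to_q.weight.data, to_k.weight.data, to_v.weight.data], dim=0)
fused.bias.data = torch.cat([to_q.bias.data, to_k.bias.data, to_v.bias.data], dim=0)

x = torch.randn(2, 8, dim)
q, k, v = fused(x).chunk(3, dim=-1)

# The fused path reproduces the separate projections.
assert torch.allclose(q, to_q(x), atol=1e-5)
assert torch.allclose(k, to_k(x), atol=1e-5)
assert torch.allclose(v, to_v(x), atol=1e-5)
```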
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DPMSolverMultistepScheduler, LEditsPPPipelineStableDiffusion, UNet2DConditionModel, ) from diffusers.utils.testing_utils import ( Expectations, backend_empty_cache, enable_full_determinism, floats_tensor, load_image, require_torch_accelerator, skip_mps, slow, torch_device, ) enable_full_determinism() @skip_mps class LEditsPPPipelineStableDiffusionFastTests(unittest.TestCase): pipeline_class = LEditsPPPipelineStableDiffusion def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, ) scheduler = DPMSolverMultistepScheduler(algorithm_type="sde-dpmsolver++", solver_order=2) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "generator": generator, "editing_prompt": ["wearing glasses", "sunshine"], "reverse_editing_direction": [False, True], "edit_guidance_scale": [10.0, 5.0], } return inputs def get_dummy_inversion_inputs(self, device, seed=0): images = floats_tensor((2, 3, 32, 32), rng=random.Random(0)).cpu().permute(0, 2, 3, 1) images = 255 * images image_1 = Image.fromarray(np.uint8(images[0])).convert("RGB") image_2 = Image.fromarray(np.uint8(images[1])).convert("RGB") if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "image": [image_1, image_2], "source_prompt": "", "source_guidance_scale": 3.5, "num_inversion_steps": 20, "skip": 0.15, "generator": generator, } return inputs def test_ledits_pp_inversion(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components 
= self.get_dummy_components() sd_pipe = LEditsPPPipelineStableDiffusion(**components) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inversion_inputs(device) inputs["image"] = inputs["image"][0] sd_pipe.invert(**inputs) assert sd_pipe.init_latents.shape == ( 1, 4, int(32 / sd_pipe.vae_scale_factor), int(32 / sd_pipe.vae_scale_factor), ) latent_slice = sd_pipe.init_latents[0, -1, -3:, -3:].to(device) expected_slice = np.array([-0.9084, -0.0367, 0.2940, 0.0839, 0.6890, 0.2651, -0.7104, 2.1090, -0.7822]) assert np.abs(latent_slice.flatten() - expected_slice).max() < 1e-3 def test_ledits_pp_inversion_batch(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = LEditsPPPipelineStableDiffusion(**components) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inversion_inputs(device) sd_pipe.invert(**inputs) assert sd_pipe.init_latents.shape == ( 2, 4, int(32 / sd_pipe.vae_scale_factor), int(32 / sd_pipe.vae_scale_factor), ) latent_slice = sd_pipe.init_latents[0, -1, -3:, -3:].to(device) expected_slice = np.array([0.2528, 0.1458, -0.2166, 0.4565, -0.5657, -1.0286, -0.9961, 0.5933, 1.1173]) assert np.abs(latent_slice.flatten() - expected_slice).max() < 1e-3 latent_slice = sd_pipe.init_latents[1, -1, -3:, -3:].to(device) expected_slice = np.array([-0.0796, 2.0583, 0.5501, 0.5358, 0.0282, -0.2803, -1.0470, 0.7023, -0.0072]) assert np.abs(latent_slice.flatten() - expected_slice).max() < 1e-3 def test_ledits_pp_warmup_steps(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() pipe = LEditsPPPipelineStableDiffusion(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inversion_inputs = self.get_dummy_inversion_inputs(device) pipe.invert(**inversion_inputs) inputs = self.get_dummy_inputs(device) inputs["edit_warmup_steps"] = [0, 5] pipe(**inputs).images inputs["edit_warmup_steps"] = [5, 0] pipe(**inputs).images inputs["edit_warmup_steps"] = [5, 10] pipe(**inputs).images inputs["edit_warmup_steps"] = [10, 5] pipe(**inputs).images @slow @require_torch_accelerator class LEditsPPPipelineStableDiffusionSlowTests(unittest.TestCase): def setUp(self): super().setUp() gc.collect() backend_empty_cache(torch_device) def tearDown(self): super().tearDown() gc.collect() backend_empty_cache(torch_device) @classmethod def setUpClass(cls): raw_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/pix2pix/cat_6.png" ) raw_image = raw_image.convert("RGB").resize((512, 512)) cls.raw_image = raw_image def test_ledits_pp_editing(self): pipe = LEditsPPPipelineStableDiffusion.from_pretrained( "stable-diffusion-v1-5/stable-diffusion-v1-5", safety_checker=None, torch_dtype=torch.float16 ) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) generator = torch.manual_seed(0) _ = pipe.invert(image=self.raw_image, generator=generator) generator = torch.manual_seed(0) inputs = { "generator": generator, "editing_prompt": ["cat", "dog"], "reverse_editing_direction": [True, False], "edit_guidance_scale": [5.0, 5.0], "edit_threshold": [0.8, 0.8], } reconstruction = pipe(**inputs, output_type="np").images[0] output_slice = reconstruction[150:153, 140:143, -1] output_slice = output_slice.flatten() expected_slices = Expectations( { ("xpu", 3): np.array( [ 0.9511719, 0.94140625, 
0.87597656, 0.9472656, 0.9296875, 0.8378906, 0.94433594, 0.91503906, 0.8491211, ] ), ("cuda", 7): np.array( [ 0.9453125, 0.93310547, 0.84521484, 0.94628906, 0.9111328, 0.80859375, 0.93847656, 0.9042969, 0.8144531, ] ), } ) expected_slice = expected_slices.get_expectation() assert np.abs(output_slice - expected_slice).max() < 1e-2
diffusers/tests/pipelines/ledits_pp/test_ledits_pp_stable_diffusion.py/0
{ "file_path": "diffusers/tests/pipelines/ledits_pp/test_ledits_pp_stable_diffusion.py", "repo_id": "diffusers", "token_count": 4919 }
199
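The LEdits++ integration test keys its reference slices on the backend (and, for CUDA, a version number) through the `Expectations` helper, because numerics differ slightly between XPU and CUDA hardware. Below is a simplified stand-in for that lookup, assuming the dict-keyed interface seen above; the real helper in `diffusers.utils.testing_utils` may resolve the current device differently.

```python
import numpy as np

class DeviceExpectations:
    """Pick a reference array based on (device_type, major_version)."""

    def __init__(self, table):
        self.table = table

    def get_expectation(self, device_type="cuda", major=7):
        # Fall back to any entry for the device type if the exact version is missing.
        if (device_type, major) in self.table:
            return self.table[(device_type, major)]
        for (dev, _), value in self.table.items():
            if dev == device_type:
                return value
        raise KeyError(f"No expectation recorded for {device_type}")

expected = DeviceExpectations(
    {
        ("xpu", 3): np.array([0.9512, 0.9414, 0.8760]),
        ("cuda", 7): np.array([0.9453, 0.9331, 0.8452]),
    }
)
assert np.allclose(expected.get_expectation("cuda", 7), [0.9453, 0.9331, 0.8452])
```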
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import inspect import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, AutoencoderTiny, AutoPipelineForImage2Image, EulerDiscreteScheduler, StableDiffusionImg2ImgPipeline, StableDiffusionPAGImg2ImgPipeline, UNet2DConditionModel, ) from diffusers.utils.testing_utils import ( backend_empty_cache, enable_full_determinism, floats_tensor, load_image, require_torch_accelerator, slow, torch_device, ) from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS, ) from ..test_pipelines_common import ( IPAdapterTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, ) enable_full_determinism() class StableDiffusionPAGImg2ImgPipelineFastTests( IPAdapterTesterMixin, PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase, ): pipeline_class = StableDiffusionPAGImg2ImgPipeline params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"pag_scale", "pag_adaptive_scale"}) - {"height", "width"} required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"} batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS callback_cfg_params = TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS def get_dummy_components(self, time_cond_proj_dim=None): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, time_cond_proj_dim=time_cond_proj_dim, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, ) scheduler = EulerDiscreteScheduler( beta_start=0.00085, beta_end=0.012, steps_offset=1, beta_schedule="scaled_linear", timestep_spacing="leading", ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, ) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, "image_encoder": None, } return components def get_dummy_tiny_autoencoder(self): return AutoencoderTiny(in_channels=3, out_channels=3, latent_channels=4) def 
get_dummy_inputs(self, device, seed=0): image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) image = image / 2 + 0.5 if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "image": image, "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "pag_scale": 0.9, "output_type": "np", } return inputs def test_pag_disable_enable(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() # base pipeline (expect same output when pag is disabled) pipe_sd = StableDiffusionImg2ImgPipeline(**components) pipe_sd = pipe_sd.to(device) pipe_sd.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) del inputs["pag_scale"] assert "pag_scale" not in inspect.signature(pipe_sd.__call__).parameters, ( f"`pag_scale` should not be a call parameter of the base pipeline {pipe_sd.__class__.__name__}." ) out = pipe_sd(**inputs).images[0, -3:, -3:, -1] # pag disabled with pag_scale=0.0 pipe_pag = self.pipeline_class(**components) pipe_pag = pipe_pag.to(device) pipe_pag.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) inputs["pag_scale"] = 0.0 out_pag_disabled = pipe_pag(**inputs).images[0, -3:, -3:, -1] # pag enabled pipe_pag = self.pipeline_class(**components, pag_applied_layers=["mid", "up", "down"]) pipe_pag = pipe_pag.to(device) pipe_pag.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) out_pag_enabled = pipe_pag(**inputs).images[0, -3:, -3:, -1] assert np.abs(out.flatten() - out_pag_disabled.flatten()).max() < 1e-3 assert np.abs(out.flatten() - out_pag_enabled.flatten()).max() > 1e-3 def test_pag_inference(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() pipe_pag = self.pipeline_class(**components, pag_applied_layers=["mid", "up", "down"]) pipe_pag = pipe_pag.to(device) pipe_pag.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = pipe_pag(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == ( 1, 32, 32, 3, ), f"the shape of the output image should be (1, 32, 32, 3) but got {image.shape}" expected_slice = np.array( [0.44203848, 0.49598145, 0.42248967, 0.6707724, 0.5683791, 0.43603387, 0.58316565, 0.60077155, 0.5174199] ) max_diff = np.abs(image_slice.flatten() - expected_slice).max() self.assertLessEqual(max_diff, 1e-3) def test_encode_prompt_works_in_isolation(self): extra_required_param_value_dict = { "device": torch.device(torch_device).type, "do_classifier_free_guidance": self.get_dummy_inputs(device=torch_device).get("guidance_scale", 1.0) > 1.0, } return super().test_encode_prompt_works_in_isolation(extra_required_param_value_dict) @slow @require_torch_accelerator class StableDiffusionPAGImg2ImgPipelineIntegrationTests(unittest.TestCase): pipeline_class = StableDiffusionPAGImg2ImgPipeline repo_id = "Jiali/stable-diffusion-1.5" def setUp(self): super().setUp() gc.collect() backend_empty_cache(torch_device) def tearDown(self): super().tearDown() gc.collect() backend_empty_cache(torch_device) def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): generator = torch.Generator(device=generator_device).manual_seed(seed) init_image = load_image( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" 
"/stable_diffusion_img2img/sketch-mountains-input.png" ) inputs = { "prompt": "a fantasy landscape, concept art, high resolution", "image": init_image, "generator": generator, "num_inference_steps": 3, "strength": 0.75, "guidance_scale": 7.5, "pag_scale": 3.0, "output_type": "np", } return inputs def test_pag_cfg(self): pipeline = AutoPipelineForImage2Image.from_pretrained(self.repo_id, enable_pag=True, torch_dtype=torch.float16) pipeline.enable_model_cpu_offload(device=torch_device) pipeline.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = pipeline(**inputs).images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array( [0.58251953, 0.5722656, 0.5683594, 0.55029297, 0.52001953, 0.52001953, 0.49951172, 0.45410156, 0.50146484] ) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3, ( f"output is different from expected, {image_slice.flatten()}" ) def test_pag_uncond(self): pipeline = AutoPipelineForImage2Image.from_pretrained(self.repo_id, enable_pag=True, torch_dtype=torch.float16) pipeline.enable_model_cpu_offload(device=torch_device) pipeline.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device, guidance_scale=0.0) image = pipeline(**inputs).images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array( [0.5986328, 0.52441406, 0.3972168, 0.4741211, 0.34985352, 0.22705078, 0.4128418, 0.2866211, 0.31713867] ) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3, ( f"output is different from expected, {image_slice.flatten()}" )
diffusers/tests/pipelines/pag/test_pag_sd_img2img.py/0
{ "file_path": "diffusers/tests/pipelines/pag/test_pag_sd_img2img.py", "repo_id": "diffusers", "token_count": 4967 }
200
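The fast and integration tests above exercise `StableDiffusionPAGImg2ImgPipeline` through `AutoPipelineForImage2Image` with `enable_pag=True`. A minimal usage sketch, mirroring the integration test's repo id, image URL, and parameters (all taken from the test rather than prescribed here), might look like:

```python
# Hedged sketch of PAG-enabled img2img, following the integration test above.
# Assumes a CUDA-capable device and Hub access; "Jiali/stable-diffusion-1.5" is
# simply the checkpoint the test happens to use.
import torch

from diffusers import AutoPipelineForImage2Image
from diffusers.utils import load_image

pipeline = AutoPipelineForImage2Image.from_pretrained(
    "Jiali/stable-diffusion-1.5", enable_pag=True, torch_dtype=torch.float16
)
pipeline.enable_model_cpu_offload()

init_image = load_image(
    "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
    "/stable_diffusion_img2img/sketch-mountains-input.png"
)
image = pipeline(
    prompt="a fantasy landscape, concept art, high resolution",
    image=init_image,
    strength=0.75,
    guidance_scale=7.5,
    pag_scale=3.0,  # setting this to 0.0 disables PAG, which is what test_pag_disable_enable checks
    num_inference_steps=30,
    generator=torch.Generator("cpu").manual_seed(0),
).images[0]
image.save("pag_img2img.png")
```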
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest import numpy as np import torch from transformers import ( T5EncoderModel, T5Tokenizer, ) from diffusers import ( AutoencoderOobleck, CosineDPMSolverMultistepScheduler, StableAudioDiTModel, StableAudioPipeline, StableAudioProjectionModel, ) from diffusers.utils import is_xformers_available from diffusers.utils.testing_utils import ( Expectations, backend_empty_cache, enable_full_determinism, nightly, require_torch_accelerator, torch_device, ) from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class StableAudioPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = StableAudioPipeline params = frozenset( [ "prompt", "audio_end_in_s", "audio_start_in_s", "guidance_scale", "negative_prompt", "prompt_embeds", "negative_prompt_embeds", "initial_audio_waveforms", ] ) batch_params = TEXT_TO_AUDIO_BATCH_PARAMS required_optional_params = frozenset( [ "num_inference_steps", "num_waveforms_per_prompt", "generator", "latents", "output_type", "return_dict", "callback", "callback_steps", ] ) # There is not xformers version of the StableAudioPipeline custom attention processor test_xformers_attention = False supports_dduf = False def get_dummy_components(self): torch.manual_seed(0) transformer = StableAudioDiTModel( sample_size=4, in_channels=3, num_layers=2, attention_head_dim=4, num_key_value_attention_heads=2, out_channels=3, cross_attention_dim=4, time_proj_dim=8, global_states_input_dim=8, cross_attention_input_dim=4, ) scheduler = CosineDPMSolverMultistepScheduler( solver_order=2, prediction_type="v_prediction", sigma_data=1.0, sigma_schedule="exponential", ) torch.manual_seed(0) vae = AutoencoderOobleck( encoder_hidden_size=6, downsampling_ratios=[1, 2], decoder_channels=3, decoder_input_channels=3, audio_channels=2, channel_multiples=[2, 4], sampling_rate=4, ) torch.manual_seed(0) t5_repo_id = "hf-internal-testing/tiny-random-T5ForConditionalGeneration" text_encoder = T5EncoderModel.from_pretrained(t5_repo_id) tokenizer = T5Tokenizer.from_pretrained(t5_repo_id, truncation=True, model_max_length=25) torch.manual_seed(0) projection_model = StableAudioProjectionModel( text_encoder_dim=text_encoder.config.d_model, conditioning_dim=4, min_value=0, max_value=32, ) components = { "transformer": transformer, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "projection_model": projection_model, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A hammer hitting a wooden surface", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, } return inputs def test_save_load_local(self): # increase tolerance from 1e-4 -> 7e-3 to account for large composite model 
super().test_save_load_local(expected_max_difference=7e-3) def test_save_load_optional_components(self): # increase tolerance from 1e-4 -> 7e-3 to account for large composite model super().test_save_load_optional_components(expected_max_difference=7e-3) def test_stable_audio_ddim(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() stable_audio_pipe = StableAudioPipeline(**components) stable_audio_pipe = stable_audio_pipe.to(torch_device) stable_audio_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) output = stable_audio_pipe(**inputs) audio = output.audios[0] assert audio.ndim == 2 assert audio.shape == (2, 7) def test_stable_audio_without_prompts(self): components = self.get_dummy_components() stable_audio_pipe = StableAudioPipeline(**components) stable_audio_pipe = stable_audio_pipe.to(torch_device) stable_audio_pipe = stable_audio_pipe.to(torch_device) stable_audio_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) inputs["prompt"] = 3 * [inputs["prompt"]] # forward output = stable_audio_pipe(**inputs) audio_1 = output.audios[0] inputs = self.get_dummy_inputs(torch_device) prompt = 3 * [inputs.pop("prompt")] text_inputs = stable_audio_pipe.tokenizer( prompt, padding="max_length", max_length=stable_audio_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt", ).to(torch_device) text_input_ids = text_inputs.input_ids attention_mask = text_inputs.attention_mask prompt_embeds = stable_audio_pipe.text_encoder( text_input_ids, attention_mask=attention_mask, )[0] inputs["prompt_embeds"] = prompt_embeds inputs["attention_mask"] = attention_mask # forward output = stable_audio_pipe(**inputs) audio_2 = output.audios[0] assert (audio_1 - audio_2).abs().max() < 1e-2 def test_stable_audio_negative_without_prompts(self): components = self.get_dummy_components() stable_audio_pipe = StableAudioPipeline(**components) stable_audio_pipe = stable_audio_pipe.to(torch_device) stable_audio_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) negative_prompt = 3 * ["this is a negative prompt"] inputs["negative_prompt"] = negative_prompt inputs["prompt"] = 3 * [inputs["prompt"]] # forward output = stable_audio_pipe(**inputs) audio_1 = output.audios[0] inputs = self.get_dummy_inputs(torch_device) prompt = 3 * [inputs.pop("prompt")] text_inputs = stable_audio_pipe.tokenizer( prompt, padding="max_length", max_length=stable_audio_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt", ).to(torch_device) text_input_ids = text_inputs.input_ids attention_mask = text_inputs.attention_mask prompt_embeds = stable_audio_pipe.text_encoder( text_input_ids, attention_mask=attention_mask, )[0] inputs["prompt_embeds"] = prompt_embeds inputs["attention_mask"] = attention_mask negative_text_inputs = stable_audio_pipe.tokenizer( negative_prompt, padding="max_length", max_length=stable_audio_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt", ).to(torch_device) negative_text_input_ids = negative_text_inputs.input_ids negative_attention_mask = negative_text_inputs.attention_mask negative_prompt_embeds = stable_audio_pipe.text_encoder( negative_text_input_ids, attention_mask=negative_attention_mask, )[0] inputs["negative_prompt_embeds"] = negative_prompt_embeds inputs["negative_attention_mask"] = negative_attention_mask # forward output = stable_audio_pipe(**inputs) audio_2 = output.audios[0] assert (audio_1 - 
audio_2).abs().max() < 1e-2 def test_stable_audio_negative_prompt(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() stable_audio_pipe = StableAudioPipeline(**components) stable_audio_pipe = stable_audio_pipe.to(device) stable_audio_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) negative_prompt = "egg cracking" output = stable_audio_pipe(**inputs, negative_prompt=negative_prompt) audio = output.audios[0] assert audio.ndim == 2 assert audio.shape == (2, 7) def test_stable_audio_num_waveforms_per_prompt(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() stable_audio_pipe = StableAudioPipeline(**components) stable_audio_pipe = stable_audio_pipe.to(device) stable_audio_pipe.set_progress_bar_config(disable=None) prompt = "A hammer hitting a wooden surface" # test num_waveforms_per_prompt=1 (default) audios = stable_audio_pipe(prompt, num_inference_steps=2).audios assert audios.shape == (1, 2, 7) # test num_waveforms_per_prompt=1 (default) for batch of prompts batch_size = 2 audios = stable_audio_pipe([prompt] * batch_size, num_inference_steps=2).audios assert audios.shape == (batch_size, 2, 7) # test num_waveforms_per_prompt for single prompt num_waveforms_per_prompt = 2 audios = stable_audio_pipe( prompt, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt ).audios assert audios.shape == (num_waveforms_per_prompt, 2, 7) # test num_waveforms_per_prompt for batch of prompts batch_size = 2 audios = stable_audio_pipe( [prompt] * batch_size, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt ).audios assert audios.shape == (batch_size * num_waveforms_per_prompt, 2, 7) def test_stable_audio_audio_end_in_s(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() stable_audio_pipe = StableAudioPipeline(**components) stable_audio_pipe = stable_audio_pipe.to(torch_device) stable_audio_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) output = stable_audio_pipe(audio_end_in_s=1.5, **inputs) audio = output.audios[0] assert audio.ndim == 2 assert audio.shape[1] / stable_audio_pipe.vae.sampling_rate == 1.5 output = stable_audio_pipe(audio_end_in_s=1.1875, **inputs) audio = output.audios[0] assert audio.ndim == 2 assert audio.shape[1] / stable_audio_pipe.vae.sampling_rate == 1.0 def test_attention_slicing_forward_pass(self): self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False) def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(expected_max_diff=5e-4) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False) def test_stable_audio_input_waveform(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() stable_audio_pipe = StableAudioPipeline(**components) stable_audio_pipe = stable_audio_pipe.to(device) stable_audio_pipe.set_progress_bar_config(disable=None) prompt = "A hammer hitting a wooden surface" initial_audio_waveforms = torch.ones((1, 5)) # test raises error when no sampling rate with self.assertRaises(ValueError): 
audios = stable_audio_pipe( prompt, num_inference_steps=2, initial_audio_waveforms=initial_audio_waveforms ).audios # test raises error when wrong sampling rate with self.assertRaises(ValueError): audios = stable_audio_pipe( prompt, num_inference_steps=2, initial_audio_waveforms=initial_audio_waveforms, initial_audio_sampling_rate=stable_audio_pipe.vae.sampling_rate - 1, ).audios audios = stable_audio_pipe( prompt, num_inference_steps=2, initial_audio_waveforms=initial_audio_waveforms, initial_audio_sampling_rate=stable_audio_pipe.vae.sampling_rate, ).audios assert audios.shape == (1, 2, 7) # test works with num_waveforms_per_prompt num_waveforms_per_prompt = 2 audios = stable_audio_pipe( prompt, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt, initial_audio_waveforms=initial_audio_waveforms, initial_audio_sampling_rate=stable_audio_pipe.vae.sampling_rate, ).audios assert audios.shape == (num_waveforms_per_prompt, 2, 7) # test num_waveforms_per_prompt for batch of prompts and input audio (two channels) batch_size = 2 initial_audio_waveforms = torch.ones((batch_size, 2, 5)) audios = stable_audio_pipe( [prompt] * batch_size, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt, initial_audio_waveforms=initial_audio_waveforms, initial_audio_sampling_rate=stable_audio_pipe.vae.sampling_rate, ).audios assert audios.shape == (batch_size * num_waveforms_per_prompt, 2, 7) @unittest.skip("Not supported yet") def test_sequential_cpu_offload_forward_pass(self): pass @unittest.skip("Not supported yet") def test_sequential_offload_forward_pass_twice(self): pass @unittest.skip("Test not supported because `rotary_embed_dim` doesn't have any sensible default.") def test_encode_prompt_works_in_isolation(self): pass @nightly @require_torch_accelerator class StableAudioPipelineIntegrationTests(unittest.TestCase): def setUp(self): super().setUp() gc.collect() backend_empty_cache(torch_device) def tearDown(self): super().tearDown() gc.collect() backend_empty_cache(torch_device) def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): generator = torch.Generator(device=generator_device).manual_seed(seed) latents = np.random.RandomState(seed).standard_normal((1, 64, 1024)) latents = torch.from_numpy(latents).to(device=device, dtype=dtype) inputs = { "prompt": "A hammer hitting a wooden surface", "latents": latents, "generator": generator, "num_inference_steps": 3, "audio_end_in_s": 30, "guidance_scale": 2.5, } return inputs def test_stable_audio(self): stable_audio_pipe = StableAudioPipeline.from_pretrained("stabilityai/stable-audio-open-1.0") stable_audio_pipe = stable_audio_pipe.to(torch_device) stable_audio_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) inputs["num_inference_steps"] = 25 audio = stable_audio_pipe(**inputs).audios[0] assert audio.ndim == 2 assert audio.shape == (2, int(inputs["audio_end_in_s"] * stable_audio_pipe.vae.sampling_rate)) # check the portion of the generated audio with the largest dynamic range (reduces flakiness) audio_slice = audio[0, 447590:447600] # fmt: off expected_slices = Expectations( { ("xpu", 3): np.array([-0.0285, 0.1083, 0.1863, 0.3165, 0.5312, 0.6971, 0.6958, 0.6177, 0.5598, 0.5048]), ("cuda", 7): np.array([-0.0278, 0.1096, 0.1877, 0.3178, 0.5329, 0.6990, 0.6972, 0.6186, 0.5608, 0.5060]), ("cuda", 8): np.array([-0.0285, 0.1082, 0.1862, 0.3163, 0.5306, 0.6964, 0.6953, 0.6172, 0.5593, 0.5044]), } ) # fmt: on expected_slice = expected_slices.get_expectation() max_diff 
= np.abs(expected_slice - audio_slice.detach().cpu().numpy()).max() assert max_diff < 1.5e-3
diffusers/tests/pipelines/stable_audio/test_stable_audio.py/0
{ "file_path": "diffusers/tests/pipelines/stable_audio/test_stable_audio.py", "repo_id": "diffusers", "token_count": 7964 }
201
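The fast tests above mostly probe prompt handling, waveform inputs, and output lengths; the nightly test runs the released checkpoint end to end. A hedged sketch of that end-to-end path (the gated `stabilityai/stable-audio-open-1.0` checkpoint and the `soundfile` dependency for writing the waveform are assumptions here) could be:

```python
# Hedged sketch of text-to-audio generation with StableAudioPipeline, based on the
# nightly test above. soundfile is assumed to be installed for saving the result.
import soundfile as sf
import torch

from diffusers import StableAudioPipeline

pipe = StableAudioPipeline.from_pretrained("stabilityai/stable-audio-open-1.0", torch_dtype=torch.float16)
pipe = pipe.to("cuda")

audio = pipe(
    prompt="A hammer hitting a wooden surface",
    num_inference_steps=25,
    audio_end_in_s=10.0,
    guidance_scale=2.5,
    generator=torch.Generator("cpu").manual_seed(0),
).audios[0]

# audios[0] has shape (channels, samples); soundfile expects (samples, channels)
sf.write("hammer.wav", audio.T.float().cpu().numpy(), pipe.vae.sampling_rate)
```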
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import ( CLIPTextConfig, CLIPTextModel, CLIPTokenizer, DPTConfig, DPTForDepthEstimation, DPTImageProcessor, ) from diffusers import ( AutoencoderKL, PNDMScheduler, StableDiffusionDepth2ImgPipeline, UNet2DConditionModel, ) from diffusers.utils.testing_utils import ( backend_empty_cache, enable_full_determinism, floats_tensor, load_image, load_numpy, nightly, require_accelerate_version_greater, require_accelerator, require_torch_accelerator, skip_mps, slow, torch_device, ) from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, ) from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() @skip_mps class StableDiffusionDepth2ImgPipelineFastTests( PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase ): pipeline_class = StableDiffusionDepth2ImgPipeline test_save_load_optional_components = False params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"} required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"} batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS callback_cfg_params = TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS.union({"depth_mask"}) supports_dduf = False def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=5, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True, ) scheduler = PNDMScheduler(skip_prk_steps=True) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") backbone_config = { "global_padding": "same", "layer_type": "bottleneck", "depths": [3, 4, 9], "out_features": ["stage1", "stage2", "stage3"], "embedding_dynamic_padding": True, "hidden_sizes": [96, 192, 384, 768], "num_groups": 2, } depth_estimator_config = DPTConfig( image_size=32, patch_size=16, num_channels=3, hidden_size=32, 
num_hidden_layers=4, backbone_out_indices=(0, 1, 2, 3), num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, is_decoder=False, initializer_range=0.02, is_hybrid=True, backbone_config=backbone_config, backbone_featmap_shape=[1, 384, 24, 24], ) depth_estimator = DPTForDepthEstimation(depth_estimator_config).eval() feature_extractor = DPTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-DPTForDepthEstimation") components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "depth_estimator": depth_estimator, "feature_extractor": feature_extractor, } return components def get_dummy_inputs(self, device, seed=0): image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)) image = image.cpu().permute(0, 2, 3, 1)[0] image = Image.fromarray(np.uint8(image)).convert("RGB").resize((32, 32)) if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "image": image, "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "np", } return inputs def test_save_load_local(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) output = pipe(**inputs)[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(tmpdir) pipe_loaded = self.pipeline_class.from_pretrained(tmpdir) pipe_loaded.to(torch_device) pipe_loaded.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) output_loaded = pipe_loaded(**inputs)[0] max_diff = np.abs(output - output_loaded).max() self.assertLess(max_diff, 1e-4) @unittest.skipIf(torch_device not in ["cuda", "xpu"], reason="float16 requires CUDA or XPU") @require_accelerator def test_save_load_float16(self): components = self.get_dummy_components() for name, module in components.items(): if hasattr(module, "half"): components[name] = module.to(torch_device).half() pipe = self.pipeline_class(**components) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) output = pipe(**inputs)[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(tmpdir) pipe_loaded = self.pipeline_class.from_pretrained(tmpdir, torch_dtype=torch.float16) pipe_loaded.to(torch_device) pipe_loaded.set_progress_bar_config(disable=None) for name, component in pipe_loaded.components.items(): if hasattr(component, "dtype"): self.assertTrue( component.dtype == torch.float16, f"`{name}.dtype` switched from `float16` to {component.dtype} after loading.", ) inputs = self.get_dummy_inputs(torch_device) output_loaded = pipe_loaded(**inputs)[0] max_diff = np.abs(output - output_loaded).max() self.assertLess(max_diff, 2e-2, "The output of the fp16 pipeline changed after saving and loading.") @unittest.skipIf(torch_device not in ["cuda", "xpu"], reason="float16 requires CUDA or XPU") @require_accelerator def test_float16_inference(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) for name, module in components.items(): if hasattr(module, "half"): components[name] = module.half() pipe_fp16 = self.pipeline_class(**components) pipe_fp16.to(torch_device) 
pipe_fp16.set_progress_bar_config(disable=None) output = pipe(**self.get_dummy_inputs(torch_device))[0] output_fp16 = pipe_fp16(**self.get_dummy_inputs(torch_device))[0] max_diff = np.abs(output - output_fp16).max() self.assertLess(max_diff, 1.3e-2, "The outputs of the fp16 and fp32 pipelines are too different.") @require_accelerator @require_accelerate_version_greater("0.14.0") def test_cpu_offload_forward_pass(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) output_without_offload = pipe(**inputs)[0] pipe.enable_sequential_cpu_offload(device=torch_device) inputs = self.get_dummy_inputs(torch_device) output_with_offload = pipe(**inputs)[0] max_diff = np.abs(output_with_offload - output_without_offload).max() self.assertLess(max_diff, 1e-4, "CPU offloading should not affect the inference results") def test_dict_tuple_outputs_equivalent(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) output = pipe(**self.get_dummy_inputs(torch_device))[0] output_tuple = pipe(**self.get_dummy_inputs(torch_device), return_dict=False)[0] max_diff = np.abs(output - output_tuple).max() self.assertLess(max_diff, 1e-4) def test_stable_diffusion_depth2img_default_case(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() pipe = StableDiffusionDepth2ImgPipeline(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) if torch_device == "mps": expected_slice = np.array([0.6071, 0.5035, 0.4378, 0.5776, 0.5753, 0.4316, 0.4513, 0.5263, 0.4546]) else: expected_slice = np.array([0.5435, 0.4992, 0.3783, 0.4411, 0.5842, 0.4654, 0.3786, 0.5077, 0.4655]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_stable_diffusion_depth2img_negative_prompt(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() pipe = StableDiffusionDepth2ImgPipeline(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) negative_prompt = "french fries" output = pipe(**inputs, negative_prompt=negative_prompt) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) if torch_device == "mps": expected_slice = np.array([0.6296, 0.5125, 0.3890, 0.4456, 0.5955, 0.4621, 0.3810, 0.5310, 0.4626]) else: expected_slice = np.array([0.6012, 0.4507, 0.3769, 0.4121, 0.5566, 0.4585, 0.3803, 0.5045, 0.4631]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_stable_diffusion_depth2img_multiple_init_images(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() pipe = StableDiffusionDepth2ImgPipeline(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) inputs["prompt"] = [inputs["prompt"]] * 2 inputs["image"] = 2 * [inputs["image"]] image = pipe(**inputs).images image_slice = image[-1, -3:, -3:, -1] assert image.shape == (2, 32, 32, 3) if torch_device == "mps": expected_slice = np.array([0.6501, 0.5150, 0.4939, 0.6688, 0.5437, 0.5758, 
0.5115, 0.4406, 0.4551]) else: expected_slice = np.array([0.6557, 0.6214, 0.6254, 0.5775, 0.4785, 0.5949, 0.5904, 0.4785, 0.4730]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_stable_diffusion_depth2img_pil(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() pipe = StableDiffusionDepth2ImgPipeline(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] if torch_device == "mps": expected_slice = np.array([0.53232, 0.47015, 0.40868, 0.45651, 0.4891, 0.4668, 0.4287, 0.48822, 0.47439]) else: expected_slice = np.array([0.5435, 0.4992, 0.3783, 0.4411, 0.5842, 0.4654, 0.3786, 0.5077, 0.4655]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 @skip_mps def test_attention_slicing_forward_pass(self): return super().test_attention_slicing_forward_pass() def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=7e-3) def test_encode_prompt_works_in_isolation(self): extra_required_param_value_dict = { "device": torch.device(torch_device).type, "do_classifier_free_guidance": self.get_dummy_inputs(device=torch_device).get("guidance_scale", 1.0) > 1.0, } return super().test_encode_prompt_works_in_isolation(extra_required_param_value_dict) @slow @require_torch_accelerator class StableDiffusionDepth2ImgPipelineSlowTests(unittest.TestCase): def setUp(self): super().setUp() gc.collect() backend_empty_cache(torch_device) def tearDown(self): super().tearDown() gc.collect() backend_empty_cache(torch_device) def get_inputs(self, device="cpu", dtype=torch.float32, seed=0): generator = torch.Generator(device=device).manual_seed(seed) init_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/depth2img/two_cats.png" ) inputs = { "prompt": "two tigers", "image": init_image, "generator": generator, "num_inference_steps": 3, "strength": 0.75, "guidance_scale": 7.5, "output_type": "np", } return inputs def test_stable_diffusion_depth2img_pipeline_default(self): pipe = StableDiffusionDepth2ImgPipeline.from_pretrained( "stabilityai/stable-diffusion-2-depth", safety_checker=None ) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs() image = pipe(**inputs).images image_slice = image[0, 253:256, 253:256, -1].flatten() assert image.shape == (1, 480, 640, 3) expected_slice = np.array([0.5435, 0.4992, 0.3783, 0.4411, 0.5842, 0.4654, 0.3786, 0.5077, 0.4655]) assert np.abs(expected_slice - image_slice).max() < 6e-1 @nightly @require_torch_accelerator class StableDiffusionImg2ImgPipelineNightlyTests(unittest.TestCase): def setUp(self): super().setUp() gc.collect() backend_empty_cache(torch_device) def tearDown(self): super().tearDown() gc.collect() backend_empty_cache(torch_device) def get_inputs(self, device="cpu", dtype=torch.float32, seed=0): generator = torch.Generator(device=device).manual_seed(seed) init_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/depth2img/two_cats.png" ) inputs = { "prompt": "two tigers", "image": init_image, "generator": generator, "num_inference_steps": 2, "strength": 0.75, "guidance_scale": 7.5, "output_type": "np", } return inputs def test_depth2img(self): pipe = 
StableDiffusionDepth2ImgPipeline.from_pretrained("stabilityai/stable-diffusion-2-depth") pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs() image = pipe(**inputs).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_depth2img/stable_diffusion_2_0_pndm.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 1e-3
diffusers/tests/pipelines/stable_diffusion_2/test_stable_diffusion_depth.py/0
{ "file_path": "diffusers/tests/pipelines/stable_diffusion_2/test_stable_diffusion_depth.py", "repo_id": "diffusers", "token_count": 8260 }
202
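For reference, the depth-to-image path that the slow and nightly tests above drive can be sketched as follows; the checkpoint id and image URL come straight from the tests, while the step count and output filename are illustrative:

```python
# Hedged sketch of depth-conditioned img2img with StableDiffusionDepth2ImgPipeline.
# The depth map is estimated internally by the bundled DPT depth estimator.
import torch

from diffusers import StableDiffusionDepth2ImgPipeline
from diffusers.utils import load_image

pipe = StableDiffusionDepth2ImgPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-depth", torch_dtype=torch.float16
).to("cuda")

init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/depth2img/two_cats.png"
)
image = pipe(
    prompt="two tigers",
    image=init_image,
    strength=0.75,
    guidance_scale=7.5,
    num_inference_steps=50,  # illustrative; the tests use 2-3 steps for speed
    generator=torch.Generator("cpu").manual_seed(0),
).images[0]
image.save("two_tigers.png")
```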
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import gc import tempfile import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DPMSolverMultistepScheduler, EulerDiscreteScheduler, HeunDiscreteScheduler, LCMScheduler, StableDiffusionXLImg2ImgPipeline, StableDiffusionXLPipeline, UNet2DConditionModel, UniPCMultistepScheduler, ) from diffusers.utils.testing_utils import ( backend_empty_cache, enable_full_determinism, load_image, numpy_cosine_similarity_distance, require_torch_accelerator, slow, torch_device, ) from ..pipeline_params import ( TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS, ) from ..test_pipelines_common import ( IPAdapterTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, SDFunctionTesterMixin, ) enable_full_determinism() class StableDiffusionXLPipelineFastTests( SDFunctionTesterMixin, IPAdapterTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase, ): pipeline_class = StableDiffusionXLPipeline params = TEXT_TO_IMAGE_PARAMS batch_params = TEXT_TO_IMAGE_BATCH_PARAMS image_params = TEXT_TO_IMAGE_IMAGE_PARAMS image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS callback_cfg_params = TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS.union({"add_text_embeds", "add_time_ids"}) test_layerwise_casting = True test_group_offloading = True def get_dummy_components(self, time_cond_proj_dim=None): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(2, 4), layers_per_block=2, time_cond_proj_dim=time_cond_proj_dim, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), # SD2-specific config below attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, # 6 * 8 + 32 cross_attention_dim=64, norm_num_groups=1, ) scheduler = EulerDiscreteScheduler( beta_start=0.00085, beta_end=0.012, steps_offset=1, beta_schedule="scaled_linear", timestep_spacing="leading", ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, # SD2-specific config below hidden_act="gelu", projection_dim=32, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") text_encoder_2 = 
CLIPTextModelWithProjection(text_encoder_config) tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "text_encoder_2": text_encoder_2, "tokenizer_2": tokenizer_2, "image_encoder": None, "feature_extractor": None, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 5.0, "output_type": "np", } return inputs def test_stable_diffusion_xl_euler(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionXLPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.5388, 0.5452, 0.4694, 0.4583, 0.5253, 0.4832, 0.5288, 0.5035, 0.47]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_xl_euler_lcm(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components(time_cond_proj_dim=256) sd_pipe = StableDiffusionXLPipeline(**components) sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.4917, 0.6555, 0.4348, 0.5219, 0.7324, 0.4855, 0.5168, 0.5447, 0.5156]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_xl_euler_lcm_custom_timesteps(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components(time_cond_proj_dim=256) sd_pipe = StableDiffusionXLPipeline(**components) sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) del inputs["num_inference_steps"] inputs["timesteps"] = [999, 499] image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.4917, 0.6555, 0.4348, 0.5219, 0.7324, 0.4855, 0.5168, 0.5447, 0.5156]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_ays(self): from diffusers.schedulers import AysSchedules timestep_schedule = AysSchedules["StableDiffusionXLTimesteps"] sigma_schedule = AysSchedules["StableDiffusionXLSigmas"] device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components(time_cond_proj_dim=256) sd_pipe = StableDiffusionXLPipeline(**components) sd_pipe.scheduler = EulerDiscreteScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) inputs["num_inference_steps"] = 10 output = sd_pipe(**inputs).images inputs = self.get_dummy_inputs(device) inputs["num_inference_steps"] = None 
inputs["timesteps"] = timestep_schedule output_ts = sd_pipe(**inputs).images inputs = self.get_dummy_inputs(device) inputs["num_inference_steps"] = None inputs["sigmas"] = sigma_schedule output_sigmas = sd_pipe(**inputs).images assert np.abs(output_sigmas.flatten() - output_ts.flatten()).max() < 1e-3, ( "ays timesteps and ays sigmas should have the same outputs" ) assert np.abs(output.flatten() - output_ts.flatten()).max() > 1e-3, ( "use ays timesteps should have different outputs" ) assert np.abs(output.flatten() - output_sigmas.flatten()).max() > 1e-3, ( "use ays sigmas should have different outputs" ) def test_ip_adapter(self): expected_pipe_slice = None if torch_device == "cpu": expected_pipe_slice = np.array([0.5388, 0.5452, 0.4694, 0.4583, 0.5253, 0.4832, 0.5288, 0.5035, 0.4766]) return super().test_ip_adapter(expected_pipe_slice=expected_pipe_slice) def test_attention_slicing_forward_pass(self): super().test_attention_slicing_forward_pass(expected_max_diff=3e-3) def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=3e-3) @require_torch_accelerator def test_stable_diffusion_xl_offloads(self): pipes = [] components = self.get_dummy_components() sd_pipe = StableDiffusionXLPipeline(**components).to(torch_device) pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = StableDiffusionXLPipeline(**components) sd_pipe.enable_model_cpu_offload(device=torch_device) pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = StableDiffusionXLPipeline(**components) sd_pipe.enable_sequential_cpu_offload(device=torch_device) pipes.append(sd_pipe) image_slices = [] for pipe in pipes: pipe.unet.set_default_attn_processor() inputs = self.get_dummy_inputs(torch_device) image = pipe(**inputs).images image_slices.append(image[0, -3:, -3:, -1].flatten()) assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3 @unittest.skip("We test this functionality elsewhere already.") def test_save_load_optional_components(self): pass def test_stable_diffusion_two_xl_mixture_of_denoiser_fast(self): components = self.get_dummy_components() pipe_1 = StableDiffusionXLPipeline(**components).to(torch_device) pipe_1.unet.set_default_attn_processor() pipe_2 = StableDiffusionXLImg2ImgPipeline(**components).to(torch_device) pipe_2.unet.set_default_attn_processor() def assert_run_mixture( num_steps, split, scheduler_cls_orig, expected_tss, num_train_timesteps=pipe_1.scheduler.config.num_train_timesteps, ): inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = num_steps class scheduler_cls(scheduler_cls_orig): pass pipe_1.scheduler = scheduler_cls.from_config(pipe_1.scheduler.config) pipe_2.scheduler = scheduler_cls.from_config(pipe_2.scheduler.config) # Let's retrieve the number of timesteps we want to use pipe_1.scheduler.set_timesteps(num_steps) expected_steps = pipe_1.scheduler.timesteps.tolist() if pipe_1.scheduler.order == 2: expected_steps_1 = list(filter(lambda ts: ts >= split, expected_tss)) expected_steps_2 = expected_steps_1[-1:] + list(filter(lambda ts: ts < split, expected_tss)) expected_steps = expected_steps_1 + expected_steps_2 else: expected_steps_1 = list(filter(lambda ts: ts >= split, expected_tss)) expected_steps_2 = list(filter(lambda ts: ts < split, expected_tss)) # now we monkey patch step `done_steps` # list into the step function for testing done_steps = [] old_step = copy.copy(scheduler_cls.step) def new_step(self, *args, 
**kwargs): done_steps.append(args[1].cpu().item()) # args[1] is always the passed `t` return old_step(self, *args, **kwargs) scheduler_cls.step = new_step inputs_1 = { **inputs, **{ "denoising_end": 1.0 - (split / num_train_timesteps), "output_type": "latent", }, } latents = pipe_1(**inputs_1).images[0] assert expected_steps_1 == done_steps, f"Failure with {scheduler_cls.__name__} and {num_steps} and {split}" inputs_2 = { **inputs, **{ "denoising_start": 1.0 - (split / num_train_timesteps), "image": latents, }, } pipe_2(**inputs_2).images[0] assert expected_steps_2 == done_steps[len(expected_steps_1) :] assert expected_steps == done_steps, f"Failure with {scheduler_cls.__name__} and {num_steps} and {split}" steps = 10 for split in [300, 700]: for scheduler_cls_timesteps in [ (EulerDiscreteScheduler, [901, 801, 701, 601, 501, 401, 301, 201, 101, 1]), ( HeunDiscreteScheduler, [ 901.0, 801.0, 801.0, 701.0, 701.0, 601.0, 601.0, 501.0, 501.0, 401.0, 401.0, 301.0, 301.0, 201.0, 201.0, 101.0, 101.0, 1.0, 1.0, ], ), ]: assert_run_mixture(steps, split, scheduler_cls_timesteps[0], scheduler_cls_timesteps[1]) @slow def test_stable_diffusion_two_xl_mixture_of_denoiser(self): components = self.get_dummy_components() pipe_1 = StableDiffusionXLPipeline(**components).to(torch_device) pipe_1.unet.set_default_attn_processor() pipe_2 = StableDiffusionXLImg2ImgPipeline(**components).to(torch_device) pipe_2.unet.set_default_attn_processor() def assert_run_mixture( num_steps, split, scheduler_cls_orig, expected_tss, num_train_timesteps=pipe_1.scheduler.config.num_train_timesteps, ): inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = num_steps class scheduler_cls(scheduler_cls_orig): pass pipe_1.scheduler = scheduler_cls.from_config(pipe_1.scheduler.config) pipe_2.scheduler = scheduler_cls.from_config(pipe_2.scheduler.config) # Let's retrieve the number of timesteps we want to use pipe_1.scheduler.set_timesteps(num_steps) expected_steps = pipe_1.scheduler.timesteps.tolist() if pipe_1.scheduler.order == 2: expected_steps_1 = list(filter(lambda ts: ts >= split, expected_tss)) expected_steps_2 = expected_steps_1[-1:] + list(filter(lambda ts: ts < split, expected_tss)) expected_steps = expected_steps_1 + expected_steps_2 else: expected_steps_1 = list(filter(lambda ts: ts >= split, expected_tss)) expected_steps_2 = list(filter(lambda ts: ts < split, expected_tss)) # now we monkey patch step `done_steps` # list into the step function for testing done_steps = [] old_step = copy.copy(scheduler_cls.step) def new_step(self, *args, **kwargs): done_steps.append(args[1].cpu().item()) # args[1] is always the passed `t` return old_step(self, *args, **kwargs) scheduler_cls.step = new_step inputs_1 = { **inputs, **{ "denoising_end": 1.0 - (split / num_train_timesteps), "output_type": "latent", }, } latents = pipe_1(**inputs_1).images[0] assert expected_steps_1 == done_steps, f"Failure with {scheduler_cls.__name__} and {num_steps} and {split}" inputs_2 = { **inputs, **{ "denoising_start": 1.0 - (split / num_train_timesteps), "image": latents, }, } pipe_2(**inputs_2).images[0] assert expected_steps_2 == done_steps[len(expected_steps_1) :] assert expected_steps == done_steps, f"Failure with {scheduler_cls.__name__} and {num_steps} and {split}" steps = 10 for split in [300, 500, 700]: for scheduler_cls_timesteps in [ (DDIMScheduler, [901, 801, 701, 601, 501, 401, 301, 201, 101, 1]), (EulerDiscreteScheduler, [901, 801, 701, 601, 501, 401, 301, 201, 101, 1]), (DPMSolverMultistepScheduler, [901, 811, 721, 631, 
541, 451, 361, 271, 181, 91]), (UniPCMultistepScheduler, [901, 811, 721, 631, 541, 451, 361, 271, 181, 91]), ( HeunDiscreteScheduler, [ 901.0, 801.0, 801.0, 701.0, 701.0, 601.0, 601.0, 501.0, 501.0, 401.0, 401.0, 301.0, 301.0, 201.0, 201.0, 101.0, 101.0, 1.0, 1.0, ], ), ]: assert_run_mixture(steps, split, scheduler_cls_timesteps[0], scheduler_cls_timesteps[1]) steps = 25 for split in [300, 500, 700]: for scheduler_cls_timesteps in [ ( DDIMScheduler, [ 961, 921, 881, 841, 801, 761, 721, 681, 641, 601, 561, 521, 481, 441, 401, 361, 321, 281, 241, 201, 161, 121, 81, 41, 1, ], ), ( EulerDiscreteScheduler, [ 961.0, 921.0, 881.0, 841.0, 801.0, 761.0, 721.0, 681.0, 641.0, 601.0, 561.0, 521.0, 481.0, 441.0, 401.0, 361.0, 321.0, 281.0, 241.0, 201.0, 161.0, 121.0, 81.0, 41.0, 1.0, ], ), ( DPMSolverMultistepScheduler, [ 951, 913, 875, 837, 799, 761, 723, 685, 647, 609, 571, 533, 495, 457, 419, 381, 343, 305, 267, 229, 191, 153, 115, 77, 39, ], ), ( UniPCMultistepScheduler, [ 951, 913, 875, 837, 799, 761, 723, 685, 647, 609, 571, 533, 495, 457, 419, 381, 343, 305, 267, 229, 191, 153, 115, 77, 39, ], ), ( HeunDiscreteScheduler, [ 961.0, 921.0, 921.0, 881.0, 881.0, 841.0, 841.0, 801.0, 801.0, 761.0, 761.0, 721.0, 721.0, 681.0, 681.0, 641.0, 641.0, 601.0, 601.0, 561.0, 561.0, 521.0, 521.0, 481.0, 481.0, 441.0, 441.0, 401.0, 401.0, 361.0, 361.0, 321.0, 321.0, 281.0, 281.0, 241.0, 241.0, 201.0, 201.0, 161.0, 161.0, 121.0, 121.0, 81.0, 81.0, 41.0, 41.0, 1.0, 1.0, ], ), ]: assert_run_mixture(steps, split, scheduler_cls_timesteps[0], scheduler_cls_timesteps[1]) @slow def test_stable_diffusion_three_xl_mixture_of_denoiser(self): components = self.get_dummy_components() pipe_1 = StableDiffusionXLPipeline(**components).to(torch_device) pipe_1.unet.set_default_attn_processor() pipe_2 = StableDiffusionXLImg2ImgPipeline(**components).to(torch_device) pipe_2.unet.set_default_attn_processor() pipe_3 = StableDiffusionXLImg2ImgPipeline(**components).to(torch_device) pipe_3.unet.set_default_attn_processor() def assert_run_mixture( num_steps, split_1, split_2, scheduler_cls_orig, num_train_timesteps=pipe_1.scheduler.config.num_train_timesteps, ): inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = num_steps class scheduler_cls(scheduler_cls_orig): pass pipe_1.scheduler = scheduler_cls.from_config(pipe_1.scheduler.config) pipe_2.scheduler = scheduler_cls.from_config(pipe_2.scheduler.config) pipe_3.scheduler = scheduler_cls.from_config(pipe_3.scheduler.config) # Let's retrieve the number of timesteps we want to use pipe_1.scheduler.set_timesteps(num_steps) expected_steps = pipe_1.scheduler.timesteps.tolist() split_1_ts = num_train_timesteps - int(round(num_train_timesteps * split_1)) split_2_ts = num_train_timesteps - int(round(num_train_timesteps * split_2)) if pipe_1.scheduler.order == 2: expected_steps_1 = list(filter(lambda ts: ts >= split_1_ts, expected_steps)) expected_steps_2 = expected_steps_1[-1:] + list( filter(lambda ts: ts >= split_2_ts and ts < split_1_ts, expected_steps) ) expected_steps_3 = expected_steps_2[-1:] + list(filter(lambda ts: ts < split_2_ts, expected_steps)) expected_steps = expected_steps_1 + expected_steps_2 + expected_steps_3 else: expected_steps_1 = list(filter(lambda ts: ts >= split_1_ts, expected_steps)) expected_steps_2 = list(filter(lambda ts: ts >= split_2_ts and ts < split_1_ts, expected_steps)) expected_steps_3 = list(filter(lambda ts: ts < split_2_ts, expected_steps)) # now we monkey patch step `done_steps` # list into the step function for testing done_steps 
= [] old_step = copy.copy(scheduler_cls.step) def new_step(self, *args, **kwargs): done_steps.append(args[1].cpu().item()) # args[1] is always the passed `t` return old_step(self, *args, **kwargs) scheduler_cls.step = new_step inputs_1 = {**inputs, **{"denoising_end": split_1, "output_type": "latent"}} latents = pipe_1(**inputs_1).images[0] assert expected_steps_1 == done_steps, ( f"Failure with {scheduler_cls.__name__} and {num_steps} and {split_1} and {split_2}" ) with self.assertRaises(ValueError) as cm: inputs_2 = { **inputs, **{ "denoising_start": split_2, "denoising_end": split_1, "image": latents, "output_type": "latent", }, } pipe_2(**inputs_2).images[0] assert "cannot be larger than or equal to `denoising_end`" in str(cm.exception) inputs_2 = { **inputs, **{"denoising_start": split_1, "denoising_end": split_2, "image": latents, "output_type": "latent"}, } pipe_2(**inputs_2).images[0] assert expected_steps_2 == done_steps[len(expected_steps_1) :] inputs_3 = {**inputs, **{"denoising_start": split_2, "image": latents}} pipe_3(**inputs_3).images[0] assert expected_steps_3 == done_steps[len(expected_steps_1) + len(expected_steps_2) :] assert expected_steps == done_steps, ( f"Failure with {scheduler_cls.__name__} and {num_steps} and {split_1} and {split_2}" ) for steps in [7, 11, 20]: for split_1, split_2 in zip([0.19, 0.32], [0.81, 0.68]): for scheduler_cls in [ DDIMScheduler, EulerDiscreteScheduler, DPMSolverMultistepScheduler, UniPCMultistepScheduler, HeunDiscreteScheduler, ]: assert_run_mixture(steps, split_1, split_2, scheduler_cls) def test_stable_diffusion_xl_multi_prompts(self): components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components).to(torch_device) # forward with single prompt inputs = self.get_dummy_inputs(torch_device) output = sd_pipe(**inputs) image_slice_1 = output.images[0, -3:, -3:, -1] # forward with same prompt duplicated inputs = self.get_dummy_inputs(torch_device) inputs["prompt_2"] = inputs["prompt"] output = sd_pipe(**inputs) image_slice_2 = output.images[0, -3:, -3:, -1] # ensure the results are equal assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 # forward with different prompt inputs = self.get_dummy_inputs(torch_device) inputs["prompt_2"] = "different prompt" output = sd_pipe(**inputs) image_slice_3 = output.images[0, -3:, -3:, -1] # ensure the results are not equal assert np.abs(image_slice_1.flatten() - image_slice_3.flatten()).max() > 1e-4 # manually set a negative_prompt inputs = self.get_dummy_inputs(torch_device) inputs["negative_prompt"] = "negative prompt" output = sd_pipe(**inputs) image_slice_1 = output.images[0, -3:, -3:, -1] # forward with same negative_prompt duplicated inputs = self.get_dummy_inputs(torch_device) inputs["negative_prompt"] = "negative prompt" inputs["negative_prompt_2"] = inputs["negative_prompt"] output = sd_pipe(**inputs) image_slice_2 = output.images[0, -3:, -3:, -1] # ensure the results are equal assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 # forward with different negative_prompt inputs = self.get_dummy_inputs(torch_device) inputs["negative_prompt"] = "negative prompt" inputs["negative_prompt_2"] = "different negative prompt" output = sd_pipe(**inputs) image_slice_3 = output.images[0, -3:, -3:, -1] # ensure the results are not equal assert np.abs(image_slice_1.flatten() - image_slice_3.flatten()).max() > 1e-4 def test_stable_diffusion_xl_negative_conditions(self): device = "cpu" # ensure determinism for the device-dependent 
torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionXLPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice_with_no_neg_cond = image[0, -3:, -3:, -1] image = sd_pipe( **inputs, negative_original_size=(512, 512), negative_crops_coords_top_left=(0, 0), negative_target_size=(1024, 1024), ).images image_slice_with_neg_cond = image[0, -3:, -3:, -1] self.assertTrue(np.abs(image_slice_with_no_neg_cond - image_slice_with_neg_cond).max() > 1e-2) def test_stable_diffusion_xl_save_from_pretrained(self): pipes = [] components = self.get_dummy_components() sd_pipe = StableDiffusionXLPipeline(**components).to(torch_device) pipes.append(sd_pipe) with tempfile.TemporaryDirectory() as tmpdirname: sd_pipe.save_pretrained(tmpdirname) sd_pipe = StableDiffusionXLPipeline.from_pretrained(tmpdirname).to(torch_device) pipes.append(sd_pipe) image_slices = [] for pipe in pipes: pipe.unet.set_default_attn_processor() inputs = self.get_dummy_inputs(torch_device) image = pipe(**inputs).images image_slices.append(image[0, -3:, -3:, -1].flatten()) assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 def test_pipeline_interrupt(self): components = self.get_dummy_components() sd_pipe = StableDiffusionXLPipeline(**components) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) prompt = "hey" num_inference_steps = 3 # store intermediate latents from the generation process class PipelineState: def __init__(self): self.state = [] def apply(self, pipe, i, t, callback_kwargs): self.state.append(callback_kwargs["latents"]) return callback_kwargs pipe_state = PipelineState() sd_pipe( prompt, num_inference_steps=num_inference_steps, output_type="np", generator=torch.Generator("cpu").manual_seed(0), callback_on_step_end=pipe_state.apply, ).images # interrupt generation at step index interrupt_step_idx = 1 def callback_on_step_end(pipe, i, t, callback_kwargs): if i == interrupt_step_idx: pipe._interrupt = True return callback_kwargs output_interrupted = sd_pipe( prompt, num_inference_steps=num_inference_steps, output_type="latent", generator=torch.Generator("cpu").manual_seed(0), callback_on_step_end=callback_on_step_end, ).images # fetch intermediate latents at the interrupted step # from the completed generation process intermediate_latent = pipe_state.state[interrupt_step_idx] # compare the intermediate latent to the output of the interrupted process # they should be the same assert torch.allclose(intermediate_latent, output_interrupted, atol=1e-4) @slow class StableDiffusionXLPipelineIntegrationTests(unittest.TestCase): def setUp(self): super().setUp() gc.collect() backend_empty_cache(torch_device) def tearDown(self): super().tearDown() gc.collect() backend_empty_cache(torch_device) def test_stable_diffusion_lcm(self): torch.manual_seed(0) unet = UNet2DConditionModel.from_pretrained( "latent-consistency/lcm-ssd-1b", torch_dtype=torch.float16, variant="fp16" ) sd_pipe = StableDiffusionXLPipeline.from_pretrained( "segmind/SSD-1B", unet=unet, torch_dtype=torch.float16, variant="fp16" ).to(torch_device) sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config) sd_pipe.set_progress_bar_config(disable=None) prompt = "a red car standing on the side of the street" image = sd_pipe( prompt, num_inference_steps=4, guidance_scale=8.0, generator=torch.Generator("cpu").manual_seed(0) ).images[0] expected_image = load_image( 
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/lcm_full/stable_diffusion_ssd_1b_lcm.png" ) image = sd_pipe.image_processor.pil_to_numpy(image) expected_image = sd_pipe.image_processor.pil_to_numpy(expected_image) max_diff = numpy_cosine_similarity_distance(image.flatten(), expected_image.flatten()) assert max_diff < 1e-2
diffusers/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl.py/0
{ "file_path": "diffusers/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl.py", "repo_id": "diffusers", "token_count": 20350 }
203
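A large part of the test file above verifies the `denoising_end` / `denoising_start` hand-off between a base SDXL pipeline and an img2img "refiner" pipeline (`assert_run_mixture`). Outside the dummy components used in the tests, that hand-off is typically written as below; the two checkpoint ids are the usual public SDXL base/refiner repos and are an assumption, not something this test file pins down:

```python
# Hedged sketch of the base/refiner "mixture of denoisers" split checked by
# assert_run_mixture above: the base stops at 80% of the schedule, the refiner
# resumes from the same point on the returned latents.
import torch

from diffusers import StableDiffusionXLImg2ImgPipeline, StableDiffusionXLPipeline

base = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, variant="fp16"
).to("cuda")
refiner = StableDiffusionXLImg2ImgPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-refiner-1.0",
    text_encoder_2=base.text_encoder_2,
    vae=base.vae,
    torch_dtype=torch.float16,
    variant="fp16",
).to("cuda")

prompt = "a red car standing on the side of the street"
generator = torch.Generator("cpu").manual_seed(0)

latents = base(
    prompt, num_inference_steps=30, denoising_end=0.8, output_type="latent", generator=generator
).images
image = refiner(
    prompt, image=latents, num_inference_steps=30, denoising_start=0.8, generator=generator
).images[0]
image.save("sdxl_base_refiner.png")
```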
from diffusers.utils.testing_utils import require_onnxruntime


@require_onnxruntime
class OnnxPipelineTesterMixin:
    """
    This mixin is designed to be used with unittest.TestCase classes.
    It provides a set of common tests for each ONNXRuntime pipeline, e.g. saving and loading the pipeline,
    equivalence of dict and tuple outputs, etc.
    """

    pass
diffusers/tests/pipelines/test_pipelines_onnx_common.py/0
{ "file_path": "diffusers/tests/pipelines/test_pipelines_onnx_common.py", "repo_id": "diffusers", "token_count": 118 }
204
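The mixin above is only a placeholder (`pass`), but its docstring explains the intent: it is meant to be combined with `unittest.TestCase` in concrete ONNX Runtime pipeline tests. The class below is a hedged sketch of that combination, not an actual test from the repository; the checkpoint id is a placeholder, and the import assumes the sketch lives next to `test_pipelines_onnx_common.py` inside the diffusers test tree.

```python
# Hedged sketch of how the placeholder mixin is meant to be used.
import unittest

import numpy as np

from diffusers import OnnxStableDiffusionPipeline
from diffusers.utils.testing_utils import require_onnxruntime

from test_pipelines_onnx_common import OnnxPipelineTesterMixin  # mixin defined above


@require_onnxruntime
class OnnxStableDiffusionPipelineSketchTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "<org>/<tiny-onnx-stable-diffusion-checkpoint>"  # placeholder

    def test_inference_returns_numpy_images(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            self.hub_checkpoint, provider="CPUExecutionProvider"
        )
        pipe.set_progress_bar_config(disable=None)
        images = pipe("A painting of a squirrel", num_inference_steps=2, output_type="np").images
        self.assertIsInstance(images, np.ndarray)
        self.assertEqual(images.ndim, 4)  # (batch, height, width, channels)
```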
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest import torch from diffusers import ( ControlNetModel, ) from diffusers.utils.testing_utils import ( backend_empty_cache, enable_full_determinism, require_torch_accelerator, slow, torch_device, ) enable_full_determinism() @slow @require_torch_accelerator class ControlNetModelSingleFileTests(unittest.TestCase): model_class = ControlNetModel ckpt_path = "https://huggingface.co/lllyasviel/ControlNet-v1-1/blob/main/control_v11p_sd15_canny.pth" repo_id = "lllyasviel/control_v11p_sd15_canny" def setUp(self): super().setUp() gc.collect() backend_empty_cache(torch_device) def tearDown(self): super().tearDown() gc.collect() backend_empty_cache(torch_device) def test_single_file_components(self): model = self.model_class.from_pretrained(self.repo_id) model_single_file = self.model_class.from_single_file(self.ckpt_path) PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"] for param_name, param_value in model_single_file.config.items(): if param_name in PARAMS_TO_IGNORE: continue assert model.config[param_name] == param_value, ( f"{param_name} differs between single file loading and pretrained loading" ) def test_single_file_arguments(self): model_default = self.model_class.from_single_file(self.ckpt_path) assert model_default.config.upcast_attention is False assert model_default.dtype == torch.float32 torch_dtype = torch.float16 upcast_attention = True model = self.model_class.from_single_file( self.ckpt_path, upcast_attention=upcast_attention, torch_dtype=torch_dtype, ) assert model.config.upcast_attention == upcast_attention assert model.dtype == torch_dtype
diffusers/tests/single_file/test_model_controlnet_single_file.py/0
{ "file_path": "diffusers/tests/single_file/test_model_controlnet_single_file.py", "repo_id": "diffusers", "token_count": 1018 }
205
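The single-file test above compares the config produced by `from_pretrained` against `from_single_file`, and checks that `torch_dtype` and `upcast_attention` overrides are honored. A minimal usage sketch of the same two loading paths follows; the checkpoint URL and repo id are the ones from the test, everything else is illustrative and requires network access.

```python
# Hedged sketch: the two ControlNet loading paths the test compares, plus the
# dtype / upcast_attention overrides it verifies.
import torch

from diffusers import ControlNetModel

ckpt_url = "https://huggingface.co/lllyasviel/ControlNet-v1-1/blob/main/control_v11p_sd15_canny.pth"
repo_id = "lllyasviel/control_v11p_sd15_canny"

# Diffusers-format repository
controlnet = ControlNetModel.from_pretrained(repo_id)

# Original single-file checkpoint, with optional overrides
controlnet_sf = ControlNetModel.from_single_file(
    ckpt_url,
    torch_dtype=torch.float16,
    upcast_attention=True,
)
print(controlnet_sf.dtype, controlnet_sf.config.upcast_attention)
```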
import json import logging import os from collections import defaultdict from pathlib import Path from huggingface_hub import HfApi import diffusers PATH_TO_REPO = Path(__file__).parent.parent.resolve() ALWAYS_TEST_PIPELINE_MODULES = [ "controlnet", "controlnet_flux", "controlnet_sd3", "stable_diffusion", "stable_diffusion_2", "stable_diffusion_3", "stable_diffusion_xl", "ip_adapters", "flux", ] PIPELINE_USAGE_CUTOFF = int(os.getenv("PIPELINE_USAGE_CUTOFF", 50000)) logger = logging.getLogger(__name__) api = HfApi() def filter_pipelines(usage_dict, usage_cutoff=10000): output = [] for diffusers_object, usage in usage_dict.items(): if usage < usage_cutoff: continue is_diffusers_pipeline = hasattr(diffusers.pipelines, diffusers_object) if not is_diffusers_pipeline: continue output.append(diffusers_object) return output def fetch_pipeline_objects(): models = api.list_models(library="diffusers") downloads = defaultdict(int) for model in models: is_counted = False for tag in model.tags: if tag.startswith("diffusers:"): is_counted = True downloads[tag[len("diffusers:") :]] += model.downloads if not is_counted: downloads["other"] += model.downloads # Remove 0 downloads downloads = {k: v for k, v in downloads.items() if v > 0} pipeline_objects = filter_pipelines(downloads, PIPELINE_USAGE_CUTOFF) return pipeline_objects def fetch_pipeline_modules_to_test(): try: pipeline_objects = fetch_pipeline_objects() except Exception as e: logger.error(e) raise RuntimeError("Unable to fetch model list from HuggingFace Hub.") test_modules = [] for pipeline_name in pipeline_objects: module = getattr(diffusers, pipeline_name) test_module = module.__module__.split(".")[-2].strip() test_modules.append(test_module) return test_modules def main(): test_modules = fetch_pipeline_modules_to_test() test_modules.extend(ALWAYS_TEST_PIPELINE_MODULES) # Get unique modules test_modules = sorted(set(test_modules)) print(json.dumps(test_modules)) save_path = f"{PATH_TO_REPO}/reports" os.makedirs(save_path, exist_ok=True) with open(f"{save_path}/test-pipelines.json", "w") as f: json.dump({"pipeline_test_modules": test_modules}, f) if __name__ == "__main__": main()
diffusers/utils/fetch_torch_cuda_pipeline_test_matrix.py/0
{ "file_path": "diffusers/utils/fetch_torch_cuda_pipeline_test_matrix.py", "repo_id": "diffusers", "token_count": 1041 }
206
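The utility above decides which pipeline test modules to run by aggregating Hub download counts per `diffusers:<Pipeline>` tag and applying a usage cutoff. The self-contained sketch below re-implements only the cutoff step on a hand-written usage dict, so the behaviour can be seen without querying the Hub; the sample numbers are made up.

```python
# Hedged, self-contained re-implementation of the cutoff step used by
# fetch_torch_cuda_pipeline_test_matrix.py. The usage numbers are invented;
# the real script aggregates model.downloads per "diffusers:<Pipeline>" tag
# and additionally checks hasattr(diffusers.pipelines, name).
PIPELINE_USAGE_CUTOFF = 50_000

fake_usage = {
    "StableDiffusionPipeline": 1_200_000,
    "StableDiffusionXLPipeline": 800_000,
    "SomeRarelyUsedPipeline": 4_000,
    "other": 90_000,  # aggregated models without a diffusers: tag
}


def filter_by_cutoff(usage: dict[str, int], cutoff: int) -> list[str]:
    # Keep only entries at or above the cutoff; the real script would also
    # drop "other" because it is not an attribute of diffusers.pipelines.
    return [name for name, downloads in usage.items() if downloads >= cutoff]


print(sorted(filter_by_cutoff(fake_usage, PIPELINE_USAGE_CUTOFF)))
# ['StableDiffusionPipeline', 'StableDiffusionXLPipeline', 'other']
```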
<!---
Copyright 2020 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->

# Generating the documentation

To generate the documentation, you first have to build it. Several packages are necessary to build the docs; you can install them with the following command, run at the root of the code repository:

```bash
pip install -e . -r docs-requirements.txt
```

You will also need `nodejs`. Please refer to its [installation page](https://nodejs.org/en/download).

---
**NOTE**

You only need to generate the documentation to inspect it locally (for instance, if you're planning changes and want to check how they look before committing). You don't have to `git commit` the built documentation.

---

## Building the documentation

Once you have set up `doc-builder` and the additional packages, you can generate the documentation by typing the following command:

```bash
doc-builder build lerobot docs/source/ --build_dir ~/tmp/test-build
```

You can adapt `--build_dir` to any temporary folder that you prefer. This command will create it and generate the MDX files that will be rendered as the documentation on the main website. You can inspect them in your favorite Markdown editor.

## Previewing the documentation

To preview the docs, first install the `watchdog` module with:

```bash
pip install watchdog
```

Then run the following command:

```bash
doc-builder preview lerobot docs/source/
```

The docs will be viewable at [http://localhost:3000](http://localhost:3000). You can also preview the docs once you have opened a PR: a bot will add a comment with a link to the documentation built with your changes.

---
**NOTE**

The `preview` command only works with existing doc files. When you add a completely new file, you need to update `_toctree.yml` and restart the `preview` command (`ctrl-c` to stop it, then call `doc-builder preview ...` again).

---

## Adding a new element to the navigation bar

Accepted files are Markdown (.md). Create a file with its extension and put it in the source directory. You can then link it to the toc-tree by putting the filename without the extension in the [`_toctree.yml`](https://github.com/huggingface/lerobot/blob/main/docs/source/_toctree.yml) file.

## Renaming section headers and moving sections

It helps to keep old links working when renaming a section header and/or moving sections from one document to another. This is because old links are likely to be used in issues, forums, and social media, and it makes for a much better user experience if users reading those months later can still easily navigate to the originally intended information.

Therefore, we simply keep a little map of moved sections at the end of the document where the original section was. The key is to preserve the original anchor.
So if you renamed a section from "Section A" to "Section B", you can add at the end of the file:

```
Sections that were moved:

[ <a href="#section-b">Section A</a><a id="section-a"></a> ]
```

and of course, if you moved it to another file:

```
Sections that were moved:

[ <a href="../new-file#section-b">Section A</a><a id="section-a"></a> ]
```

Use the relative style to link to the new file so that the versioned docs continue to work.

For an example of a rich set of moved sections, please see the very end of [the transformers Trainer doc](https://github.com/huggingface/transformers/blob/main/docs/source/en/main_classes/trainer.md).

### Adding a new tutorial

Adding a new tutorial or section is done in two steps:

- Add a new file under `./source`. This file can either be ReStructuredText (.rst) or Markdown (.md).
- Link that file in `./source/_toctree.yml` on the correct toc-tree.

Make sure to put your new file under the proper section. If in doubt, feel free to ask in a GitHub Issue or PR.

### Writing source documentation

Values that should be rendered as `code` should be surrounded by backticks: \`like so\`. Note that argument names and objects like True, None, or any strings should usually be put in `code`.

#### Writing a multi-line code block

Multi-line code blocks are useful for displaying examples. They are placed between two lines of three backticks, as usual in Markdown:

````
```
# first line of code
# second line
# etc
```
````

#### Adding an image

Because the repository is growing rapidly, it is important not to add files that would significantly weigh it down. This includes images, videos, and other non-text files. We prefer to place such files in a `dataset` hosted on hf.co, like the ones in [`hf-internal-testing`](https://huggingface.co/hf-internal-testing), and reference them by URL. We recommend putting them in the following dataset: [huggingface/documentation-images](https://huggingface.co/datasets/huggingface/documentation-images). If you are making an external contribution, feel free to add the images to your PR and ask a Hugging Face member to migrate them to this dataset.
lerobot/docs/README.md/0
{ "file_path": "lerobot/docs/README.md", "repo_id": "lerobot", "token_count": 1467 }
207
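The README above describes the convention of preserving old anchors with a small HTML "moved sections" map. Purely as an illustration of that convention, the helper below generates such a line from an old/new section pair; it is not part of the documentation tooling, and the anchor-slug rule it assumes (lowercase, spaces to hyphens) is a simplification.

```python
# Hedged helper (not part of doc-builder): emit the anchor-preserving line
# described above for a renamed or moved section.
def moved_section_entry(old_title: str, new_anchor: str, new_file: str | None = None) -> str:
    old_anchor = old_title.lower().replace(" ", "-")
    target = f"{new_file}#{new_anchor}" if new_file else f"#{new_anchor}"
    return f'[ <a href="{target}">{old_title}</a><a id="{old_anchor}"></a> ]'


print(moved_section_entry("Section A", "section-b"))
print(moved_section_entry("Section A", "section-b", new_file="../new-file"))
```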
# 🤗 LeRobot Notebooks

This repository contains example notebooks for using LeRobot. They demonstrate how to train standardized policies on real or simulated datasets.

---

### Training ACT

[ACT](https://huggingface.co/papers/2304.13705) (Action Chunking Transformer) is a transformer-based policy architecture for imitation learning that processes robot states and camera inputs to generate smooth, chunked action sequences.

We provide a ready-to-run Google Colab notebook to help you train ACT policies using datasets from the Hugging Face Hub, with optional logging to Weights & Biases.

| Notebook | Colab |
| :--- | :--- |
| [Train ACT with LeRobot](https://github.com/huggingface/notebooks/blob/main/lerobot/training-act.ipynb) | [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/lerobot/training-act.ipynb) |

Expected training time for 100k steps: ~1.5 hours on an NVIDIA A100 GPU with a batch size of `64`.

### Training SmolVLA

[SmolVLA](https://huggingface.co/papers/2506.01844) is a small but efficient Vision-Language-Action model developed by Hugging Face, compact at roughly 450M parameters.

We provide a ready-to-run Google Colab notebook to help you train SmolVLA policies using datasets from the Hugging Face Hub, with optional logging to Weights & Biases.

| Notebook | Colab |
| :--- | :--- |
| [Train SmolVLA with LeRobot](https://github.com/huggingface/notebooks/blob/main/lerobot/training-smolvla.ipynb) | [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/lerobot/training-smolvla.ipynb) |

Expected training time for 20k steps: ~5 hours on an NVIDIA A100 GPU with a batch size of `64`.
lerobot/docs/source/notebooks.mdx/0
{ "file_path": "lerobot/docs/source/notebooks.mdx", "repo_id": "lerobot", "token_count": 1126 }
208
import time from lerobot.datasets.lerobot_dataset import LeRobotDataset from lerobot.robots.lekiwi.config_lekiwi import LeKiwiClientConfig from lerobot.robots.lekiwi.lekiwi_client import LeKiwiClient from lerobot.utils.robot_utils import busy_wait from lerobot.utils.utils import log_say EPISODE_IDX = 0 robot_config = LeKiwiClientConfig(remote_ip="172.18.134.136", id="lekiwi") robot = LeKiwiClient(robot_config) dataset = LeRobotDataset("<hf_username>/<dataset_repo_id>", episodes=[EPISODE_IDX]) actions = dataset.hf_dataset.select_columns("action") robot.connect() if not robot.is_connected: raise ValueError("Robot is not connected!") log_say(f"Replaying episode {EPISODE_IDX}") for idx in range(dataset.num_frames): t0 = time.perf_counter() action = { name: float(actions[idx]["action"][i]) for i, name in enumerate(dataset.features["action"]["names"]) } robot.send_action(action) busy_wait(max(1.0 / dataset.fps - (time.perf_counter() - t0), 0.0)) robot.disconnect()
lerobot/examples/lekiwi/replay.py/0
{ "file_path": "lerobot/examples/lekiwi/replay.py", "repo_id": "lerobot", "token_count": 418 }
209
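The LeKiwi example above streams recorded actions to the robot at the dataset frame rate. The sketch below dry-runs the same loop without any hardware, which can be handy for checking a dataset before replaying it on a real robot. It only uses the dataset API already shown in the example; the `<hf_username>/<dataset_repo_id>` placeholder is kept as-is and must be filled in.

```python
# Hedged sketch: iterate the recorded actions exactly as replay.py does, but
# only print them and keep the loop timing - no robot connection required.
import time

from lerobot.datasets.lerobot_dataset import LeRobotDataset
from lerobot.utils.robot_utils import busy_wait

EPISODE_IDX = 0

dataset = LeRobotDataset("<hf_username>/<dataset_repo_id>", episodes=[EPISODE_IDX])
actions = dataset.hf_dataset.select_columns("action")
motor_names = dataset.features["action"]["names"]

for idx in range(dataset.num_frames):
    t0 = time.perf_counter()
    action = {name: float(actions[idx]["action"][i]) for i, name in enumerate(motor_names)}
    print(f"frame {idx:04d}: {action}")
    busy_wait(max(1.0 / dataset.fps - (time.perf_counter() - t0), 0.0))
```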
# requirements.in # requirements-macos.txt was generated on macOS and is platform-specific (macOS 15.5 24F74 arm64). # Darwin MacBook-Pro.local 24.5.0 Darwin Kernel Version 24.5.0: Tue Apr 22 19:54:43 PDT 2025; root:xnu-11417.121.6~2/RELEASE_ARM64_T8132 arm64 # requirements-ubuntu.txt was generated on Linux and is platform-specific (Ubuntu 24.04.2 LTS x86_64). # Linux mlerobot-linux 6.14.0-27-generic #27~24.04.1-Ubuntu SMP PREEMPT_DYNAMIC Tue Jul 22 17:38:49 UTC 2 x86_64 x86_64 x86_64 GNU/Linux -e .[all]
lerobot/requirements.in/0
{ "file_path": "lerobot/requirements.in", "repo_id": "lerobot", "token_count": 197 }
210
# Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import importlib import inspect import pkgutil import sys from argparse import ArgumentError from collections.abc import Sequence from functools import wraps from pathlib import Path import draccus from lerobot.utils.utils import has_method PATH_KEY = "path" PLUGIN_DISCOVERY_SUFFIX = "discover_packages_path" def get_cli_overrides(field_name: str, args: Sequence[str] | None = None) -> list[str] | None: """Parses arguments from cli at a given nested attribute level. For example, supposing the main script was called with: python myscript.py --arg1=1 --arg2.subarg1=abc --arg2.subarg2=some/path If called during execution of myscript.py, get_cli_overrides("arg2") will return: ["--subarg1=abc" "--subarg2=some/path"] """ if args is None: args = sys.argv[1:] attr_level_args = [] detect_string = f"--{field_name}." exclude_strings = (f"--{field_name}.{draccus.CHOICE_TYPE_KEY}=", f"--{field_name}.{PATH_KEY}=") for arg in args: if arg.startswith(detect_string) and not arg.startswith(exclude_strings): denested_arg = f"--{arg.removeprefix(detect_string)}" attr_level_args.append(denested_arg) return attr_level_args def parse_arg(arg_name: str, args: Sequence[str] | None = None) -> str | None: if args is None: args = sys.argv[1:] prefix = f"--{arg_name}=" for arg in args: if arg.startswith(prefix): return arg[len(prefix) :] return None def parse_plugin_args(plugin_arg_suffix: str, args: Sequence[str]) -> dict: """Parse plugin-related arguments from command-line arguments. This function extracts arguments from command-line arguments that match a specified suffix pattern. It processes arguments in the format '--key=value' and returns them as a dictionary. Args: plugin_arg_suffix (str): The suffix to identify plugin-related arguments. cli_args (Sequence[str]): A sequence of command-line arguments to parse. Returns: dict: A dictionary containing the parsed plugin arguments where: - Keys are the argument names (with '--' prefix removed if present) - Values are the corresponding argument values Example: >>> args = ["--env.discover_packages_path=my_package", "--other_arg=value"] >>> parse_plugin_args("discover_packages_path", args) {'env.discover_packages_path': 'my_package'} """ plugin_args = {} for arg in args: if "=" in arg and plugin_arg_suffix in arg: key, value = arg.split("=", 1) # Remove leading '--' if present if key.startswith("--"): key = key[2:] plugin_args[key] = value return plugin_args class PluginLoadError(Exception): """Raised when a plugin fails to load.""" def load_plugin(plugin_path: str) -> None: """Load and initialize a plugin from a given Python package path. This function attempts to load a plugin by importing its package and any submodules. Plugin registration is expected to happen during package initialization, i.e. when the package is imported the gym environment should be registered and the config classes registered with their parents using the `register_subclass` decorator. 
Args: plugin_path (str): The Python package path to the plugin (e.g. "mypackage.plugins.myplugin") Raises: PluginLoadError: If the plugin cannot be loaded due to import errors or if the package path is invalid. Examples: >>> load_plugin("external_plugin.core") # Loads plugin from external package Notes: - The plugin package should handle its own registration during import - All submodules in the plugin package will be imported - Implementation follows the plugin discovery pattern from Python packaging guidelines See Also: https://packaging.python.org/en/latest/guides/creating-and-discovering-plugins/ """ try: package_module = importlib.import_module(plugin_path, __package__) except (ImportError, ModuleNotFoundError) as e: raise PluginLoadError( f"Failed to load plugin '{plugin_path}'. Verify the path and installation: {str(e)}" ) from e def iter_namespace(ns_pkg): return pkgutil.iter_modules(ns_pkg.__path__, ns_pkg.__name__ + ".") try: for _finder, pkg_name, _ispkg in iter_namespace(package_module): importlib.import_module(pkg_name) except ImportError as e: raise PluginLoadError( f"Failed to load plugin '{plugin_path}'. Verify the path and installation: {str(e)}" ) from e def get_path_arg(field_name: str, args: Sequence[str] | None = None) -> str | None: return parse_arg(f"{field_name}.{PATH_KEY}", args) def get_type_arg(field_name: str, args: Sequence[str] | None = None) -> str | None: return parse_arg(f"{field_name}.{draccus.CHOICE_TYPE_KEY}", args) def filter_arg(field_to_filter: str, args: Sequence[str] | None = None) -> list[str]: return [arg for arg in args if not arg.startswith(f"--{field_to_filter}=")] def filter_path_args(fields_to_filter: str | list[str], args: Sequence[str] | None = None) -> list[str]: """ Filters command-line arguments related to fields with specific path arguments. Args: fields_to_filter (str | list[str]): A single str or a list of str whose arguments need to be filtered. args (Sequence[str] | None): The sequence of command-line arguments to be filtered. Defaults to None. Returns: list[str]: A filtered list of arguments, with arguments related to the specified fields removed. Raises: ArgumentError: If both a path argument (e.g., `--field_name.path`) and a type argument (e.g., `--field_name.type`) are specified for the same field. """ if isinstance(fields_to_filter, str): fields_to_filter = [fields_to_filter] filtered_args = args for field in fields_to_filter: if get_path_arg(field, args): if get_type_arg(field, args): raise ArgumentError( argument=None, message=f"Cannot specify both --{field}.{PATH_KEY} and --{field}.{draccus.CHOICE_TYPE_KEY}", ) filtered_args = [arg for arg in filtered_args if not arg.startswith(f"--{field}.")] return filtered_args def wrap(config_path: Path | None = None): """ HACK: Similar to draccus.wrap but does three additional things: - Will remove '.path' arguments from CLI in order to process them later on. - If a 'config_path' is passed and the main config class has a 'from_pretrained' method, will initialize it from there to allow to fetch configs from the hub directly - Will load plugins specified in the CLI arguments. 
These plugins will typically register their own subclasses of config classes, so that draccus can find the right class to instantiate from the CLI '.type' arguments """ def wrapper_outer(fn): @wraps(fn) def wrapper_inner(*args, **kwargs): argspec = inspect.getfullargspec(fn) argtype = argspec.annotations[argspec.args[0]] if len(args) > 0 and type(args[0]) is argtype: cfg = args[0] args = args[1:] else: cli_args = sys.argv[1:] plugin_args = parse_plugin_args(PLUGIN_DISCOVERY_SUFFIX, cli_args) for plugin_cli_arg, plugin_path in plugin_args.items(): try: load_plugin(plugin_path) except PluginLoadError as e: # add the relevant CLI arg to the error message raise PluginLoadError(f"{e}\nFailed plugin CLI Arg: {plugin_cli_arg}") from e cli_args = filter_arg(plugin_cli_arg, cli_args) config_path_cli = parse_arg("config_path", cli_args) if has_method(argtype, "__get_path_fields__"): path_fields = argtype.__get_path_fields__() cli_args = filter_path_args(path_fields, cli_args) if has_method(argtype, "from_pretrained") and config_path_cli: cli_args = filter_arg("config_path", cli_args) cfg = argtype.from_pretrained(config_path_cli, cli_args=cli_args) else: cfg = draccus.parse(config_class=argtype, config_path=config_path, args=cli_args) response = fn(cfg, *args, **kwargs) return response return wrapper_inner return wrapper_outer
lerobot/src/lerobot/configs/parser.py/0
{ "file_path": "lerobot/src/lerobot/configs/parser.py", "repo_id": "lerobot", "token_count": 3670 }
211
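Since `parse_arg`, `parse_plugin_args`, and `get_cli_overrides` above operate on plain argument lists, their behaviour is easy to demonstrate directly. The sketch below exercises them with a hand-written argument list instead of `sys.argv`; the sample arguments and values are made up.

```python
# Hedged usage sketch for the pure parsing helpers defined in parser.py.
from lerobot.configs.parser import (
    PLUGIN_DISCOVERY_SUFFIX,
    get_cli_overrides,
    parse_arg,
    parse_plugin_args,
)

cli_args = [
    "--config_path=outputs/train/run1/config.json",
    "--env.type=pusht",
    "--env.discover_packages_path=my_external_plugin",
    "--policy.path=lerobot/diffusion_pusht",
    "--policy.device=cuda",
]

print(parse_arg("config_path", cli_args))
# outputs/train/run1/config.json

print(parse_plugin_args(PLUGIN_DISCOVERY_SUFFIX, cli_args))
# {'env.discover_packages_path': 'my_external_plugin'}

print(get_cli_overrides("policy", cli_args))
# ['--device=cuda']  (the .path argument is excluded on purpose)
```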
# Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math import os from dataclasses import dataclass os.environ["PYGAME_HIDE_SUPPORT_PROMPT"] = "1" from lerobot.motors import MotorCalibration, MotorsBus BAR_LEN, BAR_THICKNESS = 450, 8 HANDLE_R = 10 BRACKET_W, BRACKET_H = 6, 14 TRI_W, TRI_H = 12, 14 BTN_W, BTN_H = 60, 22 SAVE_W, SAVE_H = 80, 28 LOAD_W = 80 DD_W, DD_H = 160, 28 TOP_GAP = 50 PADDING_Y, TOP_OFFSET = 70, 60 FONT_SIZE, FPS = 20, 60 BG_COLOR = (30, 30, 30) BAR_RED, BAR_GREEN = (200, 60, 60), (60, 200, 60) HANDLE_COLOR, TEXT_COLOR = (240, 240, 240), (250, 250, 250) TICK_COLOR = (250, 220, 40) BTN_COLOR, BTN_COLOR_HL = (80, 80, 80), (110, 110, 110) DD_COLOR, DD_COLOR_HL = (70, 70, 70), (100, 100, 100) def dist(a, b): return math.hypot(a[0] - b[0], a[1] - b[1]) @dataclass class RangeValues: min_v: int pos_v: int max_v: int class RangeSlider: """One motor = one slider row""" def __init__(self, motor, idx, res, calibration, present, label_pad, base_y): import pygame self.motor = motor self.res = res self.x0 = 40 + label_pad self.x1 = self.x0 + BAR_LEN self.y = base_y + idx * PADDING_Y self.min_v = calibration.range_min self.max_v = calibration.range_max self.pos_v = max(self.min_v, min(present, self.max_v)) self.min_x = self._pos_from_val(self.min_v) self.max_x = self._pos_from_val(self.max_v) self.pos_x = self._pos_from_val(self.pos_v) self.min_btn = pygame.Rect(self.x0 - BTN_W - 6, self.y - BTN_H // 2, BTN_W, BTN_H) self.max_btn = pygame.Rect(self.x1 + 6, self.y - BTN_H // 2, BTN_W, BTN_H) self.drag_min = self.drag_max = self.drag_pos = False self.tick_val = present self.font = pygame.font.Font(None, FONT_SIZE) def _val_from_pos(self, x): return round((x - self.x0) / BAR_LEN * self.res) def _pos_from_val(self, v): return self.x0 + (v / self.res) * BAR_LEN def set_tick(self, v): self.tick_val = max(0, min(v, self.res)) def _triangle_hit(self, pos): import pygame tri_top = self.y - BAR_THICKNESS // 2 - 2 return pygame.Rect(self.pos_x - TRI_W // 2, tri_top - TRI_H, TRI_W, TRI_H).collidepoint(pos) def handle_event(self, e): import pygame if e.type == pygame.MOUSEBUTTONDOWN and e.button == 1: if self.min_btn.collidepoint(e.pos): self.min_x, self.min_v = self.pos_x, self.pos_v return if self.max_btn.collidepoint(e.pos): self.max_x, self.max_v = self.pos_x, self.pos_v return if dist(e.pos, (self.min_x, self.y)) <= HANDLE_R: self.drag_min = True elif dist(e.pos, (self.max_x, self.y)) <= HANDLE_R: self.drag_max = True elif self._triangle_hit(e.pos): self.drag_pos = True elif e.type == pygame.MOUSEBUTTONUP and e.button == 1: self.drag_min = self.drag_max = self.drag_pos = False elif e.type == pygame.MOUSEMOTION: x = e.pos[0] if self.drag_min: self.min_x = max(self.x0, min(x, self.pos_x)) elif self.drag_max: self.max_x = min(self.x1, max(x, self.pos_x)) elif self.drag_pos: self.pos_x = max(self.min_x, min(x, self.max_x)) self.min_v = self._val_from_pos(self.min_x) self.max_v = self._val_from_pos(self.max_x) self.pos_v = 
self._val_from_pos(self.pos_x) def _draw_button(self, surf, rect, text): import pygame clr = BTN_COLOR_HL if rect.collidepoint(pygame.mouse.get_pos()) else BTN_COLOR pygame.draw.rect(surf, clr, rect, border_radius=4) t = self.font.render(text, True, TEXT_COLOR) surf.blit(t, (rect.centerx - t.get_width() // 2, rect.centery - t.get_height() // 2)) def draw(self, surf): import pygame # motor name above set-min button (right-aligned) name_surf = self.font.render(self.motor, True, TEXT_COLOR) surf.blit( name_surf, (self.min_btn.right - name_surf.get_width(), self.min_btn.y - name_surf.get_height() - 4), ) # bar + active section pygame.draw.rect(surf, BAR_RED, (self.x0, self.y - BAR_THICKNESS // 2, BAR_LEN, BAR_THICKNESS)) pygame.draw.rect( surf, BAR_GREEN, (self.min_x, self.y - BAR_THICKNESS // 2, self.max_x - self.min_x, BAR_THICKNESS) ) # tick tick_x = self._pos_from_val(self.tick_val) pygame.draw.line( surf, TICK_COLOR, (tick_x, self.y - BAR_THICKNESS // 2 - 4), (tick_x, self.y + BAR_THICKNESS // 2 + 4), 2, ) # brackets for x, sign in ((self.min_x, +1), (self.max_x, -1)): pygame.draw.line( surf, HANDLE_COLOR, (x, self.y - BRACKET_H // 2), (x, self.y + BRACKET_H // 2), 2 ) pygame.draw.line( surf, HANDLE_COLOR, (x, self.y - BRACKET_H // 2), (x + sign * BRACKET_W, self.y - BRACKET_H // 2), 2, ) pygame.draw.line( surf, HANDLE_COLOR, (x, self.y + BRACKET_H // 2), (x + sign * BRACKET_W, self.y + BRACKET_H // 2), 2, ) # triangle ▼ tri_top = self.y - BAR_THICKNESS // 2 - 2 pygame.draw.polygon( surf, HANDLE_COLOR, [ (self.pos_x, tri_top), (self.pos_x - TRI_W // 2, tri_top - TRI_H), (self.pos_x + TRI_W // 2, tri_top - TRI_H), ], ) # numeric labels fh = self.font.get_height() pos_y = tri_top - TRI_H - 4 - fh txts = [ (self.min_v, self.min_x, self.y - BRACKET_H // 2 - 4 - fh), (self.max_v, self.max_x, self.y - BRACKET_H // 2 - 4 - fh), (self.pos_v, self.pos_x, pos_y), ] for v, x, y in txts: s = self.font.render(str(v), True, TEXT_COLOR) surf.blit(s, (x - s.get_width() // 2, y)) # buttons self._draw_button(surf, self.min_btn, "set min") self._draw_button(surf, self.max_btn, "set max") # external def values(self) -> RangeValues: return RangeValues(self.min_v, self.pos_v, self.max_v) class RangeFinderGUI: def __init__(self, bus: MotorsBus, groups: dict[str, list[str]] | None = None): import pygame self.bus = bus self.groups = groups if groups is not None else {"all": list(bus.motors)} self.group_names = list(groups) self.current_group = self.group_names[0] if not bus.is_connected: bus.connect() self.calibration = bus.read_calibration() self.res_table = bus.model_resolution_table self.present_cache = { m: bus.read("Present_Position", m, normalize=False) for motors in groups.values() for m in motors } pygame.init() self.font = pygame.font.Font(None, FONT_SIZE) label_pad = max(self.font.size(m)[0] for ms in groups.values() for m in ms) self.label_pad = label_pad width = 40 + label_pad + BAR_LEN + 6 + BTN_W + 10 + SAVE_W + 10 self.controls_bottom = 10 + SAVE_H self.base_y = self.controls_bottom + TOP_GAP height = self.base_y + PADDING_Y * len(groups[self.current_group]) + 40 self.screen = pygame.display.set_mode((width, height)) pygame.display.set_caption("Motors range finder") # ui rects self.save_btn = pygame.Rect(width - SAVE_W - 10, 10, SAVE_W, SAVE_H) self.load_btn = pygame.Rect(self.save_btn.left - LOAD_W - 10, 10, LOAD_W, SAVE_H) self.dd_btn = pygame.Rect(width // 2 - DD_W // 2, 10, DD_W, DD_H) self.dd_open = False # dropdown expanded? 
self.clock = pygame.time.Clock() self._build_sliders() self._adjust_height() def _adjust_height(self): import pygame motors = self.groups[self.current_group] new_h = self.base_y + PADDING_Y * len(motors) + 40 if new_h != self.screen.get_height(): w = self.screen.get_width() self.screen = pygame.display.set_mode((w, new_h)) def _build_sliders(self): self.sliders: list[RangeSlider] = [] motors = self.groups[self.current_group] for i, m in enumerate(motors): self.sliders.append( RangeSlider( motor=m, idx=i, res=self.res_table[self.bus.motors[m].model] - 1, calibration=self.calibration[m], present=self.present_cache[m], label_pad=self.label_pad, base_y=self.base_y, ) ) def _draw_dropdown(self): import pygame # collapsed box hover = self.dd_btn.collidepoint(pygame.mouse.get_pos()) pygame.draw.rect(self.screen, DD_COLOR_HL if hover else DD_COLOR, self.dd_btn, border_radius=6) txt = self.font.render(self.current_group, True, TEXT_COLOR) self.screen.blit( txt, (self.dd_btn.centerx - txt.get_width() // 2, self.dd_btn.centery - txt.get_height() // 2) ) tri_w, tri_h = 12, 6 cx = self.dd_btn.right - 14 cy = self.dd_btn.centery + 1 pygame.draw.polygon( self.screen, TEXT_COLOR, [(cx - tri_w // 2, cy - tri_h // 2), (cx + tri_w // 2, cy - tri_h // 2), (cx, cy + tri_h // 2)], ) if not self.dd_open: return # expanded list for i, name in enumerate(self.group_names): item_rect = pygame.Rect(self.dd_btn.left, self.dd_btn.bottom + i * DD_H, DD_W, DD_H) clr = DD_COLOR_HL if item_rect.collidepoint(pygame.mouse.get_pos()) else DD_COLOR pygame.draw.rect(self.screen, clr, item_rect) t = self.font.render(name, True, TEXT_COLOR) self.screen.blit( t, (item_rect.centerx - t.get_width() // 2, item_rect.centery - t.get_height() // 2) ) def _handle_dropdown_event(self, e): import pygame if e.type == pygame.MOUSEBUTTONDOWN and e.button == 1: if self.dd_btn.collidepoint(e.pos): self.dd_open = not self.dd_open return True if self.dd_open: for i, name in enumerate(self.group_names): item_rect = pygame.Rect(self.dd_btn.left, self.dd_btn.bottom + i * DD_H, DD_W, DD_H) if item_rect.collidepoint(e.pos): if name != self.current_group: self.current_group = name self._build_sliders() self._adjust_height() self.dd_open = False return True self.dd_open = False return False def _save_current(self): for s in self.sliders: self.calibration[s.motor].range_min = s.min_v self.calibration[s.motor].range_max = s.max_v with self.bus.torque_disabled(): self.bus.write_calibration(self.calibration) def _load_current(self): self.calibration = self.bus.read_calibration() for s in self.sliders: s.min_v = self.calibration[s.motor].range_min s.max_v = self.calibration[s.motor].range_max s.min_x = s._pos_from_val(s.min_v) s.max_x = s._pos_from_val(s.max_v) def run(self) -> dict[str, MotorCalibration]: import pygame while True: for e in pygame.event.get(): if e.type == pygame.QUIT: pygame.quit() return self.calibration if self._handle_dropdown_event(e): continue if e.type == pygame.MOUSEBUTTONDOWN and e.button == 1: if self.save_btn.collidepoint(e.pos): self._save_current() elif self.load_btn.collidepoint(e.pos): self._load_current() for s in self.sliders: s.handle_event(e) # live goal write while dragging for s in self.sliders: if s.drag_pos: self.bus.write("Goal_Position", s.motor, s.pos_v, normalize=False) # tick update for s in self.sliders: pos = self.bus.read("Present_Position", s.motor, normalize=False) s.set_tick(pos) self.present_cache[s.motor] = pos # ─ drawing self.screen.fill(BG_COLOR) for s in self.sliders: s.draw(self.screen) 
self._draw_dropdown() # load / save buttons for rect, text in ((self.load_btn, "LOAD"), (self.save_btn, "SAVE")): clr = BTN_COLOR_HL if rect.collidepoint(pygame.mouse.get_pos()) else BTN_COLOR pygame.draw.rect(self.screen, clr, rect, border_radius=6) t = self.font.render(text, True, TEXT_COLOR) self.screen.blit(t, (rect.centerx - t.get_width() // 2, rect.centery - t.get_height() // 2)) pygame.display.flip() self.clock.tick(FPS)
lerobot/src/lerobot/motors/calibration_gui.py/0
{ "file_path": "lerobot/src/lerobot/motors/calibration_gui.py", "repo_id": "lerobot", "token_count": 7469 }
212
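`RangeFinderGUI` above is driven from a connected `MotorsBus` plus optional motor groups, and returns the edited calibration when the window is closed. A hedged launch sketch follows; the serial port, motor ids/models, and the grouping are illustrative assumptions (a later record, `HopeJrArm.calibrate()`, uses the same pattern on a full arm). Note that the GUI requires `pygame`.

```python
# Hedged sketch: open the range-finder GUI on a small Feetech bus.
from lerobot.motors import Motor, MotorNormMode
from lerobot.motors.calibration_gui import RangeFinderGUI
from lerobot.motors.feetech import FeetechMotorsBus

bus = FeetechMotorsBus(
    port="/dev/ttyACM0",  # illustrative
    motors={
        "wrist_roll": Motor(5, "sts3250", MotorNormMode.RANGE_M100_100),
        "wrist_pitch": Motor(7, "sts3250", MotorNormMode.RANGE_M100_100),
    },
)
bus.connect()

groups = {"all": list(bus.motors), "wrist": ["wrist_roll", "wrist_pitch"]}
calibration = RangeFinderGUI(bus, groups).run()  # blocks until the window is closed
print({name: (c.range_min, c.range_max) for name, c in calibration.items()})
```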
## Paper

https://diffusion-policy.cs.columbia.edu

## Citation

```bibtex
@article{chi2024diffusionpolicy,
    author = {Cheng Chi and Zhenjia Xu and Siyuan Feng and Eric Cousineau and Yilun Du and Benjamin Burchfiel and Russ Tedrake and Shuran Song},
    title = {Diffusion Policy: Visuomotor Policy Learning via Action Diffusion},
    journal = {The International Journal of Robotics Research},
    year = {2024},
}
```
lerobot/src/lerobot/policies/diffusion/README.md/0
{ "file_path": "lerobot/src/lerobot/policies/diffusion/README.md", "repo_id": "lerobot", "token_count": 130 }
213
# !/usr/bin/env python # Copyright 2025 The HuggingFace Inc. team. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass, field from lerobot.configs.policies import PreTrainedConfig from lerobot.configs.types import NormalizationMode from lerobot.constants import ACTION, OBS_IMAGE, OBS_STATE from lerobot.optim.optimizers import MultiAdamConfig def is_image_feature(key: str) -> bool: """Check if a feature key represents an image feature. Args: key: The feature key to check Returns: True if the key represents an image feature, False otherwise """ return key.startswith(OBS_IMAGE) @dataclass class ConcurrencyConfig: """Configuration for the concurrency of the actor and learner. Possible values are: - "threads": Use threads for the actor and learner. - "processes": Use processes for the actor and learner. """ actor: str = "threads" learner: str = "threads" @dataclass class ActorLearnerConfig: learner_host: str = "127.0.0.1" learner_port: int = 50051 policy_parameters_push_frequency: int = 4 queue_get_timeout: float = 2 @dataclass class CriticNetworkConfig: hidden_dims: list[int] = field(default_factory=lambda: [256, 256]) activate_final: bool = True final_activation: str | None = None @dataclass class ActorNetworkConfig: hidden_dims: list[int] = field(default_factory=lambda: [256, 256]) activate_final: bool = True @dataclass class PolicyConfig: use_tanh_squash: bool = True std_min: float = 1e-5 std_max: float = 10.0 init_final: float = 0.05 @PreTrainedConfig.register_subclass("sac") @dataclass class SACConfig(PreTrainedConfig): """Soft Actor-Critic (SAC) configuration. SAC is an off-policy actor-critic deep RL algorithm based on the maximum entropy reinforcement learning framework. It learns a policy and a Q-function simultaneously using experience collected from the environment. This configuration class contains all the parameters needed to define a SAC agent, including network architectures, optimization settings, and algorithm-specific hyperparameters. 
""" # Mapping of feature types to normalization modes normalization_mapping: dict[str, NormalizationMode] = field( default_factory=lambda: { "VISUAL": NormalizationMode.MEAN_STD, "STATE": NormalizationMode.MIN_MAX, "ENV": NormalizationMode.MIN_MAX, "ACTION": NormalizationMode.MIN_MAX, } ) # Statistics for normalizing different types of inputs dataset_stats: dict[str, dict[str, list[float]]] | None = field( default_factory=lambda: { OBS_IMAGE: { "mean": [0.485, 0.456, 0.406], "std": [0.229, 0.224, 0.225], }, OBS_STATE: { "min": [0.0, 0.0], "max": [1.0, 1.0], }, ACTION: { "min": [0.0, 0.0, 0.0], "max": [1.0, 1.0, 1.0], }, } ) # Architecture specifics # Device to run the model on (e.g., "cuda", "cpu") device: str = "cpu" # Device to store the model on storage_device: str = "cpu" # Name of the vision encoder model (Set to "helper2424/resnet10" for hil serl resnet10) vision_encoder_name: str | None = None # Whether to freeze the vision encoder during training freeze_vision_encoder: bool = True # Hidden dimension size for the image encoder image_encoder_hidden_dim: int = 32 # Whether to use a shared encoder for actor and critic shared_encoder: bool = True # Number of discrete actions, eg for gripper actions num_discrete_actions: int | None = None # Dimension of the image embedding pooling image_embedding_pooling_dim: int = 8 # Training parameter # Number of steps for online training online_steps: int = 1000000 # Seed for the online environment online_env_seed: int = 10000 # Capacity of the online replay buffer online_buffer_capacity: int = 100000 # Capacity of the offline replay buffer offline_buffer_capacity: int = 100000 # Whether to use asynchronous prefetching for the buffers async_prefetch: bool = False # Number of steps before learning starts online_step_before_learning: int = 100 # Frequency of policy updates policy_update_freq: int = 1 # SAC algorithm parameters # Discount factor for the SAC algorithm discount: float = 0.99 # Initial temperature value temperature_init: float = 1.0 # Number of critics in the ensemble num_critics: int = 2 # Number of subsampled critics for training num_subsample_critics: int | None = None # Learning rate for the critic network critic_lr: float = 3e-4 # Learning rate for the actor network actor_lr: float = 3e-4 # Learning rate for the temperature parameter temperature_lr: float = 3e-4 # Weight for the critic target update critic_target_update_weight: float = 0.005 # Update-to-data ratio for the UTD algorithm (If you want enable utd_ratio, you need to set it to >1) utd_ratio: int = 1 # Hidden dimension size for the state encoder state_encoder_hidden_dim: int = 256 # Dimension of the latent space latent_dim: int = 256 # Target entropy for the SAC algorithm target_entropy: float | None = None # Whether to use backup entropy for the SAC algorithm use_backup_entropy: bool = True # Gradient clipping norm for the SAC algorithm grad_clip_norm: float = 40.0 # Network configuration # Configuration for the critic network architecture critic_network_kwargs: CriticNetworkConfig = field(default_factory=CriticNetworkConfig) # Configuration for the actor network architecture actor_network_kwargs: ActorNetworkConfig = field(default_factory=ActorNetworkConfig) # Configuration for the policy parameters policy_kwargs: PolicyConfig = field(default_factory=PolicyConfig) # Configuration for the discrete critic network discrete_critic_network_kwargs: CriticNetworkConfig = field(default_factory=CriticNetworkConfig) # Configuration for actor-learner architecture 
actor_learner_config: ActorLearnerConfig = field(default_factory=ActorLearnerConfig) # Configuration for concurrency settings (you can use threads or processes for the actor and learner) concurrency: ConcurrencyConfig = field(default_factory=ConcurrencyConfig) # Optimizations use_torch_compile: bool = True def __post_init__(self): super().__post_init__() # Any validation specific to SAC configuration def get_optimizer_preset(self) -> MultiAdamConfig: return MultiAdamConfig( weight_decay=0.0, optimizer_groups={ "actor": {"lr": self.actor_lr}, "critic": {"lr": self.critic_lr}, "temperature": {"lr": self.temperature_lr}, }, ) def get_scheduler_preset(self) -> None: return None def validate_features(self) -> None: has_image = any(is_image_feature(key) for key in self.input_features) has_state = OBS_STATE in self.input_features if not (has_state or has_image): raise ValueError( "You must provide either 'observation.state' or an image observation (key starting with 'observation.image') in the input features" ) if "action" not in self.output_features: raise ValueError("You must provide 'action' in the output features") @property def image_features(self) -> list[str]: return [key for key in self.input_features if is_image_feature(key)] @property def observation_delta_indices(self) -> list: return None @property def action_delta_indices(self) -> list: return None # SAC typically predicts one action at a time @property def reward_delta_indices(self) -> None: return None
lerobot/src/lerobot/policies/sac/configuration_sac.py/0
{ "file_path": "lerobot/src/lerobot/policies/sac/configuration_sac.py", "repo_id": "lerobot", "token_count": 3091 }
214
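The dataclass above exposes every SAC hyperparameter, plus an optimizer preset with separate actor/critic/temperature learning rates. Below is a hedged construction sketch: it assumes `SACConfig` can be built standalone with its defaults (in a real run, `input_features`/`output_features` are filled from the dataset before `validate_features()` is called), and the printed values are what the code above implies rather than verified output.

```python
# Hedged sketch: build a SAC config with a few overrides and inspect the
# per-group optimizer preset defined by get_optimizer_preset().
from lerobot.constants import OBS_IMAGE, OBS_STATE
from lerobot.policies.sac.configuration_sac import (
    ActorNetworkConfig,
    CriticNetworkConfig,
    SACConfig,
    is_image_feature,
)

print(is_image_feature(OBS_IMAGE), is_image_feature(OBS_STATE))
# True False

cfg = SACConfig(
    actor_lr=1e-4,
    critic_lr=3e-4,
    num_critics=4,
    critic_network_kwargs=CriticNetworkConfig(hidden_dims=[512, 512]),
    actor_network_kwargs=ActorNetworkConfig(hidden_dims=[512, 512]),
)

optim = cfg.get_optimizer_preset()  # MultiAdamConfig with one group per module
print(optim.optimizer_groups)
# e.g. {'actor': {'lr': 0.0001}, 'critic': {'lr': 0.0003}, 'temperature': {'lr': 0.0003}}
```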
#!/usr/bin/env python # Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import time from functools import cached_property from typing import Any from lerobot.cameras.utils import make_cameras_from_configs from lerobot.errors import DeviceAlreadyConnectedError, DeviceNotConnectedError from lerobot.motors import Motor, MotorNormMode from lerobot.motors.calibration_gui import RangeFinderGUI from lerobot.motors.feetech import ( FeetechMotorsBus, ) from ..robot import Robot from ..utils import ensure_safe_goal_position from .config_hope_jr import HopeJrArmConfig logger = logging.getLogger(__name__) class HopeJrArm(Robot): config_class = HopeJrArmConfig name = "hope_jr_arm" def __init__(self, config: HopeJrArmConfig): super().__init__(config) self.config = config self.bus = FeetechMotorsBus( port=self.config.port, motors={ "shoulder_pitch": Motor(1, "sm8512bl", MotorNormMode.RANGE_M100_100), "shoulder_yaw": Motor(2, "sts3250", MotorNormMode.RANGE_M100_100), "shoulder_roll": Motor(3, "sts3250", MotorNormMode.RANGE_M100_100), "elbow_flex": Motor(4, "sts3250", MotorNormMode.RANGE_M100_100), "wrist_roll": Motor(5, "sts3250", MotorNormMode.RANGE_M100_100), "wrist_yaw": Motor(6, "sts3250", MotorNormMode.RANGE_M100_100), "wrist_pitch": Motor(7, "sts3250", MotorNormMode.RANGE_M100_100), }, calibration=self.calibration, ) self.cameras = make_cameras_from_configs(config.cameras) # HACK self.shoulder_pitch = "shoulder_pitch" self.other_motors = [m for m in self.bus.motors if m != "shoulder_pitch"] @property def _motors_ft(self) -> dict[str, type]: return {f"{motor}.pos": float for motor in self.bus.motors} @property def _cameras_ft(self) -> dict[str, tuple]: return { cam: (self.config.cameras[cam].height, self.config.cameras[cam].width, 3) for cam in self.cameras } @cached_property def observation_features(self) -> dict[str, type | tuple]: return {**self._motors_ft, **self._cameras_ft} @cached_property def action_features(self) -> dict[str, type]: return self._motors_ft @property def is_connected(self) -> bool: return self.bus.is_connected and all(cam.is_connected for cam in self.cameras.values()) def connect(self, calibrate: bool = True) -> None: """ We assume that at connection time, arm is in a rest position, and torque can be safely disabled to run calibration. 
""" if self.is_connected: raise DeviceAlreadyConnectedError(f"{self} already connected") self.bus.connect(handshake=False) if not self.is_calibrated and calibrate: self.calibrate() # Connect the cameras for cam in self.cameras.values(): cam.connect() self.configure() logger.info(f"{self} connected.") @property def is_calibrated(self) -> bool: return self.bus.is_calibrated def calibrate(self, limb_name: str = None) -> None: groups = { "all": list(self.bus.motors.keys()), "shoulder": ["shoulder_pitch", "shoulder_yaw", "shoulder_roll"], "elbow": ["elbow_flex"], "wrist": ["wrist_roll", "wrist_yaw", "wrist_pitch"], } self.calibration = RangeFinderGUI(self.bus, groups).run() self._save_calibration() print("Calibration saved to", self.calibration_fpath) def configure(self) -> None: with self.bus.torque_disabled(): self.bus.configure_motors(maximum_acceleration=30, acceleration=30) def setup_motors(self) -> None: # TODO: add docstring for motor in reversed(self.bus.motors): input(f"Connect the controller board to the '{motor}' motor only and press enter.") self.bus.setup_motor(motor) print(f"'{motor}' motor id set to {self.bus.motors[motor].id}") def get_observation(self) -> dict[str, Any]: if not self.is_connected: raise DeviceNotConnectedError(f"{self} is not connected.") # Read arm position start = time.perf_counter() obs_dict = self.bus.sync_read("Present_Position", self.other_motors) obs_dict[self.shoulder_pitch] = self.bus.read("Present_Position", self.shoulder_pitch) obs_dict = {f"{motor}.pos": val for motor, val in obs_dict.items()} dt_ms = (time.perf_counter() - start) * 1e3 logger.debug(f"{self} read state: {dt_ms:.1f}ms") # Capture images from cameras for cam_key, cam in self.cameras.items(): start = time.perf_counter() obs_dict[cam_key] = cam.async_read() dt_ms = (time.perf_counter() - start) * 1e3 logger.debug(f"{self} read {cam_key}: {dt_ms:.1f}ms") return obs_dict def send_action(self, action: dict[str, Any]) -> dict[str, Any]: if not self.is_connected: raise DeviceNotConnectedError(f"{self} is not connected.") goal_pos = {key.removesuffix(".pos"): val for key, val in action.items() if key.endswith(".pos")} # Cap goal position when too far away from present position. # /!\ Slower fps expected due to reading from the follower. if self.config.max_relative_target is not None: present_pos = self.bus.sync_read("Present_Position") goal_present_pos = {key: (g_pos, present_pos[key]) for key, g_pos in goal_pos.items()} goal_pos = ensure_safe_goal_position(goal_present_pos, self.config.max_relative_target) self.bus.sync_write("Goal_Position", goal_pos) return {f"{motor}.pos": val for motor, val in goal_pos.items()} def disconnect(self): if not self.is_connected: raise DeviceNotConnectedError(f"{self} is not connected.") self.bus.disconnect(self.config.disable_torque_on_disconnect) for cam in self.cameras.values(): cam.disconnect() logger.info(f"{self} disconnected.")
lerobot/src/lerobot/robots/hope_jr/hope_jr_arm.py/0
{ "file_path": "lerobot/src/lerobot/robots/hope_jr/hope_jr_arm.py", "repo_id": "lerobot", "token_count": 2878 }
215
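`HopeJrArm` above implements the standard `Robot` interface: `connect()`, `get_observation()`, `send_action()`, `disconnect()`. A hedged connection-and-read sketch follows; the serial port and `id` are illustrative assumptions, and it assumes a calibration file already exists for that id so the GUI calibration step can be skipped.

```python
# Hedged sketch: connect a HopeJr arm, read one observation, and echo the
# current joint positions back as a (no-op) goal so the arm holds still.
from lerobot.robots.hope_jr.config_hope_jr import HopeJrArmConfig
from lerobot.robots.hope_jr.hope_jr_arm import HopeJrArm

config = HopeJrArmConfig(port="/dev/ttyUSB0", id="my_hope_jr_arm")  # illustrative values
arm = HopeJrArm(config)
arm.connect(calibrate=False)  # assumes calibration was already done for this id

obs = arm.get_observation()
joint_positions = {key: val for key, val in obs.items() if key.endswith(".pos")}
print(joint_positions)

sent = arm.send_action(joint_positions)  # echo present positions as the goal
print(sent)

arm.disconnect()
```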
#!/usr/bin/env python # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import time from functools import cached_property from typing import Any from lerobot.cameras.utils import make_cameras_from_configs from lerobot.errors import DeviceAlreadyConnectedError, DeviceNotConnectedError from lerobot.motors import Motor, MotorCalibration, MotorNormMode from lerobot.motors.feetech import ( FeetechMotorsBus, OperatingMode, ) from ..robot import Robot from ..utils import ensure_safe_goal_position from .config_so100_follower import SO100FollowerConfig logger = logging.getLogger(__name__) class SO100Follower(Robot): """ [SO-100 Follower Arm](https://github.com/TheRobotStudio/SO-ARM100) designed by TheRobotStudio """ config_class = SO100FollowerConfig name = "so100_follower" def __init__(self, config: SO100FollowerConfig): super().__init__(config) self.config = config norm_mode_body = MotorNormMode.DEGREES if config.use_degrees else MotorNormMode.RANGE_M100_100 self.bus = FeetechMotorsBus( port=self.config.port, motors={ "shoulder_pan": Motor(1, "sts3215", norm_mode_body), "shoulder_lift": Motor(2, "sts3215", norm_mode_body), "elbow_flex": Motor(3, "sts3215", norm_mode_body), "wrist_flex": Motor(4, "sts3215", norm_mode_body), "wrist_roll": Motor(5, "sts3215", norm_mode_body), "gripper": Motor(6, "sts3215", MotorNormMode.RANGE_0_100), }, calibration=self.calibration, ) self.cameras = make_cameras_from_configs(config.cameras) @property def _motors_ft(self) -> dict[str, type]: return {f"{motor}.pos": float for motor in self.bus.motors} @property def _cameras_ft(self) -> dict[str, tuple]: return { cam: (self.config.cameras[cam].height, self.config.cameras[cam].width, 3) for cam in self.cameras } @cached_property def observation_features(self) -> dict[str, type | tuple]: return {**self._motors_ft, **self._cameras_ft} @cached_property def action_features(self) -> dict[str, type]: return self._motors_ft @property def is_connected(self) -> bool: return self.bus.is_connected and all(cam.is_connected for cam in self.cameras.values()) def connect(self, calibrate: bool = True) -> None: """ We assume that at connection time, arm is in a rest position, and torque can be safely disabled to run calibration. 
""" if self.is_connected: raise DeviceAlreadyConnectedError(f"{self} already connected") self.bus.connect() if not self.is_calibrated and calibrate: logger.info( "Mismatch between calibration values in the motor and the calibration file or no calibration file found" ) self.calibrate() for cam in self.cameras.values(): cam.connect() self.configure() logger.info(f"{self} connected.") @property def is_calibrated(self) -> bool: return self.bus.is_calibrated def calibrate(self) -> None: if self.calibration: # Calibration file exists, ask user whether to use it or run new calibration user_input = input( f"Press ENTER to use provided calibration file associated with the id {self.id}, or type 'c' and press ENTER to run calibration: " ) if user_input.strip().lower() != "c": logger.info(f"Writing calibration file associated with the id {self.id} to the motors") self.bus.write_calibration(self.calibration) return logger.info(f"\nRunning calibration of {self}") self.bus.disable_torque() for motor in self.bus.motors: self.bus.write("Operating_Mode", motor, OperatingMode.POSITION.value) input(f"Move {self} to the middle of its range of motion and press ENTER....") homing_offsets = self.bus.set_half_turn_homings() full_turn_motor = "wrist_roll" unknown_range_motors = [motor for motor in self.bus.motors if motor != full_turn_motor] print( f"Move all joints except '{full_turn_motor}' sequentially through their " "entire ranges of motion.\nRecording positions. Press ENTER to stop..." ) range_mins, range_maxes = self.bus.record_ranges_of_motion(unknown_range_motors) range_mins[full_turn_motor] = 0 range_maxes[full_turn_motor] = 4095 self.calibration = {} for motor, m in self.bus.motors.items(): self.calibration[motor] = MotorCalibration( id=m.id, drive_mode=0, homing_offset=homing_offsets[motor], range_min=range_mins[motor], range_max=range_maxes[motor], ) self.bus.write_calibration(self.calibration) self._save_calibration() print("Calibration saved to", self.calibration_fpath) def configure(self) -> None: with self.bus.torque_disabled(): self.bus.configure_motors() for motor in self.bus.motors: self.bus.write("Operating_Mode", motor, OperatingMode.POSITION.value) # Set P_Coefficient to lower value to avoid shakiness (Default is 32) self.bus.write("P_Coefficient", motor, 16) # Set I_Coefficient and D_Coefficient to default value 0 and 32 self.bus.write("I_Coefficient", motor, 0) self.bus.write("D_Coefficient", motor, 32) def setup_motors(self) -> None: for motor in reversed(self.bus.motors): input(f"Connect the controller board to the '{motor}' motor only and press enter.") self.bus.setup_motor(motor) print(f"'{motor}' motor id set to {self.bus.motors[motor].id}") def get_observation(self) -> dict[str, Any]: if not self.is_connected: raise DeviceNotConnectedError(f"{self} is not connected.") # Read arm position start = time.perf_counter() obs_dict = self.bus.sync_read("Present_Position") obs_dict = {f"{motor}.pos": val for motor, val in obs_dict.items()} dt_ms = (time.perf_counter() - start) * 1e3 logger.debug(f"{self} read state: {dt_ms:.1f}ms") # Capture images from cameras for cam_key, cam in self.cameras.items(): start = time.perf_counter() obs_dict[cam_key] = cam.async_read() dt_ms = (time.perf_counter() - start) * 1e3 logger.debug(f"{self} read {cam_key}: {dt_ms:.1f}ms") return obs_dict def send_action(self, action: dict[str, Any]) -> dict[str, Any]: """Command arm to move to a target joint configuration. 
The relative action magnitude may be clipped depending on the configuration parameter `max_relative_target`. In this case, the action sent differs from original action. Thus, this function always returns the action actually sent. Raises: RobotDeviceNotConnectedError: if robot is not connected. Returns: the action sent to the motors, potentially clipped. """ if not self.is_connected: raise DeviceNotConnectedError(f"{self} is not connected.") goal_pos = {key.removesuffix(".pos"): val for key, val in action.items() if key.endswith(".pos")} # Cap goal position when too far away from present position. # /!\ Slower fps expected due to reading from the follower. if self.config.max_relative_target is not None: present_pos = self.bus.sync_read("Present_Position") goal_present_pos = {key: (g_pos, present_pos[key]) for key, g_pos in goal_pos.items()} goal_pos = ensure_safe_goal_position(goal_present_pos, self.config.max_relative_target) # Send goal position to the arm self.bus.sync_write("Goal_Position", goal_pos) return {f"{motor}.pos": val for motor, val in goal_pos.items()} def disconnect(self): if not self.is_connected: raise DeviceNotConnectedError(f"{self} is not connected.") self.bus.disconnect(self.config.disable_torque_on_disconnect) for cam in self.cameras.values(): cam.disconnect() logger.info(f"{self} disconnected.")
lerobot/src/lerobot/robots/so100_follower/so100_follower.py/0
{ "file_path": "lerobot/src/lerobot/robots/so100_follower/so100_follower.py", "repo_id": "lerobot", "token_count": 3841 }
216
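`SO100Follower` above optionally clips every command through `ensure_safe_goal_position` when `max_relative_target` is set, and `send_action()` returns the action actually written to the bus. The sketch below illustrates that behaviour; the port, id, clipping value, and requested jump are illustrative assumptions.

```python
# Hedged sketch: with max_relative_target set, send_action() caps large jumps
# and returns the (possibly clipped) goal that was really sent to the motors.
from lerobot.robots.so100_follower.config_so100_follower import SO100FollowerConfig
from lerobot.robots.so100_follower.so100_follower import SO100Follower

config = SO100FollowerConfig(
    port="/dev/ttyACM0",          # illustrative
    id="my_so100_follower",       # illustrative
    max_relative_target=5,        # cap any single move to 5 normalized units (assumption)
)
follower = SO100Follower(config)
follower.connect()  # runs interactive calibration if no calibration file is found

obs = follower.get_observation()
current = {key: val for key, val in obs.items() if key.endswith(".pos")}

# Ask for a large jump on one joint; the returned dict shows what was actually sent.
target = dict(current)
target["shoulder_pan.pos"] = current["shoulder_pan.pos"] + 50
sent = follower.send_action(target)
print("requested:", target["shoulder_pan.pos"], "sent:", sent["shoulder_pan.pos"])

follower.disconnect()
```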
#!/usr/bin/env python # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Evaluate a policy on an environment by running rollouts and computing metrics. Usage examples: You want to evaluate a model from the hub (eg: https://huggingface.co/lerobot/diffusion_pusht) for 10 episodes. ``` lerobot-eval \ --policy.path=lerobot/diffusion_pusht \ --env.type=pusht \ --eval.batch_size=10 \ --eval.n_episodes=10 \ --use_amp=false \ --device=cuda ``` OR, you want to evaluate a model checkpoint from the LeRobot training script for 10 episodes. ``` lerobot-eval \ --policy.path=outputs/train/diffusion_pusht/checkpoints/005000/pretrained_model \ --env.type=pusht \ --eval.batch_size=10 \ --eval.n_episodes=10 \ --use_amp=false \ --device=cuda ``` Note that in both examples, the repo/folder should contain at least `config.json` and `model.safetensors` files. You can learn about the CLI options for this script in the `EvalPipelineConfig` in lerobot/configs/eval.py """ import json import logging import threading import time from collections.abc import Callable from contextlib import nullcontext from copy import deepcopy from dataclasses import asdict from pathlib import Path from pprint import pformat import einops import gymnasium as gym import numpy as np import torch from termcolor import colored from torch import Tensor, nn from tqdm import trange from lerobot.configs import parser from lerobot.configs.eval import EvalPipelineConfig from lerobot.envs.factory import make_env from lerobot.envs.utils import add_envs_task, check_env_attributes_and_types, preprocess_observation from lerobot.policies.factory import make_policy from lerobot.policies.pretrained import PreTrainedPolicy from lerobot.policies.utils import get_device_from_parameters from lerobot.utils.io_utils import write_video from lerobot.utils.random_utils import set_seed from lerobot.utils.utils import ( get_safe_torch_device, init_logging, inside_slurm, ) def rollout( env: gym.vector.VectorEnv, policy: PreTrainedPolicy, seeds: list[int] | None = None, return_observations: bool = False, render_callback: Callable[[gym.vector.VectorEnv], None] | None = None, ) -> dict: """Run a batched policy rollout once through a batch of environments. Note that all environments in the batch are run until the last environment is done. This means some data will probably need to be discarded (for environments that aren't the first one to be done). The return dictionary contains: (optional) "observation": A dictionary of (batch, sequence + 1, *) tensors mapped to observation keys. NOTE that this has an extra sequence element relative to the other keys in the dictionary. This is because an extra observation is included for after the environment is terminated or truncated. "action": A (batch, sequence, action_dim) tensor of actions applied based on the observations (not including the last observations). "reward": A (batch, sequence) tensor of rewards received for applying the actions. 
"success": A (batch, sequence) tensor of success conditions (the only time this can be True is upon environment termination/truncation). "done": A (batch, sequence) tensor of **cumulative** done conditions. For any given batch element, the first True is followed by True's all the way till the end. This can be used for masking extraneous elements from the sequences above. Args: env: The batch of environments. policy: The policy. Must be a PyTorch nn module. seeds: The environments are seeded once at the start of the rollout. If provided, this argument specifies the seeds for each of the environments. return_observations: Whether to include all observations in the returned rollout data. Observations are returned optionally because they typically take more memory to cache. Defaults to False. render_callback: Optional rendering callback to be used after the environments are reset, and after every step. Returns: The dictionary described above. """ assert isinstance(policy, nn.Module), "Policy must be a PyTorch nn module." device = get_device_from_parameters(policy) # Reset the policy and environments. policy.reset() observation, info = env.reset(seed=seeds) if render_callback is not None: render_callback(env) all_observations = [] all_actions = [] all_rewards = [] all_successes = [] all_dones = [] step = 0 # Keep track of which environments are done. done = np.array([False] * env.num_envs) max_steps = env.call("_max_episode_steps")[0] progbar = trange( max_steps, desc=f"Running rollout with at most {max_steps} steps", disable=inside_slurm(), # we dont want progress bar when we use slurm, since it clutters the logs leave=False, ) check_env_attributes_and_types(env) while not np.all(done): # Numpy array to tensor and changing dictionary keys to LeRobot policy format. observation = preprocess_observation(observation) if return_observations: all_observations.append(deepcopy(observation)) observation = { key: observation[key].to(device, non_blocking=device.type == "cuda") for key in observation } # Infer "task" from attributes of environments. # TODO: works with SyncVectorEnv but not AsyncVectorEnv observation = add_envs_task(env, observation) with torch.inference_mode(): action = policy.select_action(observation) # Convert to CPU / numpy. action = action.to("cpu").numpy() assert action.ndim == 2, "Action dimensions should be (batch, action_dim)" # Apply the next action. observation, reward, terminated, truncated, info = env.step(action) if render_callback is not None: render_callback(env) # VectorEnv stores is_success in `info["final_info"][env_index]["is_success"]`. "final_info" isn't # available of none of the envs finished. if "final_info" in info: successes = [info["is_success"] if info is not None else False for info in info["final_info"]] else: successes = [False] * env.num_envs # Keep track of which environments are done so far. done = terminated | truncated | done all_actions.append(torch.from_numpy(action)) all_rewards.append(torch.from_numpy(reward)) all_dones.append(torch.from_numpy(done)) all_successes.append(torch.tensor(successes)) step += 1 running_success_rate = ( einops.reduce(torch.stack(all_successes, dim=1), "b n -> b", "any").numpy().mean() ) progbar.set_postfix({"running_success_rate": f"{running_success_rate.item() * 100:.1f}%"}) progbar.update() # Track the final observation. 
if return_observations: observation = preprocess_observation(observation) all_observations.append(deepcopy(observation)) # Stack the sequence along the first dimension so that we have (batch, sequence, *) tensors. ret = { "action": torch.stack(all_actions, dim=1), "reward": torch.stack(all_rewards, dim=1), "success": torch.stack(all_successes, dim=1), "done": torch.stack(all_dones, dim=1), } if return_observations: stacked_observations = {} for key in all_observations[0]: stacked_observations[key] = torch.stack([obs[key] for obs in all_observations], dim=1) ret["observation"] = stacked_observations if hasattr(policy, "use_original_modules"): policy.use_original_modules() return ret def eval_policy( env: gym.vector.VectorEnv, policy: PreTrainedPolicy, n_episodes: int, max_episodes_rendered: int = 0, videos_dir: Path | None = None, return_episode_data: bool = False, start_seed: int | None = None, ) -> dict: """ Args: env: The batch of environments. policy: The policy. n_episodes: The number of episodes to evaluate. max_episodes_rendered: Maximum number of episodes to render into videos. videos_dir: Where to save rendered videos. return_episode_data: Whether to return episode data for online training. Incorporates the data into the "episodes" key of the returned dictionary. start_seed: The first seed to use for the first individual rollout. For all subsequent rollouts the seed is incremented by 1. If not provided, the environments are not manually seeded. Returns: Dictionary with metrics and data regarding the rollouts. """ if max_episodes_rendered > 0 and not videos_dir: raise ValueError("If max_episodes_rendered > 0, videos_dir must be provided.") if not isinstance(policy, PreTrainedPolicy): raise ValueError( f"Policy of type 'PreTrainedPolicy' is expected, but type '{type(policy)}' was provided." ) start = time.time() policy.eval() # Determine how many batched rollouts we need to get n_episodes. Note that if n_episodes is not evenly # divisible by env.num_envs we end up discarding some data in the last batch. n_batches = n_episodes // env.num_envs + int((n_episodes % env.num_envs) != 0) # Keep track of some metrics. sum_rewards = [] max_rewards = [] all_successes = [] all_seeds = [] threads = [] # for video saving threads n_episodes_rendered = 0 # for saving the correct number of videos # Callback for visualization. def render_frame(env: gym.vector.VectorEnv): # noqa: B023 if n_episodes_rendered >= max_episodes_rendered: return n_to_render_now = min(max_episodes_rendered - n_episodes_rendered, env.num_envs) if isinstance(env, gym.vector.SyncVectorEnv): ep_frames.append(np.stack([env.envs[i].render() for i in range(n_to_render_now)])) # noqa: B023 elif isinstance(env, gym.vector.AsyncVectorEnv): # Here we must render all frames and discard any we don't need. ep_frames.append(np.stack(env.call("render")[:n_to_render_now])) if max_episodes_rendered > 0: video_paths: list[str] = [] if return_episode_data: episode_data: dict | None = None # we dont want progress bar when we use slurm, since it clutters the logs progbar = trange(n_batches, desc="Stepping through eval batches", disable=inside_slurm()) for batch_ix in progbar: # Cache frames for rendering videos. Each item will be (b, h, w, c), and the list indexes the rollout # step. 
if max_episodes_rendered > 0: ep_frames: list[np.ndarray] = [] if start_seed is None: seeds = None else: seeds = range( start_seed + (batch_ix * env.num_envs), start_seed + ((batch_ix + 1) * env.num_envs) ) rollout_data = rollout( env, policy, seeds=list(seeds) if seeds else None, return_observations=return_episode_data, render_callback=render_frame if max_episodes_rendered > 0 else None, ) # Figure out where in each rollout sequence the first done condition was encountered (results after # this won't be included). n_steps = rollout_data["done"].shape[1] # Note: this relies on a property of argmax: that it returns the first occurrence as a tiebreaker. done_indices = torch.argmax(rollout_data["done"].to(int), dim=1) # Make a mask with shape (batch, n_steps) to mask out rollout data after the first done # (batch-element-wise). Note the `done_indices + 1` to make sure to keep the data from the done step. mask = (torch.arange(n_steps) <= einops.repeat(done_indices + 1, "b -> b s", s=n_steps)).int() # Extend metrics. batch_sum_rewards = einops.reduce((rollout_data["reward"] * mask), "b n -> b", "sum") sum_rewards.extend(batch_sum_rewards.tolist()) batch_max_rewards = einops.reduce((rollout_data["reward"] * mask), "b n -> b", "max") max_rewards.extend(batch_max_rewards.tolist()) batch_successes = einops.reduce((rollout_data["success"] * mask), "b n -> b", "any") all_successes.extend(batch_successes.tolist()) if seeds: all_seeds.extend(seeds) else: all_seeds.append(None) # FIXME: episode_data is either None or it doesn't exist if return_episode_data: this_episode_data = _compile_episode_data( rollout_data, done_indices, start_episode_index=batch_ix * env.num_envs, start_data_index=(0 if episode_data is None else (episode_data["index"][-1].item() + 1)), fps=env.unwrapped.metadata["render_fps"], ) if episode_data is None: episode_data = this_episode_data else: # Some sanity checks to make sure we are correctly compiling the data. assert episode_data["episode_index"][-1] + 1 == this_episode_data["episode_index"][0] assert episode_data["index"][-1] + 1 == this_episode_data["index"][0] # Concatenate the episode data. episode_data = {k: torch.cat([episode_data[k], this_episode_data[k]]) for k in episode_data} # Maybe render video for visualization. if max_episodes_rendered > 0 and len(ep_frames) > 0: batch_stacked_frames = np.stack(ep_frames, axis=1) # (b, t, *) for stacked_frames, done_index in zip( batch_stacked_frames, done_indices.flatten().tolist(), strict=False ): if n_episodes_rendered >= max_episodes_rendered: break videos_dir.mkdir(parents=True, exist_ok=True) video_path = videos_dir / f"eval_episode_{n_episodes_rendered}.mp4" video_paths.append(str(video_path)) thread = threading.Thread( target=write_video, args=( str(video_path), stacked_frames[: done_index + 1], # + 1 to capture the last observation env.unwrapped.metadata["render_fps"], ), ) thread.start() threads.append(thread) n_episodes_rendered += 1 progbar.set_postfix( {"running_success_rate": f"{np.mean(all_successes[:n_episodes]).item() * 100:.1f}%"} ) # Wait till all video rendering threads are done. for thread in threads: thread.join() # Compile eval info. 
info = { "per_episode": [ { "episode_ix": i, "sum_reward": sum_reward, "max_reward": max_reward, "success": success, "seed": seed, } for i, (sum_reward, max_reward, success, seed) in enumerate( zip( sum_rewards[:n_episodes], max_rewards[:n_episodes], all_successes[:n_episodes], all_seeds[:n_episodes], strict=True, ) ) ], "aggregated": { "avg_sum_reward": float(np.nanmean(sum_rewards[:n_episodes])), "avg_max_reward": float(np.nanmean(max_rewards[:n_episodes])), "pc_success": float(np.nanmean(all_successes[:n_episodes]) * 100), "eval_s": time.time() - start, "eval_ep_s": (time.time() - start) / n_episodes, }, } if return_episode_data: info["episodes"] = episode_data if max_episodes_rendered > 0: info["video_paths"] = video_paths return info def _compile_episode_data( rollout_data: dict, done_indices: Tensor, start_episode_index: int, start_data_index: int, fps: float ) -> dict: """Convenience function for `eval_policy(return_episode_data=True)` Compiles all the rollout data into a Hugging Face dataset. Similar logic is implemented when datasets are pushed to hub (see: `push_to_hub`). """ ep_dicts = [] total_frames = 0 for ep_ix in range(rollout_data["action"].shape[0]): # + 2 to include the first done frame and the last observation frame. num_frames = done_indices[ep_ix].item() + 2 total_frames += num_frames # Here we do `num_frames - 1` as we don't want to include the last observation frame just yet. ep_dict = { "action": rollout_data["action"][ep_ix, : num_frames - 1], "episode_index": torch.tensor([start_episode_index + ep_ix] * (num_frames - 1)), "frame_index": torch.arange(0, num_frames - 1, 1), "timestamp": torch.arange(0, num_frames - 1, 1) / fps, "next.done": rollout_data["done"][ep_ix, : num_frames - 1], "next.success": rollout_data["success"][ep_ix, : num_frames - 1], "next.reward": rollout_data["reward"][ep_ix, : num_frames - 1].type(torch.float32), } # For the last observation frame, all other keys will just be copy padded. for k in ep_dict: ep_dict[k] = torch.cat([ep_dict[k], ep_dict[k][-1:]]) for key in rollout_data["observation"]: ep_dict[key] = rollout_data["observation"][key][ep_ix, :num_frames] ep_dicts.append(ep_dict) data_dict = {} for key in ep_dicts[0]: data_dict[key] = torch.cat([x[key] for x in ep_dicts]) data_dict["index"] = torch.arange(start_data_index, start_data_index + total_frames, 1) return data_dict @parser.wrap() def eval_main(cfg: EvalPipelineConfig): logging.info(pformat(asdict(cfg))) # Check device is available device = get_safe_torch_device(cfg.policy.device, log=True) torch.backends.cudnn.benchmark = True torch.backends.cuda.matmul.allow_tf32 = True set_seed(cfg.seed) logging.info(colored("Output dir:", "yellow", attrs=["bold"]) + f" {cfg.output_dir}") logging.info("Making environment.") env = make_env(cfg.env, n_envs=cfg.eval.batch_size, use_async_envs=cfg.eval.use_async_envs) logging.info("Making policy.") policy = make_policy( cfg=cfg.policy, env_cfg=cfg.env, ) policy.eval() with torch.no_grad(), torch.autocast(device_type=device.type) if cfg.policy.use_amp else nullcontext(): info = eval_policy( env, policy, cfg.eval.n_episodes, max_episodes_rendered=10, videos_dir=Path(cfg.output_dir) / "videos", start_seed=cfg.seed, ) print(info["aggregated"]) # Save info with open(Path(cfg.output_dir) / "eval_info.json", "w") as f: json.dump(info, f, indent=2) env.close() logging.info("End of eval") def main(): init_logging() eval_main() if __name__ == "__main__": main()
lerobot/src/lerobot/scripts/eval.py/0
{ "file_path": "lerobot/src/lerobot/scripts/eval.py", "repo_id": "lerobot", "token_count": 8213 }
217
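The done-masking step in `eval_policy` above is easy to misread, so here is a standalone illustration of the same trick in plain PyTorch (the `unsqueeze` plays the role of the `einops.repeat` in the original). `torch.argmax` on an integer tensor returns the first maximal index, i.e. the first `True` of the cumulative `done` sequence, and the `+ 1` keeps the step on which the episode ended.

```python
import torch

# Cumulative "done" flags for 2 rollouts over 5 steps (as returned by rollout()).
done = torch.tensor([
    [False, False, True, True, True],
    [False, True, True, True, True],
])
n_steps = done.shape[1]

# argmax over ints returns the index of the FIRST True in each row.
done_indices = torch.argmax(done.to(int), dim=1)  # tensor([2, 1])

# Keep every step up to and including the done step (the "+ 1" mirrors eval.py).
mask = (torch.arange(n_steps) <= (done_indices + 1).unsqueeze(-1)).int()
print(mask)
# tensor([[1, 1, 1, 1, 0],
#         [1, 1, 1, 0, 0]], dtype=torch.int32)
```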
#!/usr/bin/env python # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Visualize effects of image transforms for a given configuration. This script will generate examples of transformed images as they are output by LeRobot dataset. Additionally, each individual transform can be visualized separately as well as examples of combined transforms Example: ```bash python -m lerobot.scripts.visualize_image_transforms \ --repo_id=lerobot/pusht \ --episodes='[0]' \ --image_transforms.enable=True ``` """ import logging from copy import deepcopy from dataclasses import replace from pathlib import Path import draccus from torchvision.transforms import ToPILImage from lerobot.configs.default import DatasetConfig from lerobot.datasets.lerobot_dataset import LeRobotDataset from lerobot.datasets.transforms import ( ImageTransforms, ImageTransformsConfig, make_transform_from_config, ) OUTPUT_DIR = Path("outputs/image_transforms") to_pil = ToPILImage() def save_all_transforms(cfg: ImageTransformsConfig, original_frame, output_dir, n_examples): output_dir_all = output_dir / "all" output_dir_all.mkdir(parents=True, exist_ok=True) tfs = ImageTransforms(cfg) for i in range(1, n_examples + 1): transformed_frame = tfs(original_frame) to_pil(transformed_frame).save(output_dir_all / f"{i}.png", quality=100) print("Combined transforms examples saved to:") print(f" {output_dir_all}") def save_each_transform(cfg: ImageTransformsConfig, original_frame, output_dir, n_examples): if not cfg.enable: logging.warning( "No single transforms will be saved, because `image_transforms.enable=False`. To enable, set `enable` to True in `ImageTransformsConfig` or in the command line with `--image_transforms.enable=True`." 
) return print("Individual transforms examples saved to:") for tf_name, tf_cfg in cfg.tfs.items(): # Apply a few transformation with random value in min_max range output_dir_single = output_dir / tf_name output_dir_single.mkdir(parents=True, exist_ok=True) tf = make_transform_from_config(tf_cfg) for i in range(1, n_examples + 1): transformed_frame = tf(original_frame) to_pil(transformed_frame).save(output_dir_single / f"{i}.png", quality=100) # Apply min, max, average transformations tf_cfg_kwgs_min = deepcopy(tf_cfg.kwargs) tf_cfg_kwgs_max = deepcopy(tf_cfg.kwargs) tf_cfg_kwgs_avg = deepcopy(tf_cfg.kwargs) for key, (min_, max_) in tf_cfg.kwargs.items(): avg = (min_ + max_) / 2 tf_cfg_kwgs_min[key] = [min_, min_] tf_cfg_kwgs_max[key] = [max_, max_] tf_cfg_kwgs_avg[key] = [avg, avg] tf_min = make_transform_from_config(replace(tf_cfg, **{"kwargs": tf_cfg_kwgs_min})) tf_max = make_transform_from_config(replace(tf_cfg, **{"kwargs": tf_cfg_kwgs_max})) tf_avg = make_transform_from_config(replace(tf_cfg, **{"kwargs": tf_cfg_kwgs_avg})) tf_frame_min = tf_min(original_frame) tf_frame_max = tf_max(original_frame) tf_frame_avg = tf_avg(original_frame) to_pil(tf_frame_min).save(output_dir_single / "min.png", quality=100) to_pil(tf_frame_max).save(output_dir_single / "max.png", quality=100) to_pil(tf_frame_avg).save(output_dir_single / "mean.png", quality=100) print(f" {output_dir_single}") @draccus.wrap() def visualize_image_transforms(cfg: DatasetConfig, output_dir: Path = OUTPUT_DIR, n_examples: int = 5): dataset = LeRobotDataset( repo_id=cfg.repo_id, episodes=cfg.episodes, revision=cfg.revision, video_backend=cfg.video_backend, ) output_dir = output_dir / cfg.repo_id.split("/")[-1] output_dir.mkdir(parents=True, exist_ok=True) # Get 1st frame from 1st camera of 1st episode original_frame = dataset[0][dataset.meta.camera_keys[0]] to_pil(original_frame).save(output_dir / "original_frame.png", quality=100) print("\nOriginal frame saved to:") print(f" {output_dir / 'original_frame.png'}.") save_all_transforms(cfg.image_transforms, original_frame, output_dir, n_examples) save_each_transform(cfg.image_transforms, original_frame, output_dir, n_examples) if __name__ == "__main__": visualize_image_transforms()
lerobot/src/lerobot/scripts/visualize_image_transforms.py/0
{ "file_path": "lerobot/src/lerobot/scripts/visualize_image_transforms.py", "repo_id": "lerobot", "token_count": 1928 }
218
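`save_each_transform` above removes the randomness of each transform by collapsing its `(min, max)` sampling range to a single value before rebuilding it with `dataclasses.replace`. The sketch below shows that pattern in isolation; `BrightnessCfg` and `make_transform` are made-up stand-ins, not lerobot types.

```python
import random
from copy import deepcopy
from dataclasses import dataclass, field, replace


@dataclass
class BrightnessCfg:
    # kwargs maps a parameter name to the [min, max] range it is sampled from.
    kwargs: dict = field(default_factory=lambda: {"brightness": (0.5, 1.5)})


def make_transform(cfg: BrightnessCfg):
    lo, hi = cfg.kwargs["brightness"]
    return lambda x: x * random.uniform(lo, hi)


cfg = BrightnessCfg()

# Pin the range to its extremes / average, exactly like save_each_transform does.
kwargs_min, kwargs_max, kwargs_avg = deepcopy(cfg.kwargs), deepcopy(cfg.kwargs), deepcopy(cfg.kwargs)
for key, (lo, hi) in cfg.kwargs.items():
    kwargs_min[key] = [lo, lo]
    kwargs_max[key] = [hi, hi]
    kwargs_avg[key] = [(lo + hi) / 2, (lo + hi) / 2]

tf_min = make_transform(replace(cfg, kwargs=kwargs_min))  # always scales by 0.5
tf_max = make_transform(replace(cfg, kwargs=kwargs_max))  # always scales by 1.5
tf_avg = make_transform(replace(cfg, kwargs=kwargs_avg))  # deterministic 1.0
print(tf_min(1.0), tf_max(1.0), tf_avg(1.0))  # 0.5 1.5 1.0
```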
# Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. INDEX_SPLAY = 0.3 MIDDLE_SPLAY = 0.3 RING_SPLAY = 0.3 PINKY_SPLAY = 0.5 def get_ulnar_flexion(flexion: float, abduction: float, splay: float): return -abduction * splay + flexion * (1 - splay) def get_radial_flexion(flexion: float, abduction: float, splay: float): return abduction * splay + flexion * (1 - splay) def homunculus_glove_to_hope_jr_hand(glove_action: dict[str, float]) -> dict[str, float]: return { "thumb_cmc.pos": glove_action["thumb_cmc.pos"], "thumb_mcp.pos": glove_action["thumb_mcp.pos"], "thumb_pip.pos": glove_action["thumb_pip.pos"], "thumb_dip.pos": glove_action["thumb_dip.pos"], "index_radial_flexor.pos": get_radial_flexion( glove_action["index_mcp_flexion.pos"], glove_action["index_mcp_abduction.pos"], INDEX_SPLAY ), "index_ulnar_flexor.pos": get_ulnar_flexion( glove_action["index_mcp_flexion.pos"], glove_action["index_mcp_abduction.pos"], INDEX_SPLAY ), "index_pip_dip.pos": glove_action["index_dip.pos"], "middle_radial_flexor.pos": get_radial_flexion( glove_action["middle_mcp_flexion.pos"], glove_action["middle_mcp_abduction.pos"], MIDDLE_SPLAY ), "middle_ulnar_flexor.pos": get_ulnar_flexion( glove_action["middle_mcp_flexion.pos"], glove_action["middle_mcp_abduction.pos"], MIDDLE_SPLAY ), "middle_pip_dip.pos": glove_action["middle_dip.pos"], "ring_radial_flexor.pos": get_radial_flexion( glove_action["ring_mcp_flexion.pos"], glove_action["ring_mcp_abduction.pos"], RING_SPLAY ), "ring_ulnar_flexor.pos": get_ulnar_flexion( glove_action["ring_mcp_flexion.pos"], glove_action["ring_mcp_abduction.pos"], RING_SPLAY ), "ring_pip_dip.pos": glove_action["ring_dip.pos"], "pinky_radial_flexor.pos": get_radial_flexion( glove_action["pinky_mcp_flexion.pos"], glove_action["pinky_mcp_abduction.pos"], PINKY_SPLAY ), "pinky_ulnar_flexor.pos": get_ulnar_flexion( glove_action["pinky_mcp_flexion.pos"], glove_action["pinky_mcp_abduction.pos"], PINKY_SPLAY ), "pinky_pip_dip.pos": glove_action["pinky_dip.pos"], }
lerobot/src/lerobot/teleoperators/homunculus/joints_translation.py/0
{ "file_path": "lerobot/src/lerobot/teleoperators/homunculus/joints_translation.py", "repo_id": "lerobot", "token_count": 1249 }
219
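The splay blend above is just a weighted mix of MCP flexion and abduction, so a quick numerical check makes its behaviour obvious. The two helpers are copied from the module so the snippet runs standalone, and the glove readings are made-up values.

```python
INDEX_SPLAY = 0.3


def get_ulnar_flexion(flexion: float, abduction: float, splay: float) -> float:
    return -abduction * splay + flexion * (1 - splay)


def get_radial_flexion(flexion: float, abduction: float, splay: float) -> float:
    return abduction * splay + flexion * (1 - splay)


flexion, abduction = 80.0, 20.0  # hypothetical index-finger MCP readings

radial = get_radial_flexion(flexion, abduction, INDEX_SPLAY)  # 80*0.7 + 20*0.3 = 62.0
ulnar = get_ulnar_flexion(flexion, abduction, INDEX_SPLAY)    # 80*0.7 - 20*0.3 = 50.0
print(radial, ulnar)  # 62.0 50.0

# With zero abduction both flexors receive the same command; abduction pushes
# one up and the other down, symmetrically around flexion * (1 - splay).
assert get_radial_flexion(flexion, 0.0, INDEX_SPLAY) == get_ulnar_flexion(flexion, 0.0, INDEX_SPLAY)
```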
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import abc
import builtins
from pathlib import Path
from typing import Any

import draccus

from lerobot.constants import HF_LEROBOT_CALIBRATION, TELEOPERATORS
from lerobot.motors.motors_bus import MotorCalibration

from .config import TeleoperatorConfig


class Teleoperator(abc.ABC):
    """
    The base abstract class for all LeRobot-compatible teleoperation devices.

    This class provides a standardized interface for interacting with physical teleoperators.
    Subclasses must implement all abstract methods and properties to be usable.

    Attributes:
        config_class (TeleoperatorConfig): The expected configuration class for this teleoperator.
        name (str): The unique name used to identify this teleoperator type.
    """

    # Set these in ALL subclasses
    config_class: builtins.type[TeleoperatorConfig]
    name: str

    def __init__(self, config: TeleoperatorConfig):
        self.id = config.id
        self.calibration_dir = (
            config.calibration_dir
            if config.calibration_dir
            else HF_LEROBOT_CALIBRATION / TELEOPERATORS / self.name
        )
        self.calibration_dir.mkdir(parents=True, exist_ok=True)
        self.calibration_fpath = self.calibration_dir / f"{self.id}.json"
        self.calibration: dict[str, MotorCalibration] = {}
        if self.calibration_fpath.is_file():
            self._load_calibration()

    def __str__(self) -> str:
        return f"{self.id} {self.__class__.__name__}"

    @property
    @abc.abstractmethod
    def action_features(self) -> dict:
        """
        A dictionary describing the structure and types of the actions produced by the teleoperator.
        Its structure (keys) should match the structure of what is returned by :pymeth:`get_action`.
        Values for the dict should be the type of the value if it's a simple value, e.g. `float` for a
        single proprioceptive value (a joint's goal position/velocity).

        Note: this property should be able to be called regardless of whether the teleoperator is
        connected or not.
        """
        pass

    @property
    @abc.abstractmethod
    def feedback_features(self) -> dict:
        """
        A dictionary describing the structure and types of the feedback actions expected by the
        teleoperator. Its structure (keys) should match the structure of what is passed to
        :pymeth:`send_feedback`. Values for the dict should be the type of the value if it's a simple
        value, e.g. `float` for a single proprioceptive value (a joint's goal position/velocity).

        Note: this property should be able to be called regardless of whether the teleoperator is
        connected or not.
        """
        pass

    @property
    @abc.abstractmethod
    def is_connected(self) -> bool:
        """
        Whether the teleoperator is currently connected or not. If `False`, calling
        :pymeth:`get_action` or :pymeth:`send_feedback` should raise an error.
        """
        pass

    @abc.abstractmethod
    def connect(self, calibrate: bool = True) -> None:
        """
        Establish communication with the teleoperator.

        Args:
            calibrate (bool): If True, automatically calibrate the teleoperator after connecting if
                it's not calibrated or needs calibration (this is hardware-dependent).
""" pass @property @abc.abstractmethod def is_calibrated(self) -> bool: """Whether the teleoperator is currently calibrated or not. Should be always `True` if not applicable""" pass @abc.abstractmethod def calibrate(self) -> None: """ Calibrate the teleoperator if applicable. If not, this should be a no-op. This method should collect any necessary data (e.g., motor offsets) and update the :pyattr:`calibration` dictionary accordingly. """ pass def _load_calibration(self, fpath: Path | None = None) -> None: """ Helper to load calibration data from the specified file. Args: fpath (Path | None): Optional path to the calibration file. Defaults to `self.calibration_fpath`. """ fpath = self.calibration_fpath if fpath is None else fpath with open(fpath) as f, draccus.config_type("json"): self.calibration = draccus.load(dict[str, MotorCalibration], f) def _save_calibration(self, fpath: Path | None = None) -> None: """ Helper to save calibration data to the specified file. Args: fpath (Path | None): Optional path to save the calibration file. Defaults to `self.calibration_fpath`. """ fpath = self.calibration_fpath if fpath is None else fpath with open(fpath, "w") as f, draccus.config_type("json"): draccus.dump(self.calibration, f, indent=4) @abc.abstractmethod def configure(self) -> None: """ Apply any one-time or runtime configuration to the teleoperator. This may include setting motor parameters, control modes, or initial state. """ pass @abc.abstractmethod def get_action(self) -> dict[str, Any]: """ Retrieve the current action from the teleoperator. Returns: dict[str, Any]: A flat dictionary representing the teleoperator's current actions. Its structure should match :pymeth:`observation_features`. """ pass @abc.abstractmethod def send_feedback(self, feedback: dict[str, Any]) -> None: """ Send a feedback action command to the teleoperator. Args: feedback (dict[str, Any]): Dictionary representing the desired feedback. Its structure should match :pymeth:`feedback_features`. Returns: dict[str, Any]: The action actually sent to the motors potentially clipped or modified, e.g. by safety limits on velocity. """ pass @abc.abstractmethod def disconnect(self) -> None: """Disconnect from the teleoperator and perform any necessary cleanup.""" pass
lerobot/src/lerobot/teleoperators/teleoperator.py/0
{ "file_path": "lerobot/src/lerobot/teleoperators/teleoperator.py", "repo_id": "lerobot", "token_count": 2461 }
220
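To see what the abstract interface above asks of a device, here is a minimal sketch of a subclass. It assumes the `lerobot` package shown in this file is importable from `lerobot.teleoperators`; `DummyTeleop`, `DummyTeleopConfig`, and the single `lever.pos` action are hypothetical and exist only to show which members a concrete teleoperator must provide.

```python
from dataclasses import dataclass
from typing import Any

from lerobot.teleoperators.config import TeleoperatorConfig
from lerobot.teleoperators.teleoperator import Teleoperator


@dataclass
class DummyTeleopConfig(TeleoperatorConfig):
    pass  # a real config would add ports, baud rates, etc.


class DummyTeleop(Teleoperator):
    config_class = DummyTeleopConfig
    name = "dummy_teleop"

    def __init__(self, config: DummyTeleopConfig):
        super().__init__(config)
        self._connected = False

    @property
    def action_features(self) -> dict:
        return {"lever.pos": float}

    @property
    def feedback_features(self) -> dict:
        return {}  # this device accepts no feedback

    @property
    def is_connected(self) -> bool:
        return self._connected

    def connect(self, calibrate: bool = True) -> None:
        self._connected = True  # a real device would open a serial port here

    @property
    def is_calibrated(self) -> bool:
        return True  # nothing to calibrate

    def calibrate(self) -> None:
        pass

    def configure(self) -> None:
        pass

    def get_action(self) -> dict[str, Any]:
        return {"lever.pos": 0.0}  # a real device would read hardware here

    def send_feedback(self, feedback: dict[str, Any]) -> None:
        pass

    def disconnect(self) -> None:
        self._connected = False


# teleop = DummyTeleop(DummyTeleopConfig(id="demo"))  # then: teleop.connect(); teleop.get_action()
```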
# Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import builtins from pathlib import Path from tempfile import TemporaryDirectory from typing import Any, TypeVar from huggingface_hub import HfApi from huggingface_hub.utils import validate_hf_hub_args T = TypeVar("T", bound="HubMixin") class HubMixin: """ A Mixin containing the functionality to push an object to the hub. This is similar to huggingface_hub.ModelHubMixin but is lighter and makes less assumptions about its subclasses (in particular, the fact that it's not necessarily a model). The inheriting classes must implement '_save_pretrained' and 'from_pretrained'. """ def save_pretrained( self, save_directory: str | Path, *, repo_id: str | None = None, push_to_hub: bool = False, card_kwargs: dict[str, Any] | None = None, **push_to_hub_kwargs, ) -> str | None: """ Save object in local directory. Args: save_directory (`str` or `Path`): Path to directory in which the object will be saved. push_to_hub (`bool`, *optional*, defaults to `False`): Whether or not to push your object to the Huggingface Hub after saving it. repo_id (`str`, *optional*): ID of your repository on the Hub. Used only if `push_to_hub=True`. Will default to the folder name if not provided. card_kwargs (`Dict[str, Any]`, *optional*): Additional arguments passed to the card template to customize the card. push_to_hub_kwargs: Additional key word arguments passed along to the [`~HubMixin.push_to_hub`] method. Returns: `str` or `None`: url of the commit on the Hub if `push_to_hub=True`, `None` otherwise. """ save_directory = Path(save_directory) save_directory.mkdir(parents=True, exist_ok=True) # save object (weights, files, etc.) self._save_pretrained(save_directory) # push to the Hub if required if push_to_hub: if repo_id is None: repo_id = save_directory.name # Defaults to `save_directory` name return self.push_to_hub(repo_id=repo_id, card_kwargs=card_kwargs, **push_to_hub_kwargs) return None def _save_pretrained(self, save_directory: Path) -> None: """ Overwrite this method in subclass to define how to save your object. Args: save_directory (`str` or `Path`): Path to directory in which the object files will be saved. """ raise NotImplementedError @classmethod @validate_hf_hub_args def from_pretrained( cls: builtins.type[T], pretrained_name_or_path: str | Path, *, force_download: bool = False, resume_download: bool | None = None, proxies: dict | None = None, token: str | bool | None = None, cache_dir: str | Path | None = None, local_files_only: bool = False, revision: str | None = None, **kwargs, ) -> T: """ Download the object from the Huggingface Hub and instantiate it. Args: pretrained_name_or_path (`str`, `Path`): - Either the `repo_id` (string) of the object hosted on the Hub, e.g. `lerobot/diffusion_pusht`. - Or a path to a `directory` containing the object files saved using `.save_pretrained`, e.g., `../path/to/my_model_directory/`. revision (`str`, *optional*): Revision on the Hub. 
Can be a branch name, a git tag or any commit id. Defaults to the latest commit on `main` branch. force_download (`bool`, *optional*, defaults to `False`): Whether to force (re-)downloading the files from the Hub, overriding the existing cache. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on every request. token (`str` or `bool`, *optional*): The token to use as HTTP bearer authorization for remote files. By default, it will use the token cached when running `huggingface-cli login`. cache_dir (`str`, `Path`, *optional*): Path to the folder where cached files are stored. local_files_only (`bool`, *optional*, defaults to `False`): If `True`, avoid downloading the file and return the path to the local cached file if it exists. kwargs (`Dict`, *optional*): Additional kwargs to pass to the object during initialization. """ raise NotImplementedError @validate_hf_hub_args def push_to_hub( self, repo_id: str, *, commit_message: str | None = None, private: bool | None = None, token: str | None = None, branch: str | None = None, create_pr: bool | None = None, allow_patterns: list[str] | str | None = None, ignore_patterns: list[str] | str | None = None, delete_patterns: list[str] | str | None = None, card_kwargs: dict[str, Any] | None = None, ) -> str: """ Upload model checkpoint to the Hub. Use `allow_patterns` and `ignore_patterns` to precisely filter which files should be pushed to the hub. Use `delete_patterns` to delete existing remote files in the same commit. See [`upload_folder`] reference for more details. Args: repo_id (`str`): ID of the repository to push to (example: `"username/my-model"`). commit_message (`str`, *optional*): Message to commit while pushing. private (`bool`, *optional*): Whether the repository created should be private. If `None` (default), the repo will be public unless the organization's default is private. token (`str`, *optional*): The token to use as HTTP bearer authorization for remote files. By default, it will use the token cached when running `huggingface-cli login`. branch (`str`, *optional*): The git branch on which to push the model. This defaults to `"main"`. create_pr (`boolean`, *optional*): Whether or not to create a Pull Request from `branch` with that commit. Defaults to `False`. allow_patterns (`List[str]` or `str`, *optional*): If provided, only files matching at least one pattern are pushed. ignore_patterns (`List[str]` or `str`, *optional*): If provided, files matching any of the patterns are not pushed. delete_patterns (`List[str]` or `str`, *optional*): If provided, remote files matching any of the patterns will be deleted from the repo. card_kwargs (`Dict[str, Any]`, *optional*): Additional arguments passed to the card template to customize the card. Returns: The url of the commit of your object in the given repository. 
""" api = HfApi(token=token) repo_id = api.create_repo(repo_id=repo_id, private=private, exist_ok=True).repo_id if commit_message is None: if "Policy" in self.__class__.__name__: commit_message = "Upload policy" elif "Config" in self.__class__.__name__: commit_message = "Upload config" else: commit_message = f"Upload {self.__class__.__name__}" # Push the files to the repo in a single commit with TemporaryDirectory(ignore_cleanup_errors=True) as tmp: saved_path = Path(tmp) / repo_id self.save_pretrained(saved_path, card_kwargs=card_kwargs) return api.upload_folder( repo_id=repo_id, repo_type="model", folder_path=saved_path, commit_message=commit_message, revision=branch, create_pr=create_pr, allow_patterns=allow_patterns, ignore_patterns=ignore_patterns, delete_patterns=delete_patterns, )
lerobot/src/lerobot/utils/hub.py/0
{ "file_path": "lerobot/src/lerobot/utils/hub.py", "repo_id": "lerobot", "token_count": 3818 }
221
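A subclass of `HubMixin` only has to say how to write its files (`_save_pretrained`) and how to rebuild itself (`from_pretrained`); `save_pretrained` and `push_to_hub` then come for free. The sketch below is hypothetical: the `Notes` class and its single JSON file stand in for a real subclass (e.g. a policy) that would save weights and a config, and whose `from_pretrained` would also handle Hub repo ids rather than only local folders.

```python
import json
from pathlib import Path

from lerobot.utils.hub import HubMixin


class Notes(HubMixin):
    def __init__(self, entries: list[str] | None = None):
        self.entries = entries or []

    def _save_pretrained(self, save_directory: Path) -> None:
        (save_directory / "notes.json").write_text(json.dumps(self.entries))

    @classmethod
    def from_pretrained(cls, pretrained_name_or_path, **kwargs) -> "Notes":
        # Sketch: only handles a local directory; a full implementation would
        # fall back to downloading from the Hub when given a repo id.
        path = Path(pretrained_name_or_path) / "notes.json"
        return cls(json.loads(path.read_text()))


notes = Notes(["check gripper calibration"])
notes.save_pretrained("outputs/notes_demo")          # writes outputs/notes_demo/notes.json
restored = Notes.from_pretrained("outputs/notes_demo")
assert restored.entries == notes.entries
# notes.push_to_hub("username/notes-demo")  # would upload the same files to the Hub
```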
# Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Unit-tests for the `PolicyServer` core logic. Monkey-patch the `policy` attribute with a stub so that no real model inference is performed. """ from __future__ import annotations import time import pytest import torch from lerobot.configs.types import PolicyFeature from tests.utils import require_package # ----------------------------------------------------------------------------- # Test fixtures # ----------------------------------------------------------------------------- class MockPolicy: """A minimal mock for an actual policy, returning zeros. Refer to tests/policies for tests of the individual policies supported.""" class _Config: robot_type = "dummy_robot" @property def image_features(self) -> dict[str, PolicyFeature]: """Empty image features since this test doesn't use images.""" return {} def predict_action_chunk(self, observation: dict[str, torch.Tensor]) -> torch.Tensor: """Return a chunk of 20 dummy actions.""" batch_size = len(observation["observation.state"]) return torch.zeros(batch_size, 20, 6) def __init__(self): self.config = self._Config() def to(self, *args, **kwargs): # The server calls `policy.to(device)`. This stub ignores it. return self def model(self, batch: dict) -> torch.Tensor: # Return a chunk of 20 dummy actions. batch_size = len(batch["robot_type"]) return torch.zeros(batch_size, 20, 6) @pytest.fixture @require_package("grpc") def policy_server(): """Fresh `PolicyServer` instance with a stubbed-out policy model.""" # Import only when the test actually runs (after decorator check) from lerobot.scripts.server.configs import PolicyServerConfig from lerobot.scripts.server.policy_server import PolicyServer test_config = PolicyServerConfig(host="localhost", port=9999) server = PolicyServer(test_config) # Replace the real policy with our fast, deterministic stub. 
server.policy = MockPolicy() server.actions_per_chunk = 20 server.device = "cpu" # Add mock lerobot_features that the observation similarity functions need server.lerobot_features = { "observation.state": { "dtype": "float32", "shape": [6], "names": ["joint1", "joint2", "joint3", "joint4", "joint5", "joint6"], } } return server # ----------------------------------------------------------------------------- # Helper utilities for tests # ----------------------------------------------------------------------------- def _make_obs(state: torch.Tensor, timestep: int = 0, must_go: bool = False): """Create a TimedObservation with a given state vector.""" # Import only when needed from lerobot.scripts.server.helpers import TimedObservation return TimedObservation( observation={ "joint1": state[0].item() if len(state) > 0 else 0.0, "joint2": state[1].item() if len(state) > 1 else 0.0, "joint3": state[2].item() if len(state) > 2 else 0.0, "joint4": state[3].item() if len(state) > 3 else 0.0, "joint5": state[4].item() if len(state) > 4 else 0.0, "joint6": state[5].item() if len(state) > 5 else 0.0, }, timestamp=time.time(), timestep=timestep, must_go=must_go, ) # ----------------------------------------------------------------------------- # Tests # ----------------------------------------------------------------------------- def test_time_action_chunk(policy_server): """Verify that `_time_action_chunk` assigns correct timestamps and timesteps.""" start_ts = time.time() start_t = 10 # A chunk of 3 action tensors. action_tensors = [torch.randn(6) for _ in range(3)] timed_actions = policy_server._time_action_chunk(start_ts, action_tensors, start_t) assert len(timed_actions) == 3 # Check timesteps assert [ta.get_timestep() for ta in timed_actions] == [10, 11, 12] # Check timestamps expected_timestamps = [ start_ts, start_ts + policy_server.config.environment_dt, start_ts + 2 * policy_server.config.environment_dt, ] for ta, expected_ts in zip(timed_actions, expected_timestamps, strict=True): assert abs(ta.get_timestamp() - expected_ts) < 1e-6 def test_maybe_enqueue_observation_must_go(policy_server): """An observation with `must_go=True` is always enqueued.""" obs = _make_obs(torch.zeros(6), must_go=True) assert policy_server._enqueue_observation(obs) is True assert policy_server.observation_queue.qsize() == 1 assert policy_server.observation_queue.get_nowait() is obs def test_maybe_enqueue_observation_dissimilar(policy_server): """A dissimilar observation (not `must_go`) is enqueued.""" # Set a last predicted observation. policy_server.last_processed_obs = _make_obs(torch.zeros(6)) # Create a new, dissimilar observation. new_obs = _make_obs(torch.ones(6) * 5) # High norm difference assert policy_server._enqueue_observation(new_obs) is True assert policy_server.observation_queue.qsize() == 1 def test_maybe_enqueue_observation_is_skipped(policy_server): """A similar observation (not `must_go`) is skipped.""" # Set a last predicted observation. policy_server.last_processed_obs = _make_obs(torch.zeros(6)) # Create a new, very similar observation. 
new_obs = _make_obs(torch.zeros(6) + 1e-4) assert policy_server._enqueue_observation(new_obs) is False assert policy_server.observation_queue.empty() is True def test_obs_sanity_checks(policy_server): """Unit-test the private `_obs_sanity_checks` helper.""" prev = _make_obs(torch.zeros(6), timestep=0) # Case 1 – timestep already predicted policy_server._predicted_timesteps.add(1) obs_same_ts = _make_obs(torch.ones(6), timestep=1) assert policy_server._obs_sanity_checks(obs_same_ts, prev) is False # Case 2 – observation too similar policy_server._predicted_timesteps.clear() obs_similar = _make_obs(torch.zeros(6) + 1e-4, timestep=2) assert policy_server._obs_sanity_checks(obs_similar, prev) is False # Case 3 – genuinely new & dissimilar observation passes obs_ok = _make_obs(torch.ones(6) * 5, timestep=3) assert policy_server._obs_sanity_checks(obs_ok, prev) is True def test_predict_action_chunk(monkeypatch, policy_server): """End-to-end test of `_predict_action_chunk` with a stubbed _get_action_chunk.""" # Import only when needed from lerobot.scripts.server.policy_server import PolicyServer # Force server to act-style policy; patch method to return deterministic tensor policy_server.policy_type = "act" action_dim = 6 batch_size = 1 actions_per_chunk = policy_server.actions_per_chunk def _fake_get_action_chunk(_self, _obs, _type="act"): return torch.zeros(batch_size, actions_per_chunk, action_dim) monkeypatch.setattr(PolicyServer, "_get_action_chunk", _fake_get_action_chunk, raising=True) obs = _make_obs(torch.zeros(6), timestep=5) timed_actions = policy_server._predict_action_chunk(obs) assert len(timed_actions) == actions_per_chunk assert [ta.get_timestep() for ta in timed_actions] == list(range(5, 5 + actions_per_chunk)) for i, ta in enumerate(timed_actions): expected_ts = obs.get_timestamp() + i * policy_server.config.environment_dt assert abs(ta.get_timestamp() - expected_ts) < 1e-6
lerobot/tests/async_inference/test_policy_server.py/0
{ "file_path": "lerobot/tests/async_inference/test_policy_server.py", "repo_id": "lerobot", "token_count": 2904 }
222
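`test_time_action_chunk` above pins down a simple contract: actions in a predicted chunk are spaced `environment_dt` apart in both timestamp and timestep, starting from the observation they were predicted for. A standalone sketch of that contract (the `dt` value is illustrative; the real one comes from `PolicyServerConfig.environment_dt`):

```python
import time

import torch

dt = 0.05  # hypothetical environment_dt (20 Hz control loop)


def time_action_chunk(start_ts: float, actions: list[torch.Tensor], start_t: int):
    """Pair each action with a timestamp/timestep, mirroring what the test checks."""
    return [(start_ts + i * dt, start_t + i, a) for i, a in enumerate(actions)]


chunk = [torch.zeros(6) for _ in range(3)]
timed = time_action_chunk(time.time(), chunk, start_t=10)

assert [t for _, t, _ in timed] == [10, 11, 12]
assert abs((timed[2][0] - timed[0][0]) - 2 * dt) < 1e-9
```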
#!/usr/bin/env python # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import io import subprocess import sys from pathlib import Path import pytest from tests.fixtures.constants import DUMMY_REPO_ID from tests.utils import require_package def _find_and_replace(text: str, finds_and_replaces: list[tuple[str, str]]) -> str: for f, r in finds_and_replaces: assert f in text text = text.replace(f, r) return text # TODO(aliberts): Remove usage of subprocess calls and patch code with fixtures def _run_script(path): subprocess.run([sys.executable, path], check=True) def _read_file(path): with open(path) as file: return file.read() @pytest.mark.skip("TODO Fix and remove subprocess / excec calls") def test_example_1(tmp_path, lerobot_dataset_factory): _ = lerobot_dataset_factory(root=tmp_path, repo_id=DUMMY_REPO_ID) path = "examples/1_load_lerobot_dataset.py" file_contents = _read_file(path) file_contents = _find_and_replace( file_contents, [ ('repo_id = "lerobot/pusht"', f'repo_id = "{DUMMY_REPO_ID}"'), ( "LeRobotDataset(repo_id", f"LeRobotDataset(repo_id, root='{str(tmp_path)}'", ), ], ) exec(file_contents, {}) assert Path("outputs/examples/1_load_lerobot_dataset/episode_0.mp4").exists() @pytest.mark.skip("TODO Fix and remove subprocess / excec calls") @require_package("gym_pusht") def test_examples_basic2_basic3_advanced1(): """ Train a model with example 3, check the outputs. Evaluate the trained model with example 2, check the outputs. Calculate the validation loss with advanced example 1, check the outputs. """ ### Test example 3 file_contents = _read_file("examples/3_train_policy.py") # Do fewer steps, use smaller batch, use CPU, and don't complicate things with dataloader workers. file_contents = _find_and_replace( file_contents, [ ("training_steps = 5000", "training_steps = 1"), ("num_workers=4", "num_workers=0"), ('device = torch.device("cuda")', 'device = torch.device("cpu")'), ("batch_size=64", "batch_size=1"), ], ) # Pass empty globals to allow dictionary comprehension https://stackoverflow.com/a/32897127/4391249. exec(file_contents, {}) for file_name in ["model.safetensors", "config.json"]: assert Path(f"outputs/train/example_pusht_diffusion/{file_name}").exists() ### Test example 2 file_contents = _read_file("examples/2_evaluate_pretrained_policy.py") # Do fewer evals, use CPU, and use the local model. 
file_contents = _find_and_replace( file_contents, [ ( 'pretrained_policy_path = Path(snapshot_download("lerobot/diffusion_pusht"))', "", ), ( '# pretrained_policy_path = Path("outputs/train/example_pusht_diffusion")', 'pretrained_policy_path = Path("outputs/train/example_pusht_diffusion")', ), ('device = torch.device("cuda")', 'device = torch.device("cpu")'), ("step += 1", "break"), ], ) exec(file_contents, {}) assert Path("outputs/eval/example_pusht_diffusion/rollout.mp4").exists() ## Test example 4 file_contents = _read_file("examples/advanced/2_calculate_validation_loss.py") # Run on a single example from the last episode, use CPU, and use the local model. file_contents = _find_and_replace( file_contents, [ ( 'pretrained_policy_path = Path(snapshot_download("lerobot/diffusion_pusht"))', "", ), ( '# pretrained_policy_path = Path("outputs/train/example_pusht_diffusion")', 'pretrained_policy_path = Path("outputs/train/example_pusht_diffusion")', ), ("train_episodes = episodes[:num_train_episodes]", "train_episodes = [0]"), ("val_episodes = episodes[num_train_episodes:]", "val_episodes = [1]"), ("num_workers=4", "num_workers=0"), ('device = torch.device("cuda")', 'device = torch.device("cpu")'), ("batch_size=64", "batch_size=1"), ], ) # Capture the output of the script output_buffer = io.StringIO() sys.stdout = output_buffer exec(file_contents, {}) printed_output = output_buffer.getvalue() # Restore stdout to its original state sys.stdout = sys.__stdout__ assert "Average loss on validation set" in printed_output
lerobot/tests/examples/test_examples.py/0
{ "file_path": "lerobot/tests/examples/test_examples.py", "repo_id": "lerobot", "token_count": 2166 }
223
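The tests above patch the example scripts textually and then `exec` them. The pattern is small enough to show in isolation; the one-line script below is made up, but the assert-replace-exec flow is the same one `_find_and_replace` implements.

```python
def find_and_replace(text: str, pairs: list[tuple[str, str]]) -> str:
    for find, replace in pairs:
        assert find in text  # fail loudly if the example script changed upstream
        text = text.replace(find, replace)
    return text


script = 'training_steps = 5000\nprint("running", training_steps, "steps")'

# Shrink the run so the "example" finishes instantly, then execute it.
patched = find_and_replace(script, [("training_steps = 5000", "training_steps = 1")])
exec(patched, {})  # prints: running 1 steps
```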
# Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from torch.optim.lr_scheduler import LambdaLR from lerobot.constants import SCHEDULER_STATE from lerobot.optim.schedulers import ( CosineDecayWithWarmupSchedulerConfig, DiffuserSchedulerConfig, VQBeTSchedulerConfig, load_scheduler_state, save_scheduler_state, ) def test_diffuser_scheduler(optimizer): config = DiffuserSchedulerConfig(name="cosine", num_warmup_steps=5) scheduler = config.build(optimizer, num_training_steps=100) assert isinstance(scheduler, LambdaLR) optimizer.step() # so that we don't get torch warning scheduler.step() expected_state_dict = { "_get_lr_called_within_step": False, "_last_lr": [0.0002], "_step_count": 2, "base_lrs": [0.001], "last_epoch": 1, "lr_lambdas": [None], } assert scheduler.state_dict() == expected_state_dict def test_vqbet_scheduler(optimizer): config = VQBeTSchedulerConfig(num_warmup_steps=10, num_vqvae_training_steps=20, num_cycles=0.5) scheduler = config.build(optimizer, num_training_steps=100) assert isinstance(scheduler, LambdaLR) optimizer.step() scheduler.step() expected_state_dict = { "_get_lr_called_within_step": False, "_last_lr": [0.001], "_step_count": 2, "base_lrs": [0.001], "last_epoch": 1, "lr_lambdas": [None], } assert scheduler.state_dict() == expected_state_dict def test_cosine_decay_with_warmup_scheduler(optimizer): config = CosineDecayWithWarmupSchedulerConfig( num_warmup_steps=10, num_decay_steps=90, peak_lr=0.01, decay_lr=0.001 ) scheduler = config.build(optimizer, num_training_steps=100) assert isinstance(scheduler, LambdaLR) optimizer.step() scheduler.step() expected_state_dict = { "_get_lr_called_within_step": False, "_last_lr": [0.0001818181818181819], "_step_count": 2, "base_lrs": [0.001], "last_epoch": 1, "lr_lambdas": [None], } assert scheduler.state_dict() == expected_state_dict def test_save_scheduler_state(scheduler, tmp_path): save_scheduler_state(scheduler, tmp_path) assert (tmp_path / SCHEDULER_STATE).is_file() def test_save_load_scheduler_state(scheduler, tmp_path): save_scheduler_state(scheduler, tmp_path) loaded_scheduler = load_scheduler_state(scheduler, tmp_path) assert scheduler.state_dict() == loaded_scheduler.state_dict()
lerobot/tests/optim/test_schedulers.py/0
{ "file_path": "lerobot/tests/optim/test_schedulers.py", "repo_id": "lerobot", "token_count": 1223 }
224
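The `0.0002` in the first expected state dict is plain linear warmup: with `num_warmup_steps=5` and a base learning rate of `0.001`, the multiplier after one scheduler step is `1/5`. The standalone `LambdaLR` below mimics (rather than imports) a cosine-with-warmup schedule and reproduces that value.

```python
import math

import torch
from torch.optim.lr_scheduler import LambdaLR

num_warmup_steps, num_training_steps = 5, 100


def lr_lambda(step: int) -> float:
    if step < num_warmup_steps:
        return step / max(1, num_warmup_steps)  # linear warmup
    progress = (step - num_warmup_steps) / max(1, num_training_steps - num_warmup_steps)
    return 0.5 * (1.0 + math.cos(math.pi * progress))  # cosine decay, num_cycles = 0.5


param = torch.nn.Parameter(torch.zeros(1))
optimizer = torch.optim.SGD([param], lr=0.001)
scheduler = LambdaLR(optimizer, lr_lambda)

optimizer.step()   # step the optimizer first to avoid the usual torch warning
scheduler.step()
print(scheduler.get_last_lr())  # [0.0002] = 0.001 * (1 / 5), matching the expected state dict
```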
#!/usr/bin/env python # Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import io from multiprocessing import Event, Queue from pickle import UnpicklingError import pytest import torch from lerobot.utils.transition import Transition from tests.utils import require_cuda, require_package @require_package("grpc") def test_bytes_buffer_size_empty_buffer(): from lerobot.transport.utils import bytes_buffer_size """Test with an empty buffer.""" buffer = io.BytesIO() assert bytes_buffer_size(buffer) == 0 # Ensure position is reset to beginning assert buffer.tell() == 0 @require_package("grpc") def test_bytes_buffer_size_small_buffer(): from lerobot.transport.utils import bytes_buffer_size """Test with a small buffer.""" buffer = io.BytesIO(b"Hello, World!") assert bytes_buffer_size(buffer) == 13 assert buffer.tell() == 0 @require_package("grpc") def test_bytes_buffer_size_large_buffer(): from lerobot.transport.utils import CHUNK_SIZE, bytes_buffer_size """Test with a large buffer.""" data = b"x" * (CHUNK_SIZE * 2 + 1000) buffer = io.BytesIO(data) assert bytes_buffer_size(buffer) == len(data) assert buffer.tell() == 0 @require_package("grpc") def test_send_bytes_in_chunks_empty_data(): from lerobot.transport.utils import send_bytes_in_chunks, services_pb2 """Test sending empty data.""" message_class = services_pb2.InteractionMessage chunks = list(send_bytes_in_chunks(b"", message_class)) assert len(chunks) == 0 @require_package("grpc") def test_single_chunk_small_data(): from lerobot.transport.utils import send_bytes_in_chunks, services_pb2 """Test data that fits in a single chunk.""" data = b"Some data" message_class = services_pb2.InteractionMessage chunks = list(send_bytes_in_chunks(data, message_class)) assert len(chunks) == 1 assert chunks[0].data == b"Some data" assert chunks[0].transfer_state == services_pb2.TransferState.TRANSFER_END @require_package("grpc") def test_not_silent_mode(): from lerobot.transport.utils import send_bytes_in_chunks, services_pb2 """Test not silent mode.""" data = b"Some data" message_class = services_pb2.InteractionMessage chunks = list(send_bytes_in_chunks(data, message_class, silent=False)) assert len(chunks) == 1 assert chunks[0].data == b"Some data" @require_package("grpc") def test_send_bytes_in_chunks_large_data(): from lerobot.transport.utils import CHUNK_SIZE, send_bytes_in_chunks, services_pb2 """Test sending large data.""" data = b"x" * (CHUNK_SIZE * 2 + 1000) message_class = services_pb2.InteractionMessage chunks = list(send_bytes_in_chunks(data, message_class)) assert len(chunks) == 3 assert chunks[0].data == b"x" * CHUNK_SIZE assert chunks[0].transfer_state == services_pb2.TransferState.TRANSFER_BEGIN assert chunks[1].data == b"x" * CHUNK_SIZE assert chunks[1].transfer_state == services_pb2.TransferState.TRANSFER_MIDDLE assert chunks[2].data == b"x" * 1000 assert chunks[2].transfer_state == services_pb2.TransferState.TRANSFER_END @require_package("grpc") def 
test_send_bytes_in_chunks_large_data_with_exact_chunk_size(): from lerobot.transport.utils import CHUNK_SIZE, send_bytes_in_chunks, services_pb2 """Test sending large data with exact chunk size.""" data = b"x" * CHUNK_SIZE message_class = services_pb2.InteractionMessage chunks = list(send_bytes_in_chunks(data, message_class)) assert len(chunks) == 1 assert chunks[0].data == data assert chunks[0].transfer_state == services_pb2.TransferState.TRANSFER_END @require_package("grpc") def test_receive_bytes_in_chunks_empty_data(): from lerobot.transport.utils import receive_bytes_in_chunks """Test receiving empty data.""" queue = Queue() shutdown_event = Event() # Empty iterator receive_bytes_in_chunks(iter([]), queue, shutdown_event) assert queue.empty() @require_package("grpc") def test_receive_bytes_in_chunks_single_chunk(): from lerobot.transport.utils import receive_bytes_in_chunks, services_pb2 """Test receiving a single chunk message.""" queue = Queue() shutdown_event = Event() data = b"Single chunk data" chunks = [ services_pb2.InteractionMessage(data=data, transfer_state=services_pb2.TransferState.TRANSFER_END) ] receive_bytes_in_chunks(iter(chunks), queue, shutdown_event) assert queue.get(timeout=0.01) == data assert queue.empty() @require_package("grpc") def test_receive_bytes_in_chunks_single_not_end_chunk(): from lerobot.transport.utils import receive_bytes_in_chunks, services_pb2 """Test receiving a single chunk message.""" queue = Queue() shutdown_event = Event() data = b"Single chunk data" chunks = [ services_pb2.InteractionMessage(data=data, transfer_state=services_pb2.TransferState.TRANSFER_MIDDLE) ] receive_bytes_in_chunks(iter(chunks), queue, shutdown_event) assert queue.empty() @require_package("grpc") def test_receive_bytes_in_chunks_multiple_chunks(): from lerobot.transport.utils import receive_bytes_in_chunks, services_pb2 """Test receiving a multi-chunk message.""" queue = Queue() shutdown_event = Event() chunks = [ services_pb2.InteractionMessage( data=b"First ", transfer_state=services_pb2.TransferState.TRANSFER_BEGIN ), services_pb2.InteractionMessage( data=b"Middle ", transfer_state=services_pb2.TransferState.TRANSFER_MIDDLE ), services_pb2.InteractionMessage(data=b"Last", transfer_state=services_pb2.TransferState.TRANSFER_END), ] receive_bytes_in_chunks(iter(chunks), queue, shutdown_event) assert queue.get(timeout=0.01) == b"First Middle Last" assert queue.empty() @require_package("grpc") def test_receive_bytes_in_chunks_multiple_messages(): from lerobot.transport.utils import receive_bytes_in_chunks, services_pb2 """Test receiving multiple complete messages in sequence.""" queue = Queue() shutdown_event = Event() chunks = [ # First message - single chunk services_pb2.InteractionMessage( data=b"Message1", transfer_state=services_pb2.TransferState.TRANSFER_END ), # Second message - multi chunk services_pb2.InteractionMessage( data=b"Start2 ", transfer_state=services_pb2.TransferState.TRANSFER_BEGIN ), services_pb2.InteractionMessage( data=b"Middle2 ", transfer_state=services_pb2.TransferState.TRANSFER_MIDDLE ), services_pb2.InteractionMessage(data=b"End2", transfer_state=services_pb2.TransferState.TRANSFER_END), # Third message - single chunk services_pb2.InteractionMessage( data=b"Message3", transfer_state=services_pb2.TransferState.TRANSFER_END ), ] receive_bytes_in_chunks(iter(chunks), queue, shutdown_event) # Should have three messages in queue assert queue.get(timeout=0.01) == b"Message1" assert queue.get(timeout=0.01) == b"Start2 Middle2 End2" assert 
queue.get(timeout=0.01) == b"Message3" assert queue.empty() @require_package("grpc") def test_receive_bytes_in_chunks_shutdown_during_receive(): from lerobot.transport.utils import receive_bytes_in_chunks, services_pb2 """Test that shutdown event stops receiving mid-stream.""" queue = Queue() shutdown_event = Event() shutdown_event.set() chunks = [ services_pb2.InteractionMessage( data=b"First ", transfer_state=services_pb2.TransferState.TRANSFER_BEGIN ), services_pb2.InteractionMessage( data=b"Middle ", transfer_state=services_pb2.TransferState.TRANSFER_MIDDLE ), services_pb2.InteractionMessage(data=b"Last", transfer_state=services_pb2.TransferState.TRANSFER_END), ] receive_bytes_in_chunks(iter(chunks), queue, shutdown_event) assert queue.empty() @require_package("grpc") def test_receive_bytes_in_chunks_only_begin_chunk(): from lerobot.transport.utils import receive_bytes_in_chunks, services_pb2 """Test receiving only a BEGIN chunk without END.""" queue = Queue() shutdown_event = Event() chunks = [ services_pb2.InteractionMessage( data=b"Start", transfer_state=services_pb2.TransferState.TRANSFER_BEGIN ), # No END chunk ] receive_bytes_in_chunks(iter(chunks), queue, shutdown_event) assert queue.empty() @require_package("grpc") def test_receive_bytes_in_chunks_missing_begin(): from lerobot.transport.utils import receive_bytes_in_chunks, services_pb2 """Test receiving chunks starting with MIDDLE instead of BEGIN.""" queue = Queue() shutdown_event = Event() chunks = [ # Missing BEGIN services_pb2.InteractionMessage( data=b"Middle", transfer_state=services_pb2.TransferState.TRANSFER_MIDDLE ), services_pb2.InteractionMessage(data=b"End", transfer_state=services_pb2.TransferState.TRANSFER_END), ] receive_bytes_in_chunks(iter(chunks), queue, shutdown_event) # The implementation continues from where it is, so we should get partial data assert queue.get(timeout=0.01) == b"MiddleEnd" assert queue.empty() # Tests for state_to_bytes and bytes_to_state_dict @require_package("grpc") def test_state_to_bytes_empty_dict(): from lerobot.transport.utils import bytes_to_state_dict, state_to_bytes """Test converting empty state dict to bytes.""" state_dict = {} data = state_to_bytes(state_dict) reconstructed = bytes_to_state_dict(data) assert reconstructed == state_dict @require_package("grpc") def test_bytes_to_state_dict_empty_data(): from lerobot.transport.utils import bytes_to_state_dict """Test converting empty data to state dict.""" with pytest.raises(EOFError): bytes_to_state_dict(b"") @require_package("grpc") def test_state_to_bytes_simple_dict(): from lerobot.transport.utils import bytes_to_state_dict, state_to_bytes """Test converting simple state dict to bytes.""" state_dict = { "layer1.weight": torch.randn(10, 5), "layer1.bias": torch.randn(10), "layer2.weight": torch.randn(1, 10), "layer2.bias": torch.randn(1), } data = state_to_bytes(state_dict) assert isinstance(data, bytes) assert len(data) > 0 reconstructed = bytes_to_state_dict(data) assert len(reconstructed) == len(state_dict) for key in state_dict: assert key in reconstructed assert torch.allclose(state_dict[key], reconstructed[key]) @require_package("grpc") def test_state_to_bytes_various_dtypes(): from lerobot.transport.utils import bytes_to_state_dict, state_to_bytes """Test converting state dict with various tensor dtypes.""" state_dict = { "float32": torch.randn(5, 5), "float64": torch.randn(3, 3).double(), "int32": torch.randint(0, 100, (4, 4), dtype=torch.int32), "int64": torch.randint(0, 100, (2, 2), dtype=torch.int64), "bool": 
torch.tensor([True, False, True]), "uint8": torch.randint(0, 255, (3, 3), dtype=torch.uint8), } data = state_to_bytes(state_dict) reconstructed = bytes_to_state_dict(data) for key in state_dict: assert reconstructed[key].dtype == state_dict[key].dtype if state_dict[key].dtype == torch.bool: assert torch.equal(state_dict[key], reconstructed[key]) else: assert torch.allclose(state_dict[key], reconstructed[key]) @require_package("grpc") def test_bytes_to_state_dict_invalid_data(): from lerobot.transport.utils import bytes_to_state_dict """Test bytes_to_state_dict with invalid data.""" with pytest.raises(UnpicklingError): bytes_to_state_dict(b"This is not a valid torch save file") @require_cuda @require_package("grpc") def test_state_to_bytes_various_dtypes_cuda(): from lerobot.transport.utils import bytes_to_state_dict, state_to_bytes """Test converting state dict with various tensor dtypes.""" state_dict = { "float32": torch.randn(5, 5).cuda(), "float64": torch.randn(3, 3).double().cuda(), "int32": torch.randint(0, 100, (4, 4), dtype=torch.int32).cuda(), "int64": torch.randint(0, 100, (2, 2), dtype=torch.int64).cuda(), "bool": torch.tensor([True, False, True]), "uint8": torch.randint(0, 255, (3, 3), dtype=torch.uint8), } data = state_to_bytes(state_dict) reconstructed = bytes_to_state_dict(data) for key in state_dict: assert reconstructed[key].dtype == state_dict[key].dtype if state_dict[key].dtype == torch.bool: assert torch.equal(state_dict[key], reconstructed[key]) else: assert torch.allclose(state_dict[key], reconstructed[key]) @require_package("grpc") def test_python_object_to_bytes_none(): from lerobot.transport.utils import bytes_to_python_object, python_object_to_bytes """Test converting None to bytes.""" obj = None data = python_object_to_bytes(obj) reconstructed = bytes_to_python_object(data) assert reconstructed is None @pytest.mark.parametrize( "obj", [ 42, -123, 3.14159, -2.71828, "Hello, World!", "Unicode: 你好世界 🌍", True, False, b"byte string", [], [1, 2, 3], [1, "two", 3.0, True, None], {}, {"key": "value", "number": 123, "nested": {"a": 1}}, (), (1, 2, 3), ], ) @require_package("grpc") def test_python_object_to_bytes_simple_types(obj): from lerobot.transport.utils import bytes_to_python_object, python_object_to_bytes """Test converting simple Python types.""" data = python_object_to_bytes(obj) reconstructed = bytes_to_python_object(data) assert reconstructed == obj assert type(reconstructed) is type(obj) @require_package("grpc") def test_python_object_to_bytes_with_tensors(): from lerobot.transport.utils import bytes_to_python_object, python_object_to_bytes """Test converting objects containing PyTorch tensors.""" obj = { "tensor": torch.randn(5, 5), "list_with_tensor": [1, 2, torch.randn(3, 3), "string"], "nested": { "tensor1": torch.randn(2, 2), "tensor2": torch.tensor([1, 2, 3]), }, } data = python_object_to_bytes(obj) reconstructed = bytes_to_python_object(data) assert torch.allclose(obj["tensor"], reconstructed["tensor"]) assert reconstructed["list_with_tensor"][0] == 1 assert reconstructed["list_with_tensor"][3] == "string" assert torch.allclose(obj["list_with_tensor"][2], reconstructed["list_with_tensor"][2]) assert torch.allclose(obj["nested"]["tensor1"], reconstructed["nested"]["tensor1"]) assert torch.equal(obj["nested"]["tensor2"], reconstructed["nested"]["tensor2"]) @require_package("grpc") def test_transitions_to_bytes_empty_list(): from lerobot.transport.utils import bytes_to_transitions, transitions_to_bytes """Test converting empty transitions list.""" 
transitions = [] data = transitions_to_bytes(transitions) reconstructed = bytes_to_transitions(data) assert reconstructed == transitions assert isinstance(reconstructed, list) @require_package("grpc") def test_transitions_to_bytes_single_transition(): from lerobot.transport.utils import bytes_to_transitions, transitions_to_bytes """Test converting a single transition.""" transition = Transition( state={"image": torch.randn(3, 64, 64), "state": torch.randn(10)}, action=torch.randn(5), reward=torch.tensor(1.5), done=torch.tensor(False), next_state={"image": torch.randn(3, 64, 64), "state": torch.randn(10)}, ) transitions = [transition] data = transitions_to_bytes(transitions) reconstructed = bytes_to_transitions(data) assert len(reconstructed) == 1 assert_transitions_equal(transitions[0], reconstructed[0]) @require_package("grpc") def assert_transitions_equal(t1: Transition, t2: Transition): """Helper to assert two transitions are equal.""" assert_observation_equal(t1["state"], t2["state"]) assert torch.allclose(t1["action"], t2["action"]) assert torch.allclose(t1["reward"], t2["reward"]) assert torch.equal(t1["done"], t2["done"]) assert_observation_equal(t1["next_state"], t2["next_state"]) @require_package("grpc") def assert_observation_equal(o1: dict, o2: dict): """Helper to assert two observations are equal.""" assert set(o1.keys()) == set(o2.keys()) for key in o1: assert torch.allclose(o1[key], o2[key]) @require_package("grpc") def test_transitions_to_bytes_multiple_transitions(): from lerobot.transport.utils import bytes_to_transitions, transitions_to_bytes """Test converting multiple transitions.""" transitions = [] for i in range(5): transition = Transition( state={"data": torch.randn(10)}, action=torch.randn(3), reward=torch.tensor(float(i)), done=torch.tensor(i == 4), next_state={"data": torch.randn(10)}, ) transitions.append(transition) data = transitions_to_bytes(transitions) reconstructed = bytes_to_transitions(data) assert len(reconstructed) == len(transitions) for original, reconstructed_item in zip(transitions, reconstructed, strict=False): assert_transitions_equal(original, reconstructed_item) @require_package("grpc") def test_receive_bytes_in_chunks_unknown_state(): from lerobot.transport.utils import receive_bytes_in_chunks """Test receive_bytes_in_chunks with an unknown transfer state.""" # Mock the gRPC message object, which has `transfer_state` and `data` attributes. class MockMessage: def __init__(self, transfer_state, data): self.transfer_state = transfer_state self.data = data # 10 is not a valid TransferState enum value bad_iterator = [MockMessage(transfer_state=10, data=b"bad_data")] output_queue = Queue() shutdown_event = Event() with pytest.raises(ValueError, match="Received unknown transfer state"): receive_bytes_in_chunks(bad_iterator, output_queue, shutdown_event)
lerobot/tests/transport/test_transport_utils.py/0
{ "file_path": "lerobot/tests/transport/test_transport_utils.py", "repo_id": "lerobot", "token_count": 7325 }
225
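A minimal round-trip sketch of the chunked transport helpers exercised by the test file above. It only uses calls that appear in those tests (`send_bytes_in_chunks`, `receive_bytes_in_chunks`, `state_to_bytes`, `bytes_to_state_dict`, and the `services_pb2.InteractionMessage` message class); the tensor payload is a toy example, so treat this as an illustration rather than the canonical transport usage.

```python
from queue import Queue
from threading import Event

import torch

from lerobot.transport.utils import (
    bytes_to_state_dict,
    receive_bytes_in_chunks,
    send_bytes_in_chunks,
    services_pb2,
    state_to_bytes,
)

# Serialize a small state dict into raw bytes.
payload = state_to_bytes({"layer.weight": torch.randn(4, 4)})

# Sender side: split the payload into gRPC-sized chunks.
chunks = list(send_bytes_in_chunks(payload, services_pb2.InteractionMessage))

# Receiver side: reassemble the chunks and push the complete message onto a queue.
queue, shutdown_event = Queue(), Event()
receive_bytes_in_chunks(iter(chunks), queue, shutdown_event)

restored = bytes_to_state_dict(queue.get(timeout=1.0))
assert torch.allclose(restored["layer.weight"], bytes_to_state_dict(payload)["layer.weight"])
```

The receiver only relies on the TRANSFER_BEGIN/MIDDLE/END states, which is exactly what the multi-chunk and missing-begin tests above probe.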
# coding=utf-8 # Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import asyncio from fastapi import FastAPI from pydantic import BaseModel, ConfigDict from typing import Optional from fastapi import FastAPI, Request import argparse import asyncio from fastapi import FastAPI import uvicorn from e2b_code_interpreter.models import Execution from dotenv import load_dotenv from e2b_code_interpreter import AsyncSandbox load_dotenv() class BatchRequest(BaseModel): """ BatchRequest is a data model representing a batch processing request. Attributes: scripts (list[str]): A list of script names or paths to be executed. languages (list[str]): The programming languages for each script in the list. timeout (int): The maximum allowed execution time for each script in seconds. request_timeout (int): The maximum allowed time for the entire batch request in seconds. """ scripts: list[str] languages: list[str] timeout: int request_timeout: int class ScriptResult(BaseModel): """ ScriptResult is a Pydantic model that represents the result of a script execution. Attributes: execution (Optional[Execution]): An optional instance of the `Execution` class that contains details about the script's execution, such as status, output, or any other relevant metadata. exception_str (Optional[str]): An optional string that captures the exception message or details if an error occurred during the script's execution. model_config (ConfigDict): A configuration dictionary that allows arbitrary types to be used within the Pydantic model. This is necessary to support custom types like `Execution` within the model. """ execution: Optional[Execution] exception_str: Optional[str] # required to allow arbitrary types in pydantic models such as Execution model_config = ConfigDict(arbitrary_types_allowed=True) def create_app(args): """ Creates and configures a FastAPI application instance. Args: args: An object containing configuration parameters for the application. - num_sandboxes (int): The maximum number of concurrent sandboxes allowed. Returns: FastAPI: A configured FastAPI application instance. The application includes the following endpoints: 1. GET /health: - Returns the health status of the application. - Response: {"status": "ok"} 2. POST /execute_batch: - Executes a batch of scripts in an isolated sandbox environment. - Request Body: BatchRequest object containing: - languages (list[str]): The programming languages of the scripts (python or javascript). - timeout (int): The maximum execution time for each script. - request_timeout (int): The timeout for the request itself. - scripts (List[str]): A list of scripts to execute. - Response: A list of ScriptResult objects for each script, containing: - execution: The result of the script execution. - exception_str: Any exception encountered during execution. Notes: - A semaphore is used to limit the number of concurrent sandboxes. - Each script execution is wrapped in a timeout to prevent hanging. 
- Sandboxes are cleaned up after execution, even in case of errors. """ app = FastAPI() # Instantiate semaphore and attach it to app state app.state.sandbox_semaphore = asyncio.Semaphore(args.max_num_sandboxes) @app.get("/health") async def health(): return {"status": "ok"} @app.post("/execute_batch") async def execute_batch(batch: BatchRequest, request: Request): semaphore = request.app.state.sandbox_semaphore languages = batch.languages timeout = batch.timeout request_timeout = batch.request_timeout asyncio_timeout = batch.timeout + 1 async def run_script(script: str, language: str) -> ScriptResult: async with semaphore: try: sandbox = await AsyncSandbox.create( timeout=timeout, request_timeout=request_timeout, ) execution = await asyncio.wait_for( sandbox.run_code(script, language=language), timeout=asyncio_timeout, ) return ScriptResult(execution=execution, exception_str=None) except Exception as e: return ScriptResult(execution=None, exception_str=str(e)) finally: try: await sandbox.kill() except Exception: pass tasks = [run_script(script, lang) for script, lang in zip(batch.scripts, batch.languages)] return await asyncio.gather(*tasks) return app def parse_args(): """ Parse command-line arguments for the e2b_router script. Arguments: --host (str): The hostname or IP address to bind the server to. Defaults to "0.0.0.0" (binds to all interfaces). --port (int): The port number on which the server will listen. Defaults to 8000. --max_num_sandboxes (int): The maximum number of sandboxes that can be created or managed simultaneously. Defaults to 20. Returns: argparse.Namespace: Parsed command-line arguments as an object. """ parser = argparse.ArgumentParser() parser.add_argument("--host", default="0.0.0.0") parser.add_argument("--port", type=int, default=8000) parser.add_argument("--max_num_sandboxes", type=int, default=20) return parser.parse_args() if __name__ == "__main__": args = parse_args() app = create_app(args) uvicorn.run(app, host=args.host, port=args.port)
open-r1/scripts/e2b_router.py/0
{ "file_path": "open-r1/scripts/e2b_router.py", "repo_id": "open-r1", "token_count": 2427 }
226
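A hedged client-side sketch for the router defined in the file above. The `/health` and `/execute_batch` routes and the `BatchRequest` fields come from that file; the host, port, and payload values are placeholders chosen for illustration (the script defaults to `0.0.0.0:8000`).

```python
import requests

ROUTER_URL = "http://localhost:8000"  # assumed host/port; matches the script's defaults

# Quick liveness check against the /health route.
assert requests.get(f"{ROUTER_URL}/health", timeout=5).json() == {"status": "ok"}

# Mirror the BatchRequest model: scripts, languages, timeout, request_timeout.
payload = {
    "scripts": ["print(2 + 2)", "console.log('hello')"],
    "languages": ["python", "javascript"],
    "timeout": 30,
    "request_timeout": 28,
}

response = requests.post(f"{ROUTER_URL}/execute_batch", json=payload, timeout=60)
response.raise_for_status()

# Each entry mirrors ScriptResult: an execution payload or an exception string.
for result in response.json():
    print(result["exception_str"] or result["execution"])
```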
#!/bin/bash #SBATCH --job-name=deepseek-r1-generation #SBATCH --partition=hopper-prod #SBATCH --qos=normal #SBATCH --nodes=2 #SBATCH --exclusive #SBATCH --gpus-per-node=8 #SBATCH --output=./logs/%x-%j.out #SBATCH --error=./logs/%x-%j.err #SBATCH --time=04-00:00:00 # Parse command line arguments while [[ $# -gt 0 ]]; do case $1 in --hf-dataset) HF_DATASET="$2" shift 2 ;; --hf-dataset-config) HF_DATASET_CONFIG="$2" shift 2 ;; --hf-dataset-split) HF_DATASET_SPLIT="$2" shift 2 ;; --prompt-column) PROMPT_COLUMN="$2" shift 2 ;; --prompt-template) PROMPT_TEMPLATE="$2" shift 2 ;; --model) MODEL="$2" shift 2 ;; --temperature) TEMPERATURE="$2" shift 2 ;; --top-p) TOP_P="$2" shift 2 ;; --max-new-tokens) MAX_NEW_TOKENS="$2" shift 2 ;; --num-generations) NUM_GENERATIONS="$2" shift 2 ;; --input-batch-size) INPUT_BATCH_SIZE="$2" shift 2 ;; --client-replicas) CLIENT_REPLICAS="$2" shift 2 ;; --timeout) TIMEOUT="$2" shift 2 ;; --retries) RETRIES="$2" shift 2 ;; --hf-output-dataset) HF_OUTPUT_DATASET="$2" shift 2 ;; --private) PRIVATE="true" shift ;; *) echo "Unknown parameter: $1" exit 1 ;; esac done if [ -z "$MODEL" ] || [ -z "$HF_DATASET" ]; then echo "Error: --model and --hf-dataset are required parameters" exit 1 fi # Set default values for optional parameters HF_DATASET_SPLIT=${HF_DATASET_SPLIT:-"train"} PROMPT_COLUMN=${PROMPT_COLUMN:-"prompt"} PROMPT_TEMPLATE=${PROMPT_TEMPLATE:-"{{ instruction }}"} MAX_NEW_TOKENS=${MAX_NEW_TOKENS:-8192} NUM_GENERATIONS=${NUM_GENERATIONS:-1} INPUT_BATCH_SIZE=${INPUT_BATCH_SIZE:-64} CLIENT_REPLICAS=${CLIENT_REPLICAS:-1} TIMEOUT=${TIMEOUT:-900} RETRIES=${RETRIES:-0} PRIVATE=${PRIVATE:-"false"} # Print all input arguments echo "Input arguments:" echo "MODEL: $MODEL" echo "HF_DATASET: $HF_DATASET" echo "HF_DATASET_CONFIG: $HF_DATASET_CONFIG" echo "HF_DATASET_SPLIT: $HF_DATASET_SPLIT" echo "PROMPT_COLUMN: $PROMPT_COLUMN" echo "PROMPT_TEMPLATE: $PROMPT_TEMPLATE" echo "TEMPERATURE: $TEMPERATURE" echo "TOP_P: $TOP_P" echo "MAX_NEW_TOKENS: $MAX_NEW_TOKENS" echo "NUM_GENERATIONS: $NUM_GENERATIONS" echo "INPUT_BATCH_SIZE: $INPUT_BATCH_SIZE" echo "CLIENT_REPLICAS: $CLIENT_REPLICAS" echo "TIMEOUT: $TIMEOUT" echo "RETRIES: $RETRIES" echo "HF_OUTPUT_DATASET: $HF_OUTPUT_DATASET" echo "PRIVATE: $PRIVATE" echo "-------------------" set -ex module load cuda/12.4 export LD_LIBRARY_PATH=.venv/lib/python3.11/site-packages/nvidia/nvjitlink/lib echo "SLURM_JOB_ID: $SLURM_JOB_ID" echo "SLURM_JOB_NODELIST: $SLURM_JOB_NODELIST" source openr1/bin/activate # Getting the node names nodes=$(scontrol show hostnames "$SLURM_JOB_NODELIST") nodes_array=($nodes) # Get the IP address of the head node head_node=${nodes_array[0]} head_node_ip=$(srun --nodes=1 --ntasks=1 -w "$head_node" hostname --ip-address) # Start Ray head node port=6379 ip_head=$head_node_ip:$port export ip_head echo "IP Head: $ip_head" echo "Starting HEAD at $head_node" srun --nodes=1 --ntasks=1 -w "$head_node" \ ray start --head --node-ip-address="$head_node_ip" --port=$port \ --dashboard-host=0.0.0.0 \ --dashboard-port=8265 \ --block & # Give some time to head node to start... sleep 10 # Start Ray worker nodes worker_num=$((SLURM_JOB_NUM_NODES - 1)) # Start from 1 (0 is head node) for ((i = 1; i <= worker_num; i++)); do node_i=${nodes_array[$i]} echo "Starting WORKER $i at $node_i" srun --nodes=1 --ntasks=1 -w "$node_i" \ ray start --address "$ip_head" \ --block & sleep 5 done # Give some time to the Ray cluster to gather info echo "Waiting a bit for Ray cluster to gather node info..." 
sleep 60 # Run vllm RAY_ADDRESS="http://$head_node_ip:8265" ray job submit \ --working-dir src/open_r1 \ --no-wait \ --job-id vllm-server \ -- vllm serve $MODEL \ --tensor-parallel-size $SLURM_GPUS_PER_NODE \ --pipeline-parallel-size $SLURM_JOB_NUM_NODES \ --gpu-memory-utilization=0.85 \ --max-model-len 16384 \ --enable-chunked-prefill \ --trust-remote-code \ --distributed-executor-backend ray # wait for vllm to load the model echo "Waiting for vLLM (http://$head_node_ip:8000) server to be up..." # wait for vllm to load and serve the model while true; do if curl -s -o /dev/null -w "%{http_code}" http://$head_node_ip:8000 >/dev/null 2>&1; then echo "Received response from http://$head_node_ip:8000" break else echo "Still waiting... (Press Ctrl+C to cancel)" sleep 60 fi done echo "Checking available models..." curl http://$head_node_ip:8000/v1/models echo "Executing sanity check..." curl http://$head_node_ip:8000/v1/completions \ -H "Content-Type: application/json" \ -d "{ \"model\": \"$MODEL\", \"prompt\": \"<|begin▁of▁sentence|><|User|>hi, how are you?<|Assistant|>\", \"max_tokens\": 2048, \"temperature\": 0.6 }" # Finally submit the job to the cluster echo "Submitting job to ray cluster..." RAY_ADDRESS="http://$head_node_ip:8265" ray job submit \ --working-dir src/open_r1 \ --job-id generate \ -- python -u generate.py \ --model "$MODEL" \ --hf-dataset "$HF_DATASET" \ ${HF_DATASET_CONFIG:+--hf-dataset-config "$HF_DATASET_CONFIG"} \ --hf-dataset-split "$HF_DATASET_SPLIT" \ --prompt-column "$PROMPT_COLUMN" \ --prompt-template "$PROMPT_TEMPLATE" \ ${TEMPERATURE:+--temperature "$TEMPERATURE"} \ ${TOP_P:+--top-p "$TOP_P"} \ --max-new-tokens "$MAX_NEW_TOKENS" \ --num-generations "$NUM_GENERATIONS" \ --input-batch-size "$INPUT_BATCH_SIZE" \ --client-replicas "$CLIENT_REPLICAS" \ --timeout "$TIMEOUT" \ --retries "$RETRIES" \ ${HF_OUTPUT_DATASET:+--hf-output-dataset "$HF_OUTPUT_DATASET"} \ ${PRIVATE:+--private} \ --vllm-server-url "http://$head_node_ip:8000/v1" mkdir -p ray_logs echo "Downloading Ray job logs..." RAY_ADDRESS="http://$head_node_ip:8265" ray job logs --job-id vllm-server > ray_logs/vllm-server-${SLURM_JOB_ID}.log RAY_ADDRESS="http://$head_node_ip:8265" ray job logs --job-id generate > ray_logs/generate-${SLURM_JOB_ID}.log
open-r1/slurm/generate.slurm/0
{ "file_path": "open-r1/slurm/generate.slurm", "repo_id": "open-r1", "token_count": 3431 }
227
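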
# coding=utf-8 # Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Code execution providers for executing and evaluating code snippets.""" import abc import asyncio from typing import List, Optional from ..utils import is_e2b_available, is_morph_available if is_e2b_available(): from e2b_code_interpreter import AsyncSandbox from e2b_code_interpreter.models import Execution from .routed_sandbox import RoutedSandbox else: AsyncSandbox = None Execution = None RoutedSandbox = None if is_morph_available(): from morphcloud.api import MorphCloudClient from morphcloud.sandbox import Sandbox from .routed_morph import RoutedMorphSandbox else: MorphCloudClient = None Sandbox = None RoutedMorphSandbox = None class CodeExecutionProvider(abc.ABC): """Abstract base class for code execution providers.""" @abc.abstractmethod def execute_scripts(self, scripts: List[str], languages: List[str]) -> List[float]: """Execute multiple scripts and return their reward values. Args: scripts: List of code scripts to execute language: The programming language of the scripts Returns: List of float rewards (one per script) """ pass class E2BProvider(CodeExecutionProvider): """Provider that executes code using E2B sandboxes.""" def __init__(self, num_parallel: int = 2, e2b_router_url: Optional[str] = None): """Initialize the E2B provider. Args: num_parallel: Number of parallel sandboxes to use e2b_router_url: URL for the E2B router (if using router mode) """ if not is_e2b_available(): raise ImportError( "E2B is not available and required for this provider. Please install E2B with " "`pip install e2b-code-interpreter` and add an API key to a `.env` file." ) self.num_parallel = num_parallel self.e2b_router_url = e2b_router_url def execute_scripts(self, scripts: List[str], languages: List[str]) -> List[float]: """Execute scripts using E2B sandboxes. If e2b_router_url is provided, uses the RoutedSandbox for batch processing. Otherwise, uses direct AsyncSandbox with parallelization. 
""" if self.e2b_router_url is not None: routed_sandbox = RoutedSandbox(router_url=self.e2b_router_url) executions = routed_sandbox.run_code( scripts=scripts, languages=languages, timeout=30, request_timeout=28, ) rewards = [] for execution in executions: try: reward = float(execution.text) rewards.append(reward) except Exception: rewards.append(None) return rewards try: rewards = self._run_async_from_sync(scripts, languages, self.num_parallel) except Exception as e: print(f"Error from E2B executor: {e}") rewards = [0.0] * len(scripts) return rewards def _run_async_from_sync(self, scripts: List[str], languages: List[str], num_parallel: int) -> List[float]: """Function wrapping the `_run_async` function.""" try: rewards = asyncio.run(self._run_async(scripts, languages, num_parallel)) except Exception as e: print(f"Error from E2B executor async: {e}") raise e return rewards async def _run_async(self, scripts: List[str], languages: List[str], num_parallel: int) -> List[float]: semaphore = asyncio.Semaphore(num_parallel) tasks = [self._run_script(script, languages, semaphore) for script in scripts] results = await asyncio.gather(*tasks) rewards = list(results) return rewards async def _run_script(self, script: str, languages: List[str], semaphore: asyncio.Semaphore) -> float: # We set a timeout margin, as the AsyncSandbox timeout does not seem to work # These values are based on running 256 examples with the gold solution # from open-r1/verifiable-coding-problems-python_decontaminated # see scripts/benchmark_e2b.py SANDBOX_TIMEOUT = 30 MARGIN = 2 REQUEST_TIMEOUT = SANDBOX_TIMEOUT - MARGIN ASYNCIO_TIMEOUT = SANDBOX_TIMEOUT + MARGIN async with semaphore: try: sandbox = await AsyncSandbox.create(timeout=SANDBOX_TIMEOUT, request_timeout=REQUEST_TIMEOUT) execution = await asyncio.wait_for( sandbox.run_code(script, languages=languages), timeout=ASYNCIO_TIMEOUT, ) return float(execution.text) except (TypeError, ValueError): return 0.0 except asyncio.TimeoutError: print("Operation timed out") return 0.0 except Exception as e: print(f"Error in `_run_script` from E2B sandbox ID {sandbox.sandbox_id} : {e}") return 0.0 finally: try: await sandbox.kill() except Exception as e: print(f"Error from E2B executor kill with sandbox ID {sandbox.sandbox_id} : {e}") class MorphProvider(CodeExecutionProvider): """Provider that executes code using MorphCloud's Sandbox API.""" def __init__(self, num_parallel: int = 2, morph_router_url: Optional[str] = None): """Initialize the Morph provider. Args: num_parallel: Number of parallel executions to use morph_router_url: URL for the MorphCloud router (if using router mode) """ if not is_morph_available(): raise ImportError( "MorphCloud is not available and required for this provider. Please install MorphCloud with " "`pip install morphcloud` and add an API key to a `.env` file." ) try: from dotenv import load_dotenv load_dotenv() except ImportError: print("Warning: python-dotenv not installed. Environment variables must be set directly.") self.num_parallel = num_parallel self.morph_router_url = morph_router_url if self.morph_router_url is not None: self.routed_sandbox = RoutedMorphSandbox(router_url=self.morph_router_url) return import os self.api_key = os.getenv("MORPH_API_KEY") if not self.api_key: raise ValueError("MorphCloud API key not found. 
Please set the MORPH_API_KEY environment variable.") try: self.client = MorphCloudClient(api_key=self.api_key) self.Sandbox = Sandbox except ImportError as e: raise ImportError(f"Required MorphCloud dependencies not installed: {e}") def execute_scripts(self, scripts: List[str], languages: List[str]) -> List[float]: """Execute scripts using MorphCloud Sandbox API. Args: scripts: List of Python scripts to execute language: Programming language Returns: List of float rewards (one per script) """ if hasattr(self, "routed_sandbox"): try: results = self.routed_sandbox.run_code( scripts=scripts, languages=languages, timeout=90, request_timeout=96, ) rewards = [] for result in results: try: reward = float(result.text) rewards.append(reward) except (ValueError, AttributeError): rewards.append(0.0) return rewards except Exception as e: print(f"Error from MorphCloud router: {e}") return [0.0] * len(scripts) import asyncio try: rewards = asyncio.run(self._run_async(scripts, languages, self.num_parallel)) except Exception as e: print(f"Error from MorphCloud executor: {e}") rewards = [0.0] * len(scripts) return rewards async def _run_async(self, scripts: List[str], languages: List[str], num_parallel: int) -> List[float]: """Run multiple scripts concurrently with limited parallelism. Args: scripts: List of scripts to execute language: Programming language num_parallel: Maximum number of concurrent executions Returns: List of rewards """ semaphore = asyncio.Semaphore(num_parallel) tasks = [self._run_script(script, languages, semaphore) for script in scripts] results = await asyncio.gather(*tasks) return list(results) async def _run_script(self, script: str, languages: List[str], semaphore: asyncio.Semaphore) -> float: """Execute a single script in a MorphCloud Sandbox. Args: script: The script to execute language: Programming language semaphore: Semaphore to limit concurrency Returns: Float reward from script execution """ SANDBOX_TIMEOUT = 90 MARGIN = 6 ASYNCIO_TIMEOUT = SANDBOX_TIMEOUT + MARGIN sandbox = None async with semaphore: try: sandbox = await asyncio.to_thread(self.Sandbox.new, client=self.client, ttl_seconds=SANDBOX_TIMEOUT) result = await asyncio.wait_for( asyncio.to_thread( sandbox.run_code, script, languages=languages, timeout=SANDBOX_TIMEOUT, ), timeout=ASYNCIO_TIMEOUT, ) reward = 0.0 try: if hasattr(result, "text") and result.text: lines = result.text.strip().split("\n") if lines: try: reward = float(lines[-1]) except ValueError: try: reward = float(result.text.strip()) except ValueError: pass elif hasattr(result, "stdout") and result.stdout: lines = result.stdout.strip().split("\n") if lines: try: reward = float(lines[-1]) except ValueError: pass except (ValueError, AttributeError): pass return reward except asyncio.TimeoutError: return 0.0 except Exception: return 0.0 finally: if sandbox: try: await asyncio.to_thread(sandbox.close) await asyncio.to_thread(sandbox.shutdown) except Exception: pass def get_provider(provider_type: str = "e2b", **kwargs) -> CodeExecutionProvider: """Factory function to get the appropriate code execution provider. 
Args: provider_type: Type of provider to use ("e2b", "morph") **kwargs: Additional arguments to pass to the provider Returns: An instance of CodeExecutionProvider """ num_parallel = kwargs.pop("num_parallel", 2) if provider_type == "e2b": # Extract E2B-specific arguments e2b_router_url = kwargs.pop("e2b_router_url", None) return E2BProvider( num_parallel=num_parallel, e2b_router_url=e2b_router_url, ) elif provider_type == "morph": # Extract Morph-specific arguments morph_router_url = kwargs.pop("morph_router_url", None) return MorphProvider( num_parallel=num_parallel, morph_router_url=morph_router_url, ) else: raise ValueError(f"Unknown provider type: {provider_type}")
open-r1/src/open_r1/utils/code_providers.py/0
{ "file_path": "open-r1/src/open_r1/utils/code_providers.py", "repo_id": "open-r1", "token_count": 6087 }
228
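A short usage sketch for the `get_provider` factory defined above. The import path is inferred from the file location, and the reward convention (each sandboxed script prints a float as its final output) follows the parsing logic in the providers; the scripts themselves are toy placeholders.

```python
from open_r1.utils.code_providers import get_provider

# Build an E2B-backed provider; pass e2b_router_url to route through a shared router instead.
provider = get_provider(provider_type="e2b", num_parallel=4, e2b_router_url=None)

# Each sandboxed script is expected to print a float reward.
scripts = ["print(1.0)", "print(0.0)"]
rewards = provider.execute_scripts(scripts, languages=["python", "python"])
print(rewards)  # e.g. [1.0, 0.0] when both sandboxes execute successfully
```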
import os


def init_wandb_training(training_args):
    """
    Helper function for setting up Weights & Biases logging tools.
    """
    if training_args.wandb_entity is not None:
        os.environ["WANDB_ENTITY"] = training_args.wandb_entity
    if training_args.wandb_project is not None:
        os.environ["WANDB_PROJECT"] = training_args.wandb_project
    if training_args.wandb_run_group is not None:
        os.environ["WANDB_RUN_GROUP"] = training_args.wandb_run_group
open-r1/src/open_r1/utils/wandb_logging.py/0
{ "file_path": "open-r1/src/open_r1/utils/wandb_logging.py", "repo_id": "open-r1", "token_count": 196 }
229
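A minimal sketch of how `init_wandb_training` above might be called. The `TrainingArgs` dataclass here is a stand-in with just the three fields the helper reads; it is not a class from the original file, and the entity/project names are placeholders.

```python
import os
from dataclasses import dataclass

from open_r1.utils.wandb_logging import init_wandb_training  # path inferred from the file location


@dataclass
class TrainingArgs:
    wandb_entity: str | None = "my-entity"
    wandb_project: str | None = "open-r1"
    wandb_run_group: str | None = None


init_wandb_training(TrainingArgs())
assert os.environ["WANDB_PROJECT"] == "open-r1"  # picked up by wandb when training starts logging
```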
- title: Get started sections: - local: index title: 🤗 PEFT - local: quicktour title: Quicktour - local: install title: Installation - title: Tutorial sections: - local: tutorial/peft_model_config title: Configurations and models - local: tutorial/peft_integrations title: Integrations - title: PEFT method guides sections: - local: task_guides/prompt_based_methods title: Prompt-based methods - local: task_guides/lora_based_methods title: LoRA methods - local: task_guides/ia3 title: IA3 - title: Developer guides sections: - local: developer_guides/model_merging title: Model merging - local: developer_guides/quantization title: Quantization - local: developer_guides/lora title: LoRA - local: developer_guides/custom_models title: Custom models - local: developer_guides/low_level_api title: Adapter injection - local: developer_guides/mixed_models title: Mixed adapter types - local: developer_guides/torch_compile title: torch.compile - local: developer_guides/contributing title: Contribute to PEFT - local: developer_guides/troubleshooting title: Troubleshooting - local: developer_guides/checkpoint title: PEFT checkpoint format - title: 🤗 Accelerate integrations sections: - local: accelerate/deepspeed title: DeepSpeed - local: accelerate/fsdp title: Fully Sharded Data Parallel - title: Conceptual guides sections: - local: conceptual_guides/adapter title: Adapters - local: conceptual_guides/prompting title: Soft prompts - local: conceptual_guides/ia3 title: IA3 - local: conceptual_guides/oft title: OFT/BOFT - sections: - sections: - local: package_reference/auto_class title: AutoPeftModel - local: package_reference/peft_model title: PEFT model - local: package_reference/peft_types title: PEFT types - local: package_reference/config title: Configuration - local: package_reference/tuners title: Tuner title: Main classes - sections: - local: package_reference/adalora title: AdaLoRA - local: package_reference/ia3 title: IA3 - local: package_reference/llama_adapter title: Llama-Adapter - local: package_reference/loha title: LoHa - local: package_reference/lokr title: LoKr - local: package_reference/lora title: LoRA - local: package_reference/xlora title: X-LoRA - local: package_reference/adapter_utils title: LyCORIS - local: package_reference/multitask_prompt_tuning title: Multitask Prompt Tuning - local: package_reference/oft title: OFT - local: package_reference/boft title: BOFT - local: package_reference/poly title: Polytropon - local: package_reference/p_tuning title: P-tuning - local: package_reference/prefix_tuning title: Prefix tuning - local: package_reference/prompt_tuning title: Prompt tuning - local: package_reference/layernorm_tuning title: Layernorm tuning - local: package_reference/vera title: VeRA - local: package_reference/fourierft title: FourierFT - local: package_reference/vblora title: VB-LoRA - local: package_reference/hra title: HRA - local: package_reference/cpt title: CPT - local: package_reference/bone title: Bone - local: package_reference/trainable_tokens title: Trainable Tokens - local: package_reference/randlora title: RandLora - local: package_reference/shira title: SHiRA - local: package_reference/c3a title: C3A - local: package_reference/miss title: MiSS - local: package_reference/road title: RoAd title: Adapters - sections: - local: package_reference/merge_utils title: Model merge - local: package_reference/helpers title: Helpers - local: package_reference/hotswap title: Hotswapping adapters title: Utilities title: API reference
peft/docs/source/_toctree.yml/0
{ "file_path": "peft/docs/source/_toctree.yml", "repo_id": "peft", "token_count": 1519 }
230
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Troubleshooting If you encounter any issue when using PEFT, please check the following list of common issues and their solutions. ## Examples don't work Examples often rely on the most recent package versions, so please ensure they're up-to-date. In particular, check the following package versions: - `peft` - `transformers` - `accelerate` - `torch` In general, you can update the package version by running this command inside your Python environment: ```bash python -m pip install -U <package_name> ``` Installing PEFT from source is useful for keeping up with the latest developments: ```bash python -m pip install git+https://github.com/huggingface/peft ``` ## Dtype-related issues ### ValueError: Attempting to unscale FP16 gradients This error probably occurred because the model was loaded with `torch_dtype=torch.float16` and then used in an automatic mixed precision (AMP) context, e.g. by setting `fp16=True` in the [`~transformers.Trainer`] class from 🤗 Transformers. The reason is that when using AMP, trainable weights should never use fp16. To make this work without loading the whole model in fp32, add the following to your code: ```python peft_model = get_peft_model(...) # add this: for param in model.parameters(): if param.requires_grad: param.data = param.data.float() # proceed as usual trainer = Trainer(model=peft_model, fp16=True, ...) trainer.train() ``` Alternatively, you can use the [`~utils.cast_mixed_precision_params`] function to correctly cast the weights: ```python from peft import cast_mixed_precision_params peft_model = get_peft_model(...) cast_mixed_precision_params(peft_model, dtype=torch.float16) # proceed as usual trainer = Trainer(model=peft_model, fp16=True, ...) trainer.train() ``` <Tip> Starting from PEFT version v0.12.0, PEFT automatically promotes the dtype of adapter weights from `torch.float16` and `torch.bfloat16` to `torch.float32` where appropriate. To _prevent_ this behavior, you can pass `autocast_adapter_dtype=False` to [`~get_peft_model`], to [`~PeftModel.from_pretrained`], and to [`~PeftModel.load_adapter`]. </Tip> ### Selecting the dtype of the adapter Most PEFT methods, like LoRA, work by adding trainable adapter weights. By default, those weights are stored in float32 dtype (fp32), i.e. at a relatively high precision. Therefore, even if the base model is loaded in float16 (fp16) or bfloat16 (bf16), the adapter weights are float32. When the adapter results are calculated during the forward pass, the input will typically be in the dtype of the base model, thus it will be upcast to float32 if necessary, then cast back to the original dtype. If you prefer to have the adapter weights in the lower precision of the base model, i.e. 
in float16 or bfloat16, you can pass `autocast_adapter_dtype=False` when creating the model ([`~get_peft_model`]) or loading the model ([`~PeftModel.from_pretrained`]). There are some advantages and disadvantages to this: Advantages of half precision adapter: - computation slightly faster - slightly less memory - smaller file size of checkpoint (half the size) Disadvantages of half precision adapter: - slightly worse loss - higher risk of overflow or underflow Note that for most use cases, overall runtime and memory cost will be determined by the size of the base model and by the dataset, while the dtype of the PEFT adapter will only have a small impact. ## Bad results from a loaded PEFT model There can be several reasons for getting a poor result from a loaded PEFT model which are listed below. If you're still unable to troubleshoot the problem, see if anyone else had a similar [issue](https://github.com/huggingface/peft/issues) on GitHub, and if you can't find any, open a new issue. When opening an issue, it helps a lot if you provide a minimal code example that reproduces the issue. Also, please report if the loaded model performs at the same level as the model did before fine-tuning, if it performs at a random level, or if it is only slightly worse than expected. This information helps us identify the problem more quickly. ### Random deviations If your model outputs are not exactly the same as previous runs, there could be an issue with random elements. For example: 1. please ensure it is in `.eval()` mode, which is important, for instance, if the model uses dropout 2. if you use [`~transformers.GenerationMixin.generate`] on a language model, there could be random sampling, so obtaining the same result requires setting a random seed 3. if you used quantization and merged the weights, small deviations are expected due to rounding errors ### Incorrectly loaded model Please ensure that you load the model correctly. A common error is trying to load a _trained_ model with [`get_peft_model`] which is incorrect. Instead, the loading code should look like this: ```python from peft import PeftModel, PeftConfig base_model = ... # to load the base model, use the same code as when you trained it config = PeftConfig.from_pretrained(peft_model_id) peft_model = PeftModel.from_pretrained(base_model, peft_model_id) ``` ### Randomly initialized layers For some tasks, it is important to correctly configure `modules_to_save` in the config to account for randomly initialized layers. As an example, this is necessary if you use LoRA to fine-tune a language model for sequence classification because 🤗 Transformers adds a randomly initialized classification head on top of the model. If you do not add this layer to `modules_to_save`, the classification head won't be saved. The next time you load the model, you'll get a _different_ randomly initialized classification head, resulting in completely different results. PEFT tries to correctly guess the `modules_to_save` if you provide the `task_type` argument in the config. This should work for transformers models that follow the standard naming scheme. It is always a good idea to double check though because we can't guarantee all models follow the naming scheme. When you load a transformers model that has randomly initialized layers, you should see a warning along the lines of: ``` Some weights of <MODEL> were not initialized from the model checkpoint at <ID> and are newly initialized: [<LAYER_NAMES>]. 
You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. ``` The mentioned layers should be added to `modules_to_save` in the config to avoid the described problem. <Tip> As an example, when loading a model that is using the DeBERTa architecture for sequence classification, you'll see a warning that the following weights are newly initialized: `['classifier.bias', 'classifier.weight', 'pooler.dense.bias', 'pooler.dense.weight']`. From this, it follows that the `classifier` and `pooler` layers should be added to: `modules_to_save=["classifier", "pooler"]`. </Tip> ### Extending the vocabulary For many language fine-tuning tasks, extending the model's vocabulary is necessary since new tokens are being introduced. This requires extending the embedding layer to account for the new tokens and, depending on the fine-tuning method, also storing the embedding layer in addition to the adapter weights when saving the adapter. There are a few ways of achieving this ordered by parameter effectiveness: - [trainable tokens](../package_reference/trainable_tokens), train only the specified tokens, optionally store only the updated values - training an adapter on the embedding matrix, optionally store only the updated values - full-finetuning of the embedding layer #### Using trainable tokens Let's start with trainable tokens, in this case its [LoRA integration](../developer_guides/lora#efficiently-train-tokens-alongside-lora). If you're interested in only training the new embeddings and nothing else, refer to the [standalone documentation](../package_reference/trainable_tokens). To enable selective token training of the embedding layer, you'll need to supply the token ids of your newly added tokens via the `trainable_token_indices` parameter. Optionally you can specify which layer to target if there is more than one embedding layer. For a Mistral model this could look like this: ```python new_tokens = ['<think>', '</think>'] tokenizer.add_tokens(new_tokens) base_model.resize_token_embeddings(len(tokenizer)) lora_config = LoraConfig( ..., trainable_token_indices={'embed_tokens': tokenizer.convert_tokens_to_ids(new_tokens)}, ) ``` If your model uses tied weights (such as the `lm_head`), trainable tokens will try to resolve those and keep them updated as well, so in that case there should be no need for adding `modules_to_save=["lm_head"]`. This only works if the model uses the Transformers convention for tying weights. Saving the model with `model.save_pretrained` may save the full embedding matrix instead of only the difference as a precaution because the embedding matrix was resized. To save space you can disable this behavior by setting `save_embedding_layers=False` when calling `save_pretrained`. This is safe to do as long as you don't modify the embedding matrix through other means as well, as such changes will be not tracked by trainable tokens. #### Using an adapter, e.g. LoRA Prepare the embedding layer by adding it to the `target_modules` of your adapter config. For example, the Mistral config could look like this: ```python config = LoraConfig(..., target_modules=["embed_tokens", "lm_head", "q_proj", "v_proj"]) ``` Once added to `target_modules`, PEFT automatically stores the embedding layer when saving the adapter if the model has the [`~transformers.PreTrainedModel.get_input_embeddings`] and [`~transformers.PreTrainedModel.get_output_embeddings`]. This is generally the case for Transformers models. 
If the model's embedding layer doesn't follow the Transformer's naming scheme but nevertheless implements `get_input_embeddings`, you can still save it by manually passing `save_embedding_layers=True` when saving the adapter: ```python model = get_peft_model(...) # train the model model.save_pretrained("my_adapter", save_embedding_layers=True) ``` For inference, load the base model first and resize it the same way you did before you trained the model. After you've resized the base model, you can load the PEFT checkpoint. For a complete example, please check out [this notebook](https://github.com/huggingface/peft/blob/main/examples/causal_language_modeling/peft_lora_clm_with_additional_tokens.ipynb). #### Full fine-tuning Full fine-tuning is more costly in terms of VRAM or storage space but if all else fails, you can fall back to this and see if it works for you. Achieve it by adding the name of the embedding layer to `modules_to_save`. Note that you need to add tied layers as well, e.g. `lm_head`. Example for a Mistral model with LoRA: ```python config = LoraConfig(..., modules_to_save=["embed_tokens", "lm_head"], target_modules=["q_proj", "v_proj"]) ``` ### Getting a warning about "weights not being initialized from the model checkpoint" When you load your PEFT model which has been trained on a task (for example, classification), you may get a warning like: > Some weights of LlamaForSequenceClassification were not initialized from the model checkpoint at meta-llama/Llama-3.2-1B and are newly initialized: ['score.weight']. You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. Although this looks scary, it is most likely nothing to worry about. This warning comes from Transformers, and it isn't a PEFT specific warning. It lets you know that a randomly initialized classification head (`score`) is attached to the base model, and the head must be trained to produce sensible predictions. When you get this warning _before_ training the model, PEFT automatically takes care of making the classification head trainable if you correctly passed the `task_type` argument to the PEFT config. ```python from peft import LoraConfig, TaskType lora_config = LoraConfig(..., task_type=TaskType.SEQ_CLS) ``` If your classification head does not follow the usual naming conventions from Transformers (which is rare), you have to explicitly tell PEFT the name of the head in `modules_to_save`. ```python lora_config = LoraConfig(..., modules_to_save=["name-of-classification-head"]) ``` To check the name of the classification head, print the model and it should be the last module. If you get this warning from your inference code, i.e. _after_ training the model, when you load the PEFT model, you always have to load the Transformers model first. Since Transformers does not know that you will load PEFT weights afterwards, it still gives the warning. As always, it is best practice to ensure the model works correctly for inference by running some validation on it. ### Check layer and model status Sometimes a PEFT model can end up in a bad state, especially when handling multiple adapters. There can be some confusion around what adapters exist, which one is active, which one is merged, etc. To help investigate this issue, call the [`~peft.PeftModel.get_layer_status`] and the [`~peft.PeftModel.get_model_status`] methods. The [`~peft.PeftModel.get_layer_status`] method gives you a detailed overview of each targeted layer's active, merged, and available adapters. 
```python >>> from transformers import AutoModel >>> from peft import get_peft_model, LoraConfig >>> model_id = "google/flan-t5-small" >>> model = AutoModel.from_pretrained(model_id) >>> model = get_peft_model(model, LoraConfig()) >>> model.get_layer_status() [TunerLayerStatus(name='model.encoder.block.0.layer.0.SelfAttention.q', module_type='lora.Linear', enabled=True, active_adapters=['default'], merged_adapters=[], requires_grad={'default': True}, available_adapters=['default']), TunerLayerStatus(name='model.encoder.block.0.layer.0.SelfAttention.v', module_type='lora.Linear', enabled=True, active_adapters=['default'], merged_adapters=[], requires_grad={'default': True}, available_adapters=['default']), ...] >>> model.get_model_status() TunerModelStatus( base_model_type='T5Model', adapter_model_type='LoraModel', peft_types={'default': 'LORA'}, trainable_params=344064, total_params=60855680, num_adapter_layers=48, enabled=True, active_adapters=['default'], merged_adapters=[], requires_grad={'default': True}, available_adapters=['default'], ) ``` In the model state output, you should look out for entries that say `"irregular"`. This means PEFT detected an inconsistent state in the model. For instance, if `merged_adapters="irregular"`, it means that for at least one adapter, it was merged on some target modules but not on others. The inference results will most likely be incorrect as a result. The best way to resolve this issue is to reload the whole model and adapter checkpoint(s). Ensure that you don't perform any incorrect operations on the model, e.g. manually merging adapters on some modules but not others. Convert the layer status into a pandas `DataFrame` for an easier visual inspection. ```python from dataclasses import asdict import pandas as pd df = pd.DataFrame(asdict(layer) for layer in model.get_layer_status()) ``` It is possible to get this information for non-PEFT models if they are using PEFT layers under the hood, but some information like the `base_model_type` or the `peft_types` cannot be determined in that case. As an example, you can call this on a [diffusers](https://huggingface.co/docs/diffusers/index) model like so: ```python >>> import torch >>> from diffusers import StableDiffusionPipeline >>> from peft import get_model_status, get_layer_status >>> path = "runwayml/stable-diffusion-v1-5" >>> lora_id = "takuma104/lora-test-text-encoder-lora-target" >>> pipe = StableDiffusionPipeline.from_pretrained(path, torch_dtype=torch.float16) >>> pipe.load_lora_weights(lora_id, adapter_name="adapter-1") >>> pipe.load_lora_weights(lora_id, adapter_name="adapter-2") >>> pipe.set_lora_device(["adapter-2"], "cuda") >>> get_layer_status(pipe.text_encoder) [TunerLayerStatus(name='text_model.encoder.layers.0.self_attn.k_proj', module_type='lora.Linear', enabled=True, active_adapters=['adapter-2'], merged_adapters=[], requires_grad={'adapter-1': False, 'adapter-2': True}, available_adapters=['adapter-1', 'adapter-2'], devices={'adapter-1': ['cpu'], 'adapter-2': ['cuda']}), TunerLayerStatus(name='text_model.encoder.layers.0.self_attn.v_proj', module_type='lora.Linear', enabled=True, active_adapters=['adapter-2'], merged_adapters=[], requires_grad={'adapter-1': False, 'adapter-2': True}, devices={'adapter-1': ['cpu'], 'adapter-2': ['cuda']}), ...] 
>>> get_model_status(pipe.unet) TunerModelStatus( base_model_type='other', adapter_model_type='None', peft_types={}, trainable_params=797184, total_params=861115332, num_adapter_layers=128, enabled=True, active_adapters=['adapter-2'], merged_adapters=[], requires_grad={'adapter-1': False, 'adapter-2': True}, available_adapters=['adapter-1', 'adapter-2'], devices={'adapter-1': ['cpu'], 'adapter-2': ['cuda']}, ) ``` ## Speed ### Loading adapter weights is slow Loading adapters like LoRA weights should generally be fast compared to loading the base model. However, there can be use cases where the adapter weights are quite large or where users need to load a large number of adapters -- the loading time can add up in this case. The reason for this is that the adapter weights are first initialized and then overridden by the loaded weights, which is wasteful. To speed up the loading time, you can pass the `low_cpu_mem_usage=True` argument to [`~PeftModel.from_pretrained`] and [`~PeftModel.load_adapter`]. <Tip> If this option works well across different use cases, it may become the default for adapter loading in the future. </Tip> ## Reproducibility ### Models using batch norm When loading a trained PEFT model where the base model uses batch norm (e.g. `torch.nn.BatchNorm1d` or `torch.nn.BatchNorm2d`), you may find that you cannot reproduce the exact same outputs. This is because the batch norm layers keep track of running stats during training, but these stats are not part of the PEFT checkpoint. Therefore, when you load the PEFT model, the running stats of the base model will be used (i.e. from before training with PEFT). Depending on your use case, this may not be a big deal. If, however, you need your outputs to be 100% reproducible, you can achieve this by adding the batch norm layers to `modules_to_save`. Below is an example of this using resnet and LoRA. Notice that we set `modules_to_save=["classifier", "normalization"]`. We need the `"classifier"` argument because our task is image classification, and we add the `"normalization"` argument to ensure that the batch norm layers are saved in the PEFT checkpoint. ```python from transformers import AutoModelForImageClassification from peft import LoraConfig, get_peft_model model_id = "microsoft/resnet-18" base_model = AutoModelForImageClassification.from_pretrained(self.model_id) config = LoraConfig( target_modules=["convolution"], modules_to_save=["classifier", "normalization"], ), ``` Depending on the type of model you use, the batch norm layers could have different names than `"normalization"`, so please ensure that the name matches your model architecture. ## Version mismatch ### Error while loading the config because of an unexpected keyword argument When you encounter an error like the one shown below, it means the adapter you're trying to load was trained with a more recent version of PEFT than the version you have installed on your system. ``` TypeError: LoraConfig.__init__() got an unexpected keyword argument <argument-name> ``` The best way to resolve this issue is to install the latest PEFT version: ```sh python -m pip install -U PEFT ``` If the adapter was trained from a source install of PEFT (an unreleased version of PEFT), then you also need to install PEFT from source. ```sh python -m pip install -U git+https://github.com/huggingface/peft.git ``` If it is not possible for you to upgrade PEFT, there is a workaround you can try. Assume the error message says that the unknown keyword argument is named `foobar`. 
Search inside the `adapter_config.json` of this PEFT adapter for the `foobar` entry and delete it from the file. Then save the file and try loading the model again. This workaround usually works: as long as `foobar` was left at its default value when the adapter was trained, removing the entry is harmless. If it was set to some other value, however, you will silently get incorrect results. Upgrading PEFT remains the recommended solution.
peft/docs/source/developer_guides/troubleshooting.md/0
{ "file_path": "peft/docs/source/developer_guides/troubleshooting.md", "repo_id": "peft", "token_count": 6447 }
231
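A small sketch of the `low_cpu_mem_usage=True` loading tip from the Speed section of the troubleshooting guide above; the base model and adapter ids are placeholders.

```python
import torch
from transformers import AutoModelForCausalLM
from peft import PeftModel

base_model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-3.2-1B", torch_dtype=torch.float16)

# Skip the redundant initialization of adapter weights that would be overwritten anyway.
model = PeftModel.from_pretrained(base_model, "your-org/your-lora-adapter", low_cpu_mem_usage=True)

# The same flag is accepted when loading further adapters.
model.load_adapter("your-org/your-second-adapter", adapter_name="second", low_cpu_mem_usage=True)
```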
PEFT_TYPE="boft" BLOCK_NUM=8 BLOCK_SIZE=0 N_BUTTERFLY_FACTOR=1 ITER_NUM=50000 export RUN_NAME="${PEFT_TYPE}_${BLOCK_NUM}${BLOCK_SIZE}${N_BUTTERFLY_FACTOR}" export MODEL_NAME="stabilityai/stable-diffusion-2-1" # export MODEL_NAME="runwayml/stable-diffusion-v1-5" export DATASET_NAME="oftverse/control-celeba-hq" export CKPT_NAME="checkpoint-${ITER_NUM}" export OUTPUT_DIR="./output/${DATASET_NAME}/${RUN_NAME}/${CKPT_NAME}" export CONTROLNET_PATH="${OUTPUT_DIR}/controlnet/model.safetensors" export UNET_PATH="${OUTPUT_DIR}/unet/${RUN_NAME}" accelerate launch eval.py \ --pretrained_model_name_or_path=$MODEL_NAME \ --dataset_name=$DATASET_NAME \ --controlnet_path=$CONTROLNET_PATH \ --unet_path=$UNET_PATH \ --adapter_name=$RUN_NAME \ --output_dir=$OUTPUT_DIR \ --dataset_name=$DATASET_NAME \ --vis_overlays \
peft/examples/boft_controlnet/eval.sh/0
{ "file_path": "peft/examples/boft_controlnet/eval.sh", "repo_id": "peft", "token_count": 370 }
232
<jupyter_start><jupyter_code>import os import torch from accelerate.logging import get_logger from diffusers import StableDiffusionPipeline from diffusers.utils import check_min_version from peft import PeftModel # Will error if the minimal version of diffusers is not installed. Remove at your own risks. check_min_version("0.10.0.dev0") logger = get_logger(__name__) MODEL_NAME = "stabilityai/stable-diffusion-2-1" # MODEL_NAME="runwayml/stable-diffusion-v1-5" PEFT_TYPE="boft" BLOCK_NUM=8 BLOCK_SIZE=0 N_BUTTERFLY_FACTOR=1 SELECTED_SUBJECT="backpack" EPOCH_IDX = 200 PROJECT_NAME=f"dreambooth_{PEFT_TYPE}" RUN_NAME=f"{SELECTED_SUBJECT}_{PEFT_TYPE}_{BLOCK_NUM}{BLOCK_SIZE}{N_BUTTERFLY_FACTOR}" OUTPUT_DIR=f"./data/output/{PEFT_TYPE}" def get_boft_sd_pipeline( ckpt_dir, base_model_name_or_path=None, epoch=int, dtype=torch.float32, device="auto", adapter_name="default" ): if device == "auto": device = torch.accelerator.current_accelerator().type if hasattr(torch, "accelerator") else "cuda" if base_model_name_or_path is None: raise ValueError("Please specify the base model name or path") pipe = StableDiffusionPipeline.from_pretrained( base_model_name_or_path, torch_dtype=dtype, requires_safety_checker=False ).to(device) load_adapter(pipe, ckpt_dir, epoch, adapter_name) if dtype in (torch.float16, torch.bfloat16): pipe.unet.half() pipe.text_encoder.half() pipe.to(device) return pipe def load_adapter(pipe, ckpt_dir, epoch, adapter_name="default"): unet_sub_dir = os.path.join(ckpt_dir, f"unet/{epoch}", adapter_name) text_encoder_sub_dir = os.path.join(ckpt_dir, f"text_encoder/{epoch}", adapter_name) if isinstance(pipe.unet, PeftModel): pipe.unet.load_adapter(unet_sub_dir, adapter_name=adapter_name) else: pipe.unet = PeftModel.from_pretrained(pipe.unet, unet_sub_dir, adapter_name=adapter_name) if os.path.exists(text_encoder_sub_dir): if isinstance(pipe.text_encoder, PeftModel): pipe.text_encoder.load_adapter(text_encoder_sub_dir, adapter_name=adapter_name) else: pipe.text_encoder = PeftModel.from_pretrained(pipe.text_encoder, text_encoder_sub_dir, adapter_name=adapter_name) def set_adapter(pipe, adapter_name): pipe.unet.set_adapter(adapter_name) if isinstance(pipe.text_encoder, PeftModel): pipe.text_encoder.set_adapter(adapter_name) prompt = "a photo of sks backpack on a wooden floor" negative_prompt = "low quality, blurry, unfinished" %%time pipe = get_boft_sd_pipeline(OUTPUT_DIR, MODEL_NAME, EPOCH_IDX, adapter_name=RUN_NAME) %%time image = pipe(prompt, num_inference_steps=50, guidance_scale=7, negative_prompt=negative_prompt).images[0] image # load and reset another adapter # WARNING: requires training DreamBooth with `boft_bias=None` SELECTED_SUBJECT="dog" EPOCH_IDX = 200 RUN_NAME=f"{SELECTED_SUBJECT}_{PEFT_TYPE}_{BLOCK_NUM}{BLOCK_SIZE}{N_BUTTERFLY_FACTOR}" load_adapter(pipe, OUTPUT_DIR, epoch=EPOCH_IDX, adapter_name=RUN_NAME) set_adapter(pipe, adapter_name=RUN_NAME) %%time prompt = "a photo of sks dog running on the beach" negative_prompt = "low quality, blurry, unfinished" image = pipe(prompt, num_inference_steps=50, guidance_scale=7, negative_prompt=negative_prompt).images[0] image<jupyter_output><empty_output>
peft/examples/boft_dreambooth/dreambooth_inference.ipynb/0
{ "file_path": "peft/examples/boft_dreambooth/dreambooth_inference.ipynb", "repo_id": "peft", "token_count": 1395 }
233
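The notebook above builds the pipeline through `get_boft_sd_pipeline` and switches adapters with `load_adapter`/`set_adapter`. Below is a minimal sketch of calling that helper with all arguments spelled out; it assumes the notebook's function definitions are in scope, and the checkpoint directory, epoch, and adapter name are hypothetical values that follow the `OUTPUT_DIR`/`RUN_NAME` pattern used there.

```python
# Minimal sketch (assumes get_boft_sd_pipeline from the notebook above is defined).
# "./data/output/boft", epoch 200, and "backpack_boft_801" are hypothetical values
# following the OUTPUT_DIR / RUN_NAME pattern of the notebook.
pipe = get_boft_sd_pipeline(
    "./data/output/boft",
    base_model_name_or_path="stabilityai/stable-diffusion-2-1",
    epoch=200,
    adapter_name="backpack_boft_801",
)
image = pipe(
    "a photo of sks backpack in the snow",
    num_inference_steps=50,
    guidance_scale=7,
    negative_prompt="low quality, blurry, unfinished",
).images[0]
image.save("backpack_snow.png")
```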
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # DreamBooth fine-tuning with HRA This guide demonstrates how to use the Householder reflection adaptation (HRA) method to fine-tune DreamBooth with the `stabilityai/stable-diffusion-2-1` model. HRA provides a new perspective connecting LoRA to OFT and achieves encouraging performance in various downstream tasks. HRA adapts a pre-trained model by multiplying each frozen weight matrix with a chain of r learnable Householder reflections (HRs). HRA can be interpreted as either an OFT adapter or an adaptive LoRA. Consequently, it harnesses the advantages of both strategies, reducing parameters and computation costs while penalizing the loss of pre-training knowledge. For further details on HRA, please consult the [original HRA paper](https://huggingface.co/papers/2405.17484). In this guide, we provide a DreamBooth fine-tuning script that is available in [PEFT's GitHub repo examples](https://github.com/huggingface/peft/tree/main/examples/hra_dreambooth). This implementation is adapted from [peft's boft_dreambooth](https://github.com/huggingface/peft/tree/main/examples/boft_dreambooth). You can try it out and fine-tune on your custom images. ## Set up your environment Start by cloning the PEFT repository: ```bash git clone --recursive https://github.com/huggingface/peft ``` Navigate to the directory containing the training scripts for fine-tuning DreamBooth with HRA: ```bash cd peft/examples/hra_dreambooth ``` Set up your environment: install PEFT and all the required libraries. At the time of writing this guide we recommend installing PEFT from source. The following environment setup should work on A100 and H100: ```bash conda create --name peft python=3.10 conda activate peft conda install pytorch==2.1.2 torchvision==0.16.2 torchaudio==2.1.2 pytorch-cuda=11.8 -c pytorch -c nvidia conda install xformers -c xformers pip install -r requirements.txt pip install git+https://github.com/huggingface/peft ``` ## Download the data The [dreambooth](https://github.com/google/dreambooth) dataset should be cloned automatically into the following structure when you run the training script. ``` hra_dreambooth ├── data │   └── dreambooth │       └── dataset │           ├── backpack │           └── backpack_dog │           ... ``` You can also put your custom images into `hra_dreambooth/data/dreambooth/dataset`. ## Fine-tune DreamBooth with HRA ```bash class_idx=0 bash ./train_dreambooth.sh $class_idx ``` where `$class_idx` selects one of the subjects, ranging from 0 to 29. Launch the training script with `accelerate` and pass hyperparameters, as well as HRA-specific arguments to it such as: - `use_hra`: Enables HRA in the training script. - `hra_r`: the number of HRs (i.e., r) across different layers, expressed as an `int`. 
As r increases, the number of trainable parameters increases, which generally leads to improved performance. However, this also results in higher memory consumption and longer computation times. Therefore, r is usually set to 8. **Note:** please set r to an even number to avoid potential issues during initialization. - `hra_apply_GS`: Applies Gram-Schmidt orthogonalization. Default is `false`. - `hra_bias`: specify whether the `bias` parameters should be trained. Can be `none`, `all` or `hra_only`. If you are running this script on Windows, you may need to set `--num_dataloader_workers` to 0. To learn more about DreamBooth fine-tuning with prior-preserving loss, check out the [Diffusers documentation](https://huggingface.co/docs/diffusers/training/dreambooth#finetuning-with-priorpreserving-loss). ## Generate images with the fine-tuned model To generate images with the fine-tuned model, simply run the Jupyter notebook `dreambooth_inference.ipynb` under `./examples/hra_dreambooth` for visualization.
peft/examples/hra_dreambooth/README.md/0
{ "file_path": "peft/examples/hra_dreambooth/README.md", "repo_id": "peft", "token_count": 1329 }
234
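Since the README above describes HRA only through the training shell script, here is a hedged sketch of what attaching an HRA adapter to the Stable Diffusion UNet can look like with PEFT's Python API. The class and argument names (`HRAConfig`, `r`, `apply_GS`) follow recent PEFT releases but should be verified against your installed version; the attention-projection module names are the usual diffusers UNet ones.

```python
# Hedged sketch: attach an HRA adapter to the SD 2.1 UNet with PEFT.
# Verify HRAConfig and its arguments against your PEFT version before relying on this.
import torch
from diffusers import StableDiffusionPipeline
from peft import HRAConfig, get_peft_model

pipe = StableDiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-1", torch_dtype=torch.float16
)
config = HRAConfig(
    r=8,  # number of Householder reflections per adapted layer; keep it even
    apply_GS=False,  # optional Gram-Schmidt orthogonalization
    target_modules=["to_q", "to_k", "to_v", "to_out.0"],  # UNet attention projections
)
pipe.unet = get_peft_model(pipe.unet, config)
pipe.unet.print_trainable_parameters()
```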
# Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from datasets import load_dataset from torch.utils.data import DataLoader, Dataset from transformers import AutoModelForVision2Seq, AutoProcessor, BitsAndBytesConfig from peft import LoraConfig, get_peft_model # Let's define the LoraConfig config = LoraConfig( r=16, lora_alpha=32, lora_dropout=0.05, bias="none", ) # We load our model and processor using `transformers` model = AutoModelForVision2Seq.from_pretrained( "Salesforce/blip2-opt-2.7b", quantization_config=BitsAndBytesConfig(load_in_8bit=True) ) processor = AutoProcessor.from_pretrained("Salesforce/blip2-opt-2.7b") # Get our peft model and print the number of trainable parameters model = get_peft_model(model, config) model.print_trainable_parameters() # Let's load the dataset here! dataset = load_dataset("ybelkada/football-dataset", split="train") class ImageCaptioningDataset(Dataset): def __init__(self, dataset, processor): self.dataset = dataset self.processor = processor def __len__(self): return len(self.dataset) def __getitem__(self, idx): item = self.dataset[idx] encoding = self.processor(images=item["image"], padding="max_length", return_tensors="pt") # remove batch dimension encoding = {k: v.squeeze() for k, v in encoding.items()} encoding["text"] = item["text"] return encoding def collator(batch): # pad the input_ids and attention_mask processed_batch = {} for key in batch[0].keys(): if key != "text": processed_batch[key] = torch.stack([example[key] for example in batch]) else: text_inputs = processor.tokenizer( [example["text"] for example in batch], padding=True, return_tensors="pt" ) processed_batch["input_ids"] = text_inputs["input_ids"] processed_batch["attention_mask"] = text_inputs["attention_mask"] return processed_batch train_dataset = ImageCaptioningDataset(dataset, processor) train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=2, collate_fn=collator) optimizer = torch.optim.AdamW(model.parameters(), lr=5e-5) device = torch.accelerator.current_accelerator().type if hasattr(torch, "accelerator") else "cuda" model.train() for epoch in range(50): print("Epoch:", epoch) for idx, batch in enumerate(train_dataloader): input_ids = batch.pop("input_ids").to(device) pixel_values = batch.pop("pixel_values").to(device, torch.float16) outputs = model(input_ids=input_ids, pixel_values=pixel_values, labels=input_ids) loss = outputs.loss print("Loss:", loss.item()) loss.backward() optimizer.step() optimizer.zero_grad() if idx % 10 == 0: generated_output = model.generate(pixel_values=pixel_values) print(processor.batch_decode(generated_output, skip_special_tokens=True))
peft/examples/int8_training/fine_tune_blip2_int8.py/0
{ "file_path": "peft/examples/int8_training/fine_tune_blip2_int8.py", "repo_id": "peft", "token_count": 1321 }
235
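The BLIP-2 script above stops after the training loop; the sketch below shows one way to reload the LoRA adapter for captioning afterwards, assuming you first saved it with `model.save_pretrained(...)`. It is only a sketch: the adapter path and image file are hypothetical placeholders, and the 8-bit loading mirrors the training setup.

```python
# Hedged inference sketch for the BLIP-2 int8 LoRA example above.
# "./blip2-football-lora" and "example.jpg" are hypothetical placeholders.
import torch
from PIL import Image
from transformers import AutoModelForVision2Seq, AutoProcessor, BitsAndBytesConfig
from peft import PeftModel

base = AutoModelForVision2Seq.from_pretrained(
    "Salesforce/blip2-opt-2.7b",
    quantization_config=BitsAndBytesConfig(load_in_8bit=True),
)
processor = AutoProcessor.from_pretrained("Salesforce/blip2-opt-2.7b")
model = PeftModel.from_pretrained(base, "./blip2-football-lora")
model.eval()

image = Image.open("example.jpg")
inputs = processor(images=image, return_tensors="pt").to(model.device, torch.float16)
with torch.no_grad():
    generated = model.generate(**inputs, max_new_tokens=30)
print(processor.batch_decode(generated, skip_special_tokens=True)[0])
```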
# Copyright 2025-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from typing import Optional import torch from datasets import load_dataset from transformers import ( AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig, DataCollatorForLanguageModeling, Trainer, TrainingArguments, ) from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training from peft.optimizers import create_lorafa_optimizer def train_model( base_model_name_or_path: str, dataset_name_or_path: str, output_dir: str, batch_size: int, num_epochs: int, lr: float, cutoff_len: int, quantize: bool, eval_step: int, save_step: int, lora_rank: int, lora_alpha: int, lora_dropout: float, lora_target_modules: Optional[str], lorafa: bool, ): os.environ["TOKENIZERS_PARALLELISM"] = "false" is_bf16_supported = False device_map = "cpu" if torch.cuda.is_available(): is_bf16_supported = torch.cuda.is_bf16_supported() device_map = "cuda" elif torch.xpu.is_available(): is_bf16_supported = torch.xpu.is_bf16_supported() device_map = "xpu" compute_dtype = torch.bfloat16 if is_bf16_supported else torch.float16 # load tokenizer tokenizer = AutoTokenizer.from_pretrained(base_model_name_or_path) # load model if quantize: model = AutoModelForCausalLM.from_pretrained( base_model_name_or_path, quantization_config=BitsAndBytesConfig( load_in_4bit=True, bnb_4bit_compute_dtype=compute_dtype, bnb_4bit_use_double_quant=False, bnb_4bit_quant_type="nf4", ), torch_dtype=compute_dtype, device_map=device_map, ) # setup for quantized training model = prepare_model_for_kbit_training(model, use_gradient_checkpointing=True) else: model = AutoModelForCausalLM.from_pretrained( base_model_name_or_path, torch_dtype=compute_dtype, device_map=device_map ) # LoRA config for the PEFT model if lora_target_modules is not None: if lora_target_modules == "all-linear": target_modules = "all-linear" else: target_modules = lora_target_modules.split(",") else: target_modules = ["q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj"] lora_config = LoraConfig( r=lora_rank, lora_alpha=lora_alpha, target_modules=target_modules, lora_dropout=lora_dropout, bias="none", ) # get the peft model with LoRA config model = get_peft_model(model, lora_config) tokenizer.pad_token = tokenizer.eos_token # Load the dataset dataset = load_dataset(dataset_name_or_path) def tokenize_function(examples): inputs = tokenizer(examples["query"], padding="max_length", truncation=True, max_length=cutoff_len) outputs = tokenizer(examples["response"], padding="max_length", truncation=True, max_length=cutoff_len) inputs["labels"] = outputs["input_ids"].copy() return inputs # Tokenize the dataset and prepare for training tokenized_datasets = dataset.map(tokenize_function, batched=True, remove_columns=dataset["train"].column_names) dataset = tokenized_datasets["train"].train_test_split(test_size=0.1, shuffle=True, seed=42) train_dataset = dataset["train"] eval_dataset = dataset["test"] # Data collator to dynamically pad the batched examples 
data_collator = DataCollatorForLanguageModeling(tokenizer, mlm=False) # Define training arguments training_args = TrainingArguments( output_dir=output_dir, num_train_epochs=num_epochs, per_device_train_batch_size=batch_size, per_device_eval_batch_size=batch_size, warmup_steps=100, weight_decay=0.01, logging_dir="./logs", logging_steps=eval_step, save_steps=save_step, save_total_limit=2, gradient_accumulation_steps=1, bf16=True if compute_dtype == torch.bfloat16 else False, fp16=True if compute_dtype == torch.float16 else False, learning_rate=lr, ) # Here we initialize the LoRA-FA Optimizer # After this, all adapter A will be fixed, only adapter B will be trainable if lorafa: optimizer = create_lorafa_optimizer( model=model, r=lora_rank, lora_alpha=lora_alpha, lr=lr, weight_decay=training_args.weight_decay ) trainer = Trainer( model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, data_collator=data_collator, optimizers=(optimizer, None), ) else: trainer = Trainer( model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, data_collator=data_collator, ) # Start model training trainer.train() # Save the model and tokenizer locally model.save_pretrained(output_dir) tokenizer.save_pretrained(output_dir) if __name__ == "__main__": import argparse parser = argparse.ArgumentParser(description="Fine-tune Meta-Llama-3-8B-Instruct with LoRA-FA and PEFT") parser.add_argument( "--base_model_name_or_path", type=str, default="meta-llama/Meta-Llama-3-8B-Instruct", help="Base model name or path", ) parser.add_argument( "--dataset_name_or_path", type=str, default="meta-math/MetaMathQA-40K", help="Dataset name or path" ) parser.add_argument("--output_dir", type=str, help="Output directory for the fine-tuned model") parser.add_argument("--batch_size", type=int, default=1, help="Batch size") parser.add_argument("--num_epochs", type=int, default=3, help="Number of training epochs") parser.add_argument("--lr", type=float, default=7e-5, help="Learning rate") parser.add_argument("--cutoff_len", type=int, default=1024, help="Cutoff length for tokenization") parser.add_argument("--quantize", action="store_true", help="Use quantization") parser.add_argument("--eval_step", type=int, default=10, help="Evaluation step interval") parser.add_argument("--save_step", type=int, default=100, help="Save step interval") parser.add_argument("--lora_rank", type=int, default=16, help="LoRA rank") parser.add_argument("--lora_alpha", type=int, default=32, help="LoRA alpha") parser.add_argument("--lora_dropout", type=float, default=0.05, help="LoRA dropout rate") parser.add_argument( "--lora_target_modules", type=str, default=None, help="Comma-separated list of target modules for LoRA" ) parser.add_argument("--lorafa", action="store_true", help="Use LoRA-FA Optimizer") args = parser.parse_args() train_model( base_model_name_or_path=args.base_model_name_or_path, dataset_name_or_path=args.dataset_name_or_path, output_dir=args.output_dir, batch_size=args.batch_size, num_epochs=args.num_epochs, lr=args.lr, cutoff_len=args.cutoff_len, quantize=args.quantize, eval_step=args.eval_step, save_step=args.save_step, lora_rank=args.lora_rank, lora_alpha=args.lora_alpha, lora_dropout=args.lora_dropout, lora_target_modules=args.lora_target_modules, lorafa=args.lorafa, )
peft/examples/lorafa_finetune/lorafa_finetuning.py/0
{ "file_path": "peft/examples/lorafa_finetune/lorafa_finetuning.py", "repo_id": "peft", "token_count": 3415 }
236
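Besides the CLI defined in `__main__`, the `train_model` function above can be called directly from Python. The sketch below mirrors the script's argparse defaults except that quantization and the LoRA-FA optimizer are enabled; the output directory is a hypothetical placeholder.

```python
# Hedged sketch: call train_model() from lorafa_finetuning.py programmatically.
# Values mirror the script's argparse defaults; output_dir is hypothetical,
# and quantize/lorafa are switched on for illustration.
from lorafa_finetuning import train_model

train_model(
    base_model_name_or_path="meta-llama/Meta-Llama-3-8B-Instruct",
    dataset_name_or_path="meta-math/MetaMathQA-40K",
    output_dir="./llama3-8b-metamath-lorafa",
    batch_size=1,
    num_epochs=3,
    lr=7e-5,
    cutoff_len=1024,
    quantize=True,              # 4-bit NF4 base weights via bitsandbytes
    eval_step=10,
    save_step=100,
    lora_rank=16,
    lora_alpha=32,
    lora_dropout=0.05,
    lora_target_modules=None,   # falls back to the attention/MLP projections listed above
    lorafa=True,                # fix adapter A, train only adapter B
)
```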
# QALoRA: Quantization-Aware Low-Rank Adaptation ## Introduction [QALoRA](https://huggingface.co/papers/2309.14717) is a quantization-aware version of Low-Rank Adaptation that enables efficient fine-tuning of quantized large language models. QALoRA uses input feature pooling and a specialized grouping technique to work with quantized weights, significantly reducing memory requirements while preserving performance. QALoRA enables fine-tuning of models that would otherwise be too large for consumer GPUs. In PEFT, it currently only works with GPTQ-quantized models. ## Quick start ```python import torch from peft import LoraConfig, get_peft_model from transformers import AutoTokenizer, AutoModelForCausalLM, Trainer, TrainingArguments, DataCollatorForLanguageModeling from datasets import load_dataset # Load a quantized model (example with GPTQ quantization) model = AutoModelForCausalLM.from_pretrained( "TheBloke/Llama-2-7b-GPTQ", revision="gptq-4bit-32g-actorder_True", device_map="auto" ) tokenizer = AutoTokenizer.from_pretrained("TheBloke/Llama-2-7b-GPTQ") dataset = load_dataset("timdettmers/openassistant-guanaco", split="train") # Configure QALoRA parameters lora_config = LoraConfig( use_qalora=True, qalora_group_size=8, r=16, lora_alpha=32, target_modules=["q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj"], lora_dropout=0.05, ) # Create the PEFT model peft_model = get_peft_model(model, lora_config) # Set up trainer and train trainer = Trainer( model=peft_model, train_dataset=dataset, args=TrainingArguments( per_device_train_batch_size=1, gradient_accumulation_steps=4, num_train_epochs=3, learning_rate=3e-4, output_dir="qalora-llama-2-7b" ), data_collator=DataCollatorForLanguageModeling(tokenizer, mlm=False), ) trainer.train() peft_model.save_pretrained("qalora-llama-2-7b") ``` To use QALoRA, simply set `use_qalora = True` and specify a `qalora_group_size` in your LoRA configuration. The group size controls the memory/performance tradeoff - smaller values use less memory but may affect performance. ## Command Line Examples Run the fine-tuning script with a GPTQ-quantized model. You can customize the pooling group size (default is 16): ```bash python examples/qalora_finetuning/qalora_gptq_finetuning.py \ --base_model TheBloke/Llama-2-7b-GPTQ \ --use_qalora \ --qalora_group_size 32 ``` ### Full example of the script ```bash python qalora_gptq_finetuning.py \ --base_model "TheBloke/Llama-2-13b-GPTQ" \ --output_dir "PATH_TO_OUTPUT_DIR" \ --batch_size 1 \ --num_epochs 3 \ --learning_rate 3e-4 \ --cutoff_len 512 \ --use_qalora \ --qalora_group_size 32 \ --eval_step 10 \ --save_step 100 \ --device "auto" \ --lora_r 16 \ --lora_alpha 32 \ --lora_dropout 0.05 \ --lora_target_modules "q_proj,k_proj,v_proj,o_proj,gate_proj,up_proj,down_proj" \ --push_to_hub ``` ## Use the model on 🤗 You can load and use the finetuned model like any other PEFT model: ```python from peft import PeftModel, PeftConfig from transformers import AutoModelForCausalLM, AutoTokenizer # Load the base quantized model base_model = AutoModelForCausalLM.from_pretrained( "TheBloke/Llama-2-7b-GPTQ", device_map="auto" ) tokenizer = AutoTokenizer.from_pretrained("TheBloke/Llama-2-7b-GPTQ") # Load the PEFT adapter peft_model_id = "YOUR_HF_REPO" model = PeftModel.from_pretrained(base_model, peft_model_id) # Generate text input_text = "Hello, I'm a language model" inputs = tokenizer(input_text, return_tensors="pt").to(model.device) outputs = model.generate(**inputs, max_length=100) print(tokenizer.decode(outputs[0], skip_special_tokens=True)) ``` ## QALoRA vs. 
LoRA QALoRA offers several advantages over standard LoRA: 1. **Memory efficiency**: QALoRA works directly with quantized models, reducing memory usage by up to 60-70% compared to standard LoRA. 2. **Hardware accessibility**: Enables fine-tuning of larger models (13B, 70B) on consumer GPUs that would be impossible with standard LoRA. 3. **Performance preservation**: Despite quantization, QALoRA can achieve comparable performance to full-precision LoRA in many tasks. ## Implementation Details: Merging with Quantized Models > **Note:** The current implementation differs from the original QA-LoRA paper's approach. While the QA-LoRA paper describes a direct weight modification technique using "beta shift" to modify quantized weights without full dequantization, this implementation uses a different approach: 1. The quantized model is first dequantized to full precision 2. The QALoRA adapter weights are then merged with the dequantized model 3. The merged model must be re-quantized if quantization is still desired ### Memory Considerations This process requires significant memory (enough to hold the full dequantized model) and additional computation for the re-quantization step. For large models, this may not be possible on consumer hardware. For most use cases, we recommend keeping the base quantized model and the QALoRA adapter separate, loading them with `PeftModel.from_pretrained()` as shown in the usage example above. This approach maintains the memory efficiency benefits of quantization throughout the deployment pipeline. ## Citation ``` @article{dettmers2023qlora, title={QLoRA: Efficient Finetuning of Quantized LLMs}, author={Dettmers, Tim and Pagnoni, Artidoro and Holtzman, Ari and Zettlemoyer, Luke}, journal={arXiv preprint arXiv:2305.14314}, year={2023} } @article{xu2023qalora, title={QA-LoRA: Quantization-Aware Low-Rank Adaptation of Large Language Models}, author={Xu, Yuhui and Liu, Lingxi and Rao, Longhui and Zhao, Teng and Xiong, Zhiwei and Gao, Mingkui}, journal={arXiv preprint arXiv:2309.14717}, year={2023} } ```
peft/examples/qalora_finetuning/README.md/0
{ "file_path": "peft/examples/qalora_finetuning/README.md", "repo_id": "peft", "token_count": 2020 }
237
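As a quick sanity check after training with the quick-start settings from the README above, the saved adapter's config should record the QALoRA-specific fields. A small hedged sketch follows; the adapter directory is the `output_dir` used in the quick start, and the exact attribute names assume they are persisted on the loaded `LoraConfig` as written in the README.

```python
# Hedged sketch: confirm the saved adapter recorded the QALoRA settings.
# "qalora-llama-2-7b" is the output_dir from the quick start above.
from peft import PeftConfig

config = PeftConfig.from_pretrained("qalora-llama-2-7b")
print(type(config).__name__)                         # expected: LoraConfig
print(config.r, config.lora_alpha)                   # 16, 32
print(config.use_qalora, config.qalora_group_size)   # True, 8
```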
accelerate launch --config_file "configs/fsdp_config.yaml" train.py \ --seed 100 \ --model_name_or_path "meta-llama/Llama-2-70b-hf" \ --dataset_name "smangrul/ultrachat-10k-chatml" \ --chat_template_format "chatml" \ --add_special_tokens False \ --append_concat_token False \ --splits "train,test" \ --max_seq_len 2048 \ --num_train_epochs 1 \ --logging_steps 5 \ --log_level "info" \ --logging_strategy "steps" \ --eval_strategy "epoch" \ --save_strategy "epoch" \ --push_to_hub \ --hub_private_repo True \ --hub_strategy "every_save" \ --bf16 True \ --packing True \ --learning_rate 1e-4 \ --lr_scheduler_type "cosine" \ --weight_decay 1e-4 \ --warmup_ratio 0.0 \ --max_grad_norm 1.0 \ --output_dir "mistral-sft-lora-fsdp" \ --per_device_train_batch_size 8 \ --per_device_eval_batch_size 8 \ --gradient_accumulation_steps 4 \ --gradient_checkpointing True \ --use_reentrant False \ --dataset_text_field "content" \ --use_flash_attn True \ --use_peft_lora True \ --lora_r 8 \ --lora_alpha 16 \ --lora_dropout 0.1 \ --lora_target_modules "all-linear" \ --use_4bit_quantization False
peft/examples/sft/run_peft_fsdp.sh/0
{ "file_path": "peft/examples/sft/run_peft_fsdp.sh", "repo_id": "peft", "token_count": 453 }
238
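For reference, the LoRA-related flags in the launch command above correspond roughly to the following PEFT configuration, which the training script assembles internally from `--lora_r`, `--lora_alpha`, `--lora_dropout`, and `--lora_target_modules`. This is a hedged sketch, not the script's actual code.

```python
# Hedged sketch of the LoRA settings requested by the launch flags above
# (--lora_r 8 --lora_alpha 16 --lora_dropout 0.1 --lora_target_modules "all-linear").
from peft import LoraConfig

lora_config = LoraConfig(
    r=8,
    lora_alpha=16,
    lora_dropout=0.1,
    target_modules="all-linear",
    task_type="CAUSAL_LM",
)
print(lora_config)
```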
# X-LoRA examples ## `xlora_inference_mistralrs.py` Perform inference with an X-LoRA model using the mistral.rs inference engine. Mistral.rs supports many base models besides Mistral, and can load models directly from saved LoRA checkpoints. Check out the [adapter model docs](https://github.com/EricLBuehler/mistral.rs/blob/master/docs/ADAPTER_MODELS.md) and the [models support matrix](https://github.com/EricLBuehler/mistral.rs?tab=readme-ov-file#support-matrix). Mistral.rs features X-LoRA support and incorporates techniques such as a dual KV cache, continuous batching, Paged Attention, and optional non-granular scalings, which allow vastly improved throughput. Links: - Installation: https://github.com/EricLBuehler/mistral.rs/blob/master/mistralrs-pyo3/README.md - Runnable example: https://github.com/EricLBuehler/mistral.rs/blob/master/examples/python/xlora_zephyr.py - Adapter model docs and making the ordering file: https://github.com/EricLBuehler/mistral.rs/blob/master/docs/ADAPTER_MODELS.md
peft/examples/xlora/README.md/0
{ "file_path": "peft/examples/xlora/README.md", "repo_id": "peft", "token_count": 320 }
239
# Copyright 2025-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ All utilities not related to data handling. """ import enum import json import os import platform import subprocess import tempfile import warnings from dataclasses import asdict, dataclass from decimal import Decimal, DivisionByZero, InvalidOperation from typing import Any, Callable, Literal, Optional import bitsandbytes import datasets import huggingface_hub import numpy as np import torch import transformers from torch import nn from transformers import ( AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig, get_cosine_schedule_with_warmup, ) import peft from peft import PeftConfig, get_peft_model, prepare_model_for_kbit_training from peft.optimizers import create_lorafa_optimizer, create_loraplus_optimizer from peft.utils import infer_device, SAFETENSORS_WEIGHTS_NAME device = infer_device() if device not in ["cuda", "xpu"]: raise RuntimeError("CUDA or XPU is not available, currently only CUDA or XPU is supported") ACCELERATOR_MEMORY_INIT_THRESHOLD = 500 * 2**20 # 500MB FILE_NAME_DEFAULT_TRAIN_PARAMS = os.path.join(os.path.dirname(__file__), "default_training_params.json") FILE_NAME_TRAIN_PARAMS = "training_params.json" # specific params for this experiment # main results RESULT_PATH = os.path.join(os.path.dirname(__file__), "results") # testing results RESULT_PATH_TEST = os.path.join(os.path.dirname(__file__), "temporary_results") # cancelled results RESULT_PATH_CANCELLED = os.path.join(os.path.dirname(__file__), "cancelled_results") hf_api = huggingface_hub.HfApi() WARMUP_STEP_RATIO = 0.1 @dataclass class TrainConfig: """All configuration parameters associated with training the model Args: model_id: The model identifier dtype: The data type to use for the model max_seq_length: The maximum sequence length batch_size: The batch size for training batch_size_eval: The batch size for eval/test, can be much higher than for training max_steps: The maximum number of steps to train for eval_steps: The number of steps between evaluations compile: Whether to compile the model query_template: The template for the query seed: The random seed grad_norm_clip: The gradient norm clipping value (set to 0 to skip) optimizer_type: The name of a torch optimizer (e.g. AdamW) or a PEFT method ("lora+", "lora-fa") optimizer_kwargs: The optimizer keyword arguments (lr etc.) 
lr_scheduler: The learning rate scheduler (currently only None or 'cosine' are supported) use_amp: Whether to use automatic mixed precision autocast_adapter_dtype: Whether to cast adapter dtype to float32, same argument as in PEFT generation_kwargs: Arguments passed to transformers GenerationConfig (used in evaluation) attn_implementation: The attention implementation to use (if any), see transformers docs """ model_id: str dtype: Literal["float32", "float16", "bfloat16", "int8", "int4"] max_seq_length: int batch_size: int batch_size_eval: int max_steps: int eval_steps: int compile: bool query_template: str seed: int grad_norm_clip: float # set to 0 to skip optimizer_type: str optimizer_kwargs: dict[str, Any] lr_scheduler: Optional[Literal["cosine"]] use_amp: bool autocast_adapter_dtype: bool generation_kwargs: dict[str, Any] attn_implementation: Optional[str] def __post_init__(self) -> None: if not isinstance(self.model_id, str): raise ValueError(f"Invalid model_id: {self.model_id}") if self.dtype not in ["float32", "float16", "bfloat16", "int8", "int4"]: raise ValueError(f"Invalid dtype: {self.dtype}") if self.max_seq_length < 0: raise ValueError(f"Invalid max_seq_length: {self.max_seq_length}") if self.batch_size <= 0: raise ValueError(f"Invalid batch_size: {self.batch_size}") if self.batch_size_eval <= 0: raise ValueError(f"Invalid eval batch_size: {self.batch_size_eval}") if self.max_steps <= 0: raise ValueError(f"Invalid max_steps: {self.max_steps}") if self.eval_steps <= 0: raise ValueError(f"Invalid eval_steps: {self.eval_steps}") if self.eval_steps > self.max_steps: raise ValueError(f"Invalid eval_steps: {self.eval_steps} > max_steps: {self.max_steps}") if self.grad_norm_clip < 0: raise ValueError(f"Invalid grad_norm_clip: {self.grad_norm_clip}") if self.optimizer_type not in ["lora+", "lora-fa"] and not hasattr(torch.optim, self.optimizer_type): raise ValueError(f"Invalid optimizer_type: {self.optimizer_type}") if self.lr_scheduler not in [None, "cosine"]: raise ValueError(f"Invalid lr_scheduler: {self.lr_scheduler}, must be None or 'cosine'") if "{query}" not in self.query_template: raise ValueError("Invalid query_template, must contain '{query}'") def validate_experiment_path(path: str) -> str: # the experiment path should take the form of ./experiments/<peft-method>/<experiment-name> # e.g. 
./experiments/lora/rank32 # it should contain: # - adapter_config.json # - optional: training_params.json if not os.path.exists(FILE_NAME_DEFAULT_TRAIN_PARAMS): raise FileNotFoundError( f"Missing default training params file '{FILE_NAME_DEFAULT_TRAIN_PARAMS}' in the ./experiments directory" ) if not os.path.exists(path): raise FileNotFoundError(f"Path {path} does not exist") # check path structure path_parts = path.rstrip(os.path.sep).split(os.path.sep) if (len(path_parts) != 3) or (path_parts[-3] != "experiments"): raise ValueError( f"Path {path} does not have the correct structure, should be ./experiments/<peft-method>/<experiment-name>" ) experiment_name = os.path.join(*path_parts[-2:]) return experiment_name def get_train_config(path: str) -> TrainConfig: # first, load the default params, then update with experiment-specific params with open(FILE_NAME_DEFAULT_TRAIN_PARAMS) as f: default_config_kwargs = json.load(f) config_kwargs = {} if os.path.exists(path): with open(path) as f: config_kwargs = json.load(f) config_kwargs = {**default_config_kwargs, **config_kwargs} return TrainConfig(**config_kwargs) def init_accelerator() -> int: torch_accelerator_module = getattr(torch, device, torch.cuda) torch.manual_seed(0) torch_accelerator_module.reset_peak_memory_stats() torch_accelerator_module.manual_seed_all(0) # might not be necessary, but just to be sure nn.Linear(1, 1).to(device) accelerator_memory_init = torch_accelerator_module.max_memory_reserved() if accelerator_memory_init > ACCELERATOR_MEMORY_INIT_THRESHOLD: raise RuntimeError( f"{device} memory usage at start is too high: {accelerator_memory_init // 2**20}MB, please ensure that no other " f"processes are running on {device}." ) torch_accelerator_module.reset_peak_memory_stats() accelerator_memory_init = torch_accelerator_module.max_memory_reserved() return accelerator_memory_init def get_tokenizer(*, model_id: str, max_seq_length: int): tokenizer = AutoTokenizer.from_pretrained(model_id) tokenizer.model_max_length = max_seq_length if not tokenizer.pad_token: tokenizer.pad_token = tokenizer.eos_token return tokenizer def get_base_model( *, model_id: str, dtype: Literal["float32", "float16", "bfloat16", "int8", "int4"], compile: bool, attn_implementation: Optional[str], ) -> nn.Module: kwargs: dict[str, Any] = { "pretrained_model_name_or_path": model_id, "device_map": device, "attn_implementation": attn_implementation, } if dtype == "int4": quant_config = BitsAndBytesConfig(load_in_4bit=True) kwargs["quantization_config"] = quant_config elif dtype == "int8": quant_config = BitsAndBytesConfig(load_in_8bit=True) kwargs["quantization_config"] = quant_config elif dtype == "bfloat16": kwargs["torch_dtype"] = torch.bfloat16 elif dtype == "float16": kwargs["torch_dtype"] = torch.float16 elif dtype != "float32": raise ValueError(f"Invalid dtype: {dtype}") model = AutoModelForCausalLM.from_pretrained(**kwargs) if dtype in ["int8", "int4"]: model = prepare_model_for_kbit_training(model) if compile: model = torch.compile(model) return model def get_model( *, model_id: str, dtype: Literal["float32", "float16", "bfloat16", "int8", "int4"], compile: bool, attn_implementation: Optional[str], peft_config: Optional[PeftConfig], autocast_adapter_dtype: bool, ) -> nn.Module: base_model = get_base_model( model_id=model_id, dtype=dtype, compile=compile, attn_implementation=attn_implementation ) if peft_config is None: model = base_model else: model = get_peft_model(base_model, peft_config, autocast_adapter_dtype=autocast_adapter_dtype) return model 
class DummyScheduler: # if no lr scheduler is being used def __init__(self, lr): self.lr = lr def get_last_lr(self): return [self.lr] def step(self): pass def get_optimizer_and_scheduler( model, *, optimizer_type: str, max_steps: int, lr_scheduler_arg: Optional[Literal["cosine"]], **optimizer_kwargs ) -> tuple[torch.optim.Optimizer, Any]: if optimizer_type == "lora+": optimizer = create_loraplus_optimizer(model, optimizer_cls=torch.optim.AdamW, **optimizer_kwargs) elif optimizer_type == "lora-fa": optimizer = create_lorafa_optimizer(model, **optimizer_kwargs) else: cls = getattr(torch.optim, optimizer_type) optimizer = cls(model.parameters(), **optimizer_kwargs) if lr_scheduler_arg == "cosine": warmup_steps = int(WARMUP_STEP_RATIO * max_steps) lr_scheduler = get_cosine_schedule_with_warmup( optimizer, num_warmup_steps=warmup_steps, num_training_steps=max_steps ) elif lr_scheduler_arg is None: lr_scheduler = DummyScheduler(optimizer_kwargs["lr"]) else: raise ValueError(f"Invalid lr_scheduler argument: {lr_scheduler_arg}") return optimizer, lr_scheduler class BucketIterator: """ Iterator that yields batches of data from a torch Dataset, grouped in buckets by sequence length The iterator will yield batches of size `batch_size`, where the samples in each batch are sorted by sequence length. This is done to minimize the amount of padding required for each batch. To avoid sorting the entire dataset and thus introducing a bias, the dataset is first split into buckets of size `batch_size * bucket_factor`. Args: ds: The torch Dataset to iterate over batch_size: The batch size bucket_factor: The factor by which to multiply the batch size to determine the bucket size delete_cols: The columns to delete from the dataset before yielding a batch """ def __init__(self, ds, *, batch_size: int, bucket_factor: int, delete_cols: list[str]) -> None: self.ds = ds self.batch_size = batch_size self.bucket_factor = bucket_factor self.delete_cols = set(delete_cols) assert self.bucket_factor > 0, "bucket_factor must be greater than 0" def _batch_iterator(self, bucket): tokens_per_sample_bucket = torch.tensor([len(i) for i in bucket["input_ids"]]) # sort long to short instead to encounter possible OOM errors as early as possible sorted = torch.argsort(tokens_per_sample_bucket, descending=True) cls = type(bucket) # conserve the type returned by the ds bucket = {k: [v[i] for i in sorted] for k, v in bucket.items() if k not in self.delete_cols} num_samples = len(bucket["input_ids"]) for j in range(0, num_samples, self.batch_size): batch = {k: v[j : j + self.batch_size] for k, v in bucket.items()} yield cls(batch) def __iter__(self): bucket_size = self.batch_size * self.bucket_factor for i in range(0, len(self.ds), bucket_size): bucket = self.ds[i : i + bucket_size] yield from self._batch_iterator(bucket) # if there is a remainder, we yield the last batch if len(self.ds) % bucket_size != 0: bucket = self.ds[-(len(self.ds) % bucket_size) :] yield from self._batch_iterator(bucket) def get_file_size( model: nn.Module, *, peft_config: Optional[PeftConfig], clean: bool, print_fn: Callable[..., None] ) -> int: file_size = 99999999 # set a default dummy value if peft_config is not None: try: with tempfile.TemporaryDirectory(ignore_cleanup_errors=True, delete=clean) as tmp_dir: model.save_pretrained(tmp_dir) stat = os.stat(os.path.join(tmp_dir, SAFETENSORS_WEIGHTS_NAME)) file_size = stat.st_size if not clean: print_fn(f"Saved PEFT checkpoint to {tmp_dir}") except Exception as exc: print(f"Failed to save PEFT checkpoint due to 
the following error: {exc}") else: print_fn("Not saving the fully fine-tuned model because it's too big, estimating the size instead") try: num_params = model.num_parameters() dtype_size = next(model.parameters()).element_size() file_size = num_params * dtype_size except Exception as exc: print(f"Failed to determine file size for fully finetuned model because of: {exc}") return file_size ################## # ANSWER PARSING # ################## def parse_answer(text: str) -> Optional[str]: """ A label/prediction can look like this: Question: If the magnitude of vector v is equal to 4, what is the dot product of vector v with itself?. Think step by step Answer: The dot product of a vector with itself is equal to the square of its magnitude. So, the dot product of vector v with itself is equal to $4^2 = \boxed{16}$.The answer is: 16 We want to extract '16' from this string. """ # This implementation is based on sampling meta-llama/Llama-3.1-8B-Instruct. It may not work for other models. candidate_delimiters = [ # MetaMath: "The answer is: ", "The answer is ", "The final answer is: ", "The final answer is ", # GSM8K: "#### ", ] text = text.strip() text = text.rstrip(".!?") for delimiter in candidate_delimiters: if delimiter in text: break else: # no match return None text = text.rpartition(delimiter)[-1].strip() # if a new paragraph follows after the final answer, we want to remove it text = text.split("\n", 1)[0] # note: we can just remove % here since the GSM8K dataset just omits it, i.e. 50% -> 50, no need to divide by 100 text = text.strip(" .!?$%") return text def convert_to_decimal(s: Optional[str]) -> Optional[Decimal]: """ Converts a string representing a number to a Decimal. The string may be: - A simple number (e.g., "13", "65.33") - A fraction (e.g., "20/14") """ if s is None: return None try: s = s.strip() # Check if the string represents a fraction. if "/" in s: parts = s.split("/") if len(parts) != 2: return None numerator = Decimal(parts[0].strip()) denominator = Decimal(parts[1].strip()) if denominator == 0: return None value = numerator / denominator else: # Parse as a regular decimal or integer string. 
value = Decimal(s) return value except (DivisionByZero, InvalidOperation, ValueError): return None def get_accuracy(*, predictions: list[str], responses: list[str]) -> float: if len(predictions) != len(responses): raise ValueError(f"Prediction length mismatch: {len(predictions)} != {len(responses)}") y_true: list[str | float | None] = [] y_pred: list[str | float | None] = [] for prediction, response in zip(predictions, responses): parsed_prediction = parse_answer(prediction) parsed_response = parse_answer(response) if parsed_response is None: raise ValueError(f"Error encountered while trying to parse response: {response}") decimal_prediction = convert_to_decimal(parsed_prediction) decimal_answer = convert_to_decimal(parsed_response) if decimal_prediction is not None: y_pred.append(float(decimal_prediction)) elif parsed_prediction is not None: y_pred.append(parsed_prediction) else: y_pred.append(None) # we convert decimals to float so that stuff like this works: # float(convert_to_decimal('20/35')) == float(convert_to_decimal('0.5714285714285714')) if decimal_answer is not None: y_true.append(float(decimal_answer)) elif parsed_prediction is not None: y_true.append(parsed_response) else: y_true.append(None) correct: list[bool] = [] for true, pred in zip(y_true, y_pred): if (true is not None) and (pred is not None): correct.append(true == pred) else: correct.append(False) accuracy = sum(correct) / len(correct) return accuracy ########### # LOGGING # ########### def get_base_model_info(model_id: str) -> Optional[huggingface_hub.ModelInfo]: try: return hf_api.model_info(model_id) except Exception as exc: warnings.warn(f"Could not retrieve model info, failed with error {exc}") return None def get_dataset_info(dataset_id: str) -> Optional[huggingface_hub.DatasetInfo]: try: return hf_api.dataset_info(dataset_id) except Exception as exc: warnings.warn(f"Could not retrieve dataset info, failed with error {exc}") return None def get_git_hash(module) -> Optional[str]: if "site-packages" in module.__path__[0]: return None return subprocess.check_output("git rev-parse HEAD".split(), cwd=os.path.dirname(module.__file__)).decode().strip() def get_package_info() -> dict[str, Optional[str]]: """Get the package versions and commit hashes of transformers, peft, datasets, bnb, and torch""" package_info = { "transformers-version": transformers.__version__, "transformers-commit-hash": get_git_hash(transformers), "peft-version": peft.__version__, "peft-commit-hash": get_git_hash(peft), "datasets-version": datasets.__version__, "datasets-commit-hash": get_git_hash(datasets), "bitsandbytes-version": bitsandbytes.__version__, "bitsandbytes-commit-hash": get_git_hash(bitsandbytes), "torch-version": torch.__version__, "torch-commit-hash": get_git_hash(torch), } return package_info def get_system_info() -> dict[str, str]: device = infer_device() torch_accelerator_module = getattr(torch, device, torch.cuda) system_info = { "system": platform.system(), "release": platform.release(), "version": platform.version(), "machine": platform.machine(), "processor": platform.processor(), "accelerator": torch_accelerator_module.get_device_name(0), } return system_info @dataclass class MetaInfo: package_info: dict[str, Optional[str]] system_info: dict[str, str] pytorch_info: str def get_meta_info() -> MetaInfo: meta_info = MetaInfo( package_info=get_package_info(), system_info=get_system_info(), pytorch_info=torch.__config__.show(), ) return meta_info def get_peft_branch() -> str: return ( subprocess.check_output("git rev-parse 
--abbrev-ref HEAD".split(), cwd=os.path.dirname(peft.__file__)) .decode() .strip() ) class TrainStatus(enum.Enum): FAILED = "failed" SUCCESS = "success" CANCELED = "canceled" @dataclass class TrainResult: status: TrainStatus train_time: float accelerator_memory_reserved_log: list[int] losses: list[float] metrics: list[Any] # TODO error_msg: str num_trainable_params: int num_total_params: int def log_to_console(log_data: dict[str, Any], print_fn: Callable[..., None]) -> None: accelerator_memory_max = log_data["train_info"]["accelerator_memory_max"] accelerator_memory_avg = log_data["train_info"]["accelerator_memory_reserved_avg"] accelerator_memory_reserved_99th = log_data["train_info"]["accelerator_memory_reserved_99th"] time_train = log_data["train_info"]["train_time"] time_total = log_data["run_info"]["total_time"] file_size = log_data["train_info"]["file_size"] print_fn(f"accelerator memory max: {accelerator_memory_max // 2**20}MB") print_fn(f"accelerator memory reserved avg: {accelerator_memory_avg // 2**20}MB") print_fn(f"accelerator memory reserved 99th percentile: {accelerator_memory_reserved_99th // 2**20}MB") print_fn(f"train time: {time_train}s") print_fn(f"total time: {time_total:.2f}s") print_fn(f"file size of checkpoint: {file_size / 2**20:.1f}MB") def log_to_file( *, log_data: dict, save_dir: str, experiment_name: str, timestamp: str, print_fn: Callable[..., None] ) -> None: if save_dir.endswith(RESULT_PATH): file_name = f"{experiment_name.replace(os.path.sep, '--')}.json" else: # For cancelled and temporary runs, we want to include the timestamp, as these runs are not tracked in git, thus # we need unique names to avoid losing history. file_name = f"{experiment_name.replace(os.path.sep, '--')}--{timestamp.replace(':', '-')}.json" file_name = os.path.join(save_dir, file_name) with open(file_name, "w") as f: json.dump(log_data, f, indent=2) print_fn(f"Saved log to: {file_name}") def log_results( *, experiment_name: str, train_result: TrainResult, accelerator_memory_init: int, time_total: float, file_size: int, model_info: Optional[huggingface_hub.ModelInfo], datasets_info: dict[str, Optional[huggingface_hub.DatasetInfo]], start_date: str, train_config: TrainConfig, peft_config: Optional[PeftConfig], print_fn: Callable[..., None], ) -> None: # collect results device = infer_device() torch_accelerator_module = getattr(torch, device, torch.cuda) accelerator_memory_final = torch_accelerator_module.max_memory_reserved() accelerator_memory_avg = int( sum(train_result.accelerator_memory_reserved_log) / len(train_result.accelerator_memory_reserved_log) ) accelerator_memory_reserved_99th = int(np.percentile(train_result.accelerator_memory_reserved_log, 99)) meta_info = get_meta_info() if model_info is not None: model_sha = model_info.sha model_created_at = model_info.created_at.isoformat() else: model_sha = None model_created_at = None dataset_info_log = {} for key, dataset_info in datasets_info.items(): if dataset_info is not None: dataset_sha = dataset_info.sha dataset_created_at = dataset_info.created_at.isoformat() else: dataset_sha = None dataset_created_at = None dataset_info_log[key] = {"sha": dataset_sha, "created_at": dataset_created_at} peft_branch = get_peft_branch() if train_result.status == TrainStatus.CANCELED: save_dir = RESULT_PATH_CANCELLED print_fn("Experiment run was categorized as canceled") elif peft_branch != "main": save_dir = RESULT_PATH_TEST print_fn(f"Experiment run was categorized as a test run on branch {peft_branch}") elif train_result.status == 
TrainStatus.SUCCESS: save_dir = RESULT_PATH print_fn("Experiment run was categorized as successful run") else: save_dir = tempfile.mkdtemp() print_fn(f"Experiment could not be categorized, writing results to {save_dir}. Please open an issue on PEFT.") if peft_config is None: peft_config_dict: Optional[dict[str, Any]] = None else: peft_config_dict = peft_config.to_dict() for key, value in peft_config_dict.items(): if isinstance(value, set): peft_config_dict[key] = list(value) log_data = { "run_info": { "created_at": start_date, "total_time": time_total, "experiment_name": experiment_name, "peft_branch": peft_branch, "train_config": asdict(train_config), "peft_config": peft_config_dict, "error_msg": train_result.error_msg, }, "train_info": { "accelerator_memory_reserved_avg": accelerator_memory_avg, "accelerator_memory_max": (accelerator_memory_final - accelerator_memory_init), "accelerator_memory_reserved_99th": accelerator_memory_reserved_99th, "train_time": train_result.train_time, "file_size": file_size, "num_trainable_params": train_result.num_trainable_params, "num_total_params": train_result.num_total_params, "status": train_result.status.value, "metrics": train_result.metrics, }, "meta_info": { "model_info": {"sha": model_sha, "created_at": model_created_at}, "dataset_info": dataset_info_log, **asdict(meta_info), }, } log_to_console(log_data, print_fn=print) # use normal print to be able to redirect if so desired log_to_file( log_data=log_data, save_dir=save_dir, experiment_name=experiment_name, timestamp=start_date, print_fn=print_fn )
peft/method_comparison/MetaMathQA/utils.py/0
{ "file_path": "peft/method_comparison/MetaMathQA/utils.py", "repo_id": "peft", "token_count": 10843 }
240
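A small usage sketch of the answer-parsing helpers defined in the utils module above. It assumes the file is importable as `utils` from the MetaMathQA directory and that an accelerator is present, since the module checks for CUDA/XPU at import time; the sample strings are made up.

```python
# Usage sketch for parse_answer / convert_to_decimal / get_accuracy above.
# Assumes `utils` is importable and a CUDA/XPU device is available (module-level check).
from utils import parse_answer, convert_to_decimal, get_accuracy

pred = "Each box holds 8 apples, so 4 boxes hold 4 * 8 = 32 apples. The answer is: 32"
gold = "4 * 8 = 32 apples in total. #### 32"

print(parse_answer(pred))                                   # '32'
print(convert_to_decimal(parse_answer(gold)))               # Decimal('32')
print(get_accuracy(predictions=[pred], responses=[gold]))   # 1.0
```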
# Copyright 2025-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import json import os import sys import time import torch from data import prepare_benchmark_prompts from run import measure_inference_time from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig, set_seed from utils import ( BenchmarkConfig, get_memory_usage, init_accelerator, ) def run_base_model_benchmark(benchmark_config: BenchmarkConfig, print_fn=print) -> dict: """Run benchmark for base model only and return results.""" print_fn(f"Running base model benchmark for: {benchmark_config.model_id}") print_fn("Initializing accelerator...") init_accelerator() set_seed(benchmark_config.seed) print_fn(f"Loading base model: {benchmark_config.model_id}") tokenizer = AutoTokenizer.from_pretrained(benchmark_config.model_id) if tokenizer.pad_token is None: tokenizer.pad_token = tokenizer.eos_token model_kwargs = { "device_map": "auto" if (torch.cuda.is_available() or torch.xpu.is_available()) else None, } if benchmark_config.dtype == "float32": model_kwargs["torch_dtype"] = torch.float32 elif benchmark_config.dtype == "float16": model_kwargs["torch_dtype"] = torch.float16 elif benchmark_config.dtype == "bfloat16": model_kwargs["torch_dtype"] = torch.bfloat16 if benchmark_config.use_8bit: model_kwargs["quantization_config"] = BitsAndBytesConfig( load_in_8bit=True, llm_int8_enable_fp32_cpu_offload=True ) elif benchmark_config.use_4bit: model_kwargs["quantization_config"] = BitsAndBytesConfig( load_in_4bit=True, bnb_4bit_compute_dtype=model_kwargs.get("torch_dtype", torch.float16), bnb_4bit_use_double_quant=True, bnb_4bit_quant_type="nf4", ) model = AutoModelForCausalLM.from_pretrained(benchmark_config.model_id, **model_kwargs) ram, accelerator_allocated, accelerator_reserved = get_memory_usage() print_fn(f"Memory after model load - RAM: {ram:.2f}MB, {model.device.type.upper()}: {accelerator_allocated:.2f}MB") print_fn("Preparing benchmark prompts...") prompts = prepare_benchmark_prompts( config=benchmark_config.to_dict(), tokenizer=tokenizer, max_input_length=None, seed=benchmark_config.seed, ) # Measure base model inference for each prompt category print_fn("Measuring base model inference times...") base_inference_results = measure_inference_time( model, tokenizer, prompts, max_new_tokens=benchmark_config.max_new_tokens, num_runs=benchmark_config.num_inference_runs, print_fn=print_fn, 
category_generation_params=benchmark_config.category_generation_params, ) result = { "model_id": benchmark_config.model_id, "benchmark_config": benchmark_config.to_dict(), "timestamp": time.strftime("%Y-%m-%d %H:%M:%S"), "inference_results": base_inference_results, "memory_info": { "ram_mb": ram, "accelerator_allocated_mb": accelerator_allocated, "accelerator_reserved_mb": accelerator_reserved, }, } return result def save_base_results(result: dict, model_id: str) -> str: """Save base model results with a filename based on model and config.""" base_results_dir = os.path.join(os.path.dirname(__file__), "base_results") os.makedirs(base_results_dir, exist_ok=True) model_name = model_id.replace("/", "_").replace("-", "_") filename = f"base_{model_name}.json" filepath = os.path.join(base_results_dir, filename) with open(filepath, "w") as f: json.dump(result, f, indent=2) return filepath def main(): """Main entry point for the base model benchmark runner.""" parser = argparse.ArgumentParser(description="Run base model benchmarks") parser.add_argument("--verbose", "-v", action="store_true", help="Enable verbose output") parser.add_argument("--force", "-f", action="store_true", help="Force re-run even if results exist") args = parser.parse_args() print_fn = print if args.verbose else lambda *args, **kwargs: None default_config_path = os.path.join(os.path.dirname(__file__), "default_benchmark_params.json") benchmark_config = BenchmarkConfig.from_json(default_config_path) model_name = benchmark_config.model_id.replace("/", "_").replace("-", "_") base_results_dir = os.path.join(os.path.dirname(__file__), "base_results") filename = f"base_{model_name}.json" filepath = os.path.join(base_results_dir, filename) if os.path.exists(filepath) and not args.force: print(f"Base results already exist at: {filepath}") print("Use --force to re-run the benchmark") return 0 print_fn(f"Running base model benchmark for: {benchmark_config.model_id}") result = run_base_model_benchmark(benchmark_config, print_fn=print_fn) saved_path = save_base_results(result, benchmark_config.model_id) device_type = torch.accelerator.current_accelerator().type if hasattr(torch, "accelerator") else "cuda" print(f"Base model results saved to: {saved_path}") print("\nBase Model Benchmark Summary:") print(f"Model: {result['model_id']}") print( f"Memory Usage - RAM: {result['memory_info']['ram_mb']:.2f}MB, {device_type.upper()}: {result['memory_info']['accelerator_allocated_mb']:.2f}MB" ) print("\nInference Times by Category:") for category, time_val in result["inference_results"]["inference_times"].items(): time_per_token = result["inference_results"]["time_per_token"][category] tokens = result["inference_results"]["generated_tokens"][category] print(f" {category}: {time_val:.4f}s ({time_per_token:.6f}s/token, {tokens:.1f} tokens)") return 0 if __name__ == "__main__": sys.exit(main())
peft/method_comparison/text_generation_benchmark/run_base.py/0
{ "file_path": "peft/method_comparison/text_generation_benchmark/run_base.py", "repo_id": "peft", "token_count": 2639 }
241
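The results written by `save_base_results` above are plain JSON, so they can be post-processed directly. A hedged sketch follows; the file name is a hypothetical example of the `base_<model>.json` pattern produced by `main()` (the real name depends on the `model_id` in `default_benchmark_params.json`).

```python
# Hedged sketch: load a base-benchmark JSON written by save_base_results()
# and print per-category timings. The file name is a hypothetical example of
# the base_<model_id with "/" and "-" replaced by "_">.json pattern.
import json

with open("base_results/base_meta_llama_Llama_3.1_8B_Instruct.json") as f:
    result = json.load(f)

print(result["model_id"], result["timestamp"])
for category, seconds in result["inference_results"]["inference_times"].items():
    per_token = result["inference_results"]["time_per_token"][category]
    tokens = result["inference_results"]["generated_tokens"][category]
    print(f"{category}: {seconds:.4f}s total, {per_token:.6f}s/token, {tokens:.1f} tokens")
```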
# Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import importlib import importlib.metadata as importlib_metadata import platform from functools import lru_cache import packaging.version import torch @lru_cache def is_bnb_available() -> bool: return importlib.util.find_spec("bitsandbytes") is not None @lru_cache def is_bnb_4bit_available() -> bool: if not is_bnb_available(): return False import bitsandbytes as bnb return hasattr(bnb.nn, "Linear4bit") @lru_cache def is_auto_gptq_available(): if importlib.util.find_spec("auto_gptq") is not None: AUTOGPTQ_MINIMUM_VERSION = packaging.version.parse("0.5.0") version_autogptq = packaging.version.parse(importlib_metadata.version("auto_gptq")) if AUTOGPTQ_MINIMUM_VERSION <= version_autogptq: return True else: raise ImportError( f"Found an incompatible version of auto-gptq. Found version {version_autogptq}, " f"but only versions above {AUTOGPTQ_MINIMUM_VERSION} are supported" ) @lru_cache def is_gptqmodel_available(): if importlib.util.find_spec("gptqmodel") is not None: GPTQMODEL_MINIMUM_VERSION = packaging.version.parse("2.0.0") OPTIMUM_MINIMUM_VERSION = packaging.version.parse("1.24.0") version_gptqmodel = packaging.version.parse(importlib_metadata.version("gptqmodel")) if GPTQMODEL_MINIMUM_VERSION <= version_gptqmodel: if is_optimum_available(): version_optimum = packaging.version.parse(importlib_metadata.version("optimum")) if OPTIMUM_MINIMUM_VERSION <= version_optimum: return True else: raise ImportError( f"gptqmodel requires optimum version `{OPTIMUM_MINIMUM_VERSION}` or higher. Found version `{version_optimum}`, " f"but only versions above `{OPTIMUM_MINIMUM_VERSION}` are supported" ) else: raise ImportError( f"gptqmodel requires optimum version `{OPTIMUM_MINIMUM_VERSION}` or higher to be installed." ) else: raise ImportError( f"Found an incompatible version of gptqmodel. 
Found version `{version_gptqmodel}`, " f"but only versions above `{GPTQMODEL_MINIMUM_VERSION}` are supported" ) @lru_cache def is_optimum_available() -> bool: return importlib.util.find_spec("optimum") is not None @lru_cache def is_torch_tpu_available(check_device=True): "Checks if `torch_xla` is installed and potentially if a TPU is in the environment" if importlib.util.find_spec("torch_xla") is not None: if check_device: # We need to check if `xla_device` can be found, will raise a RuntimeError if not try: import torch_xla.core.xla_model as xm _ = xm.xla_device() return True except RuntimeError: return False return True return False @lru_cache def is_aqlm_available(): return importlib.util.find_spec("aqlm") is not None @lru_cache def is_auto_awq_available(): return importlib.util.find_spec("awq") is not None @lru_cache def is_eetq_available(): return importlib.util.find_spec("eetq") is not None @lru_cache def is_hqq_available(): return importlib.util.find_spec("hqq") is not None @lru_cache def is_inc_available(): return importlib.util.find_spec("neural_compressor") is not None @lru_cache def is_torchao_available(): if importlib.util.find_spec("torchao") is None: return False TORCHAO_MINIMUM_VERSION = packaging.version.parse("0.4.0") try: torchao_version = packaging.version.parse(importlib_metadata.version("torchao")) except importlib_metadata.PackageNotFoundError: # Same idea as in diffusers: # https://github.com/huggingface/diffusers/blob/9f06a0d1a4a998ac6a463c5be728c892f95320a8/src/diffusers/utils/import_utils.py#L351-L357 # It's not clear under what circumstances `importlib_metadata.version("torchao")` can raise an error even # though `importlib.util.find_spec("torchao") is not None` but it has been observed, so adding this for # precaution. return False if torchao_version < TORCHAO_MINIMUM_VERSION: raise ImportError( f"Found an incompatible version of torchao. Found version {torchao_version}, " f"but only versions above {TORCHAO_MINIMUM_VERSION} are supported" ) return True @lru_cache def is_xpu_available(check_device=False): """ Checks if XPU acceleration is available and potentially if a XPU is in the environment """ system = platform.system() if system == "Darwin": return False else: if check_device: try: # Will raise a RuntimeError if no XPU is found _ = torch.xpu.device_count() return torch.xpu.is_available() except RuntimeError: return False return hasattr(torch, "xpu") and torch.xpu.is_available() @lru_cache def is_diffusers_available(): return importlib.util.find_spec("diffusers") is not None
peft/src/peft/import_utils.py/0
{ "file_path": "peft/src/peft/import_utils.py", "repo_id": "peft", "token_count": 2441 }
242
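A minimal sketch of how the cached availability helpers above can gate optional imports (assuming `peft` is installed; `bitsandbytes` itself is only imported when the check passes — the helper name `pick_bnb_linear` is illustrative, not part of the source):

```python
# Minimal sketch: gate optional bitsandbytes imports with the cached checks above.
from peft.import_utils import is_bnb_4bit_available, is_bnb_available


def pick_bnb_linear():
    # Returns a quantized Linear class if bitsandbytes is usable, else None.
    if is_bnb_4bit_available():
        import bitsandbytes as bnb

        return bnb.nn.Linear4bit
    if is_bnb_available():
        import bitsandbytes as bnb

        return bnb.nn.Linear8bitLt
    return None  # caller falls back to torch.nn.Linear
```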
# Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import warnings import torch from transformers.pytorch_utils import Conv1D from peft.import_utils import is_bnb_4bit_available, is_bnb_available, is_gptqmodel_available from peft.tuners.lora import LoraConfig, LoraModel from peft.tuners.tuners_utils import BaseTunerLayer from peft.utils import ( TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING, _freeze_adapter, _get_submodules, get_auto_gptq_quant_linear, get_gptqmodel_quant_linear, get_quantization_config, ) from peft.utils.integrations import gather_params_ctx from .gptq import SVDQuantLinear from .layer import AdaLoraLayer, RankAllocator, SVDLinear class AdaLoraModel(LoraModel): """ Creates AdaLoRA (Adaptive LoRA) model from a pretrained transformers model. Paper: https://openreview.net/forum?id=lq62uWRJjiY Args: model ([`transformers.PreTrainedModel`]): The model to be adapted. config ([`AdaLoraConfig`]): The configuration of the AdaLora model. adapter_name (`str`): The name of the adapter, defaults to `"default"`. low_cpu_mem_usage (`bool`, `optional`, defaults to `False`): Create empty adapter weights on meta device. Useful to speed up the loading process. Returns: `torch.nn.Module`: The AdaLora model. Example:: >>> from transformers import AutoModelForSeq2SeqLM >>> from peft import LoraConfig, AdaLoraModel, AdaLoraConfig >>> config = AdaLoraConfig( peft_type="ADALORA", task_type="SEQ_2_SEQ_LM", init_r=12, lora_alpha=32, target_modules=["q", "v"], lora_dropout=0.01, ) >>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-base") >>> model = AdaLoraModel(model, config, "default") **Attributes**: - **model** ([`transformers.PreTrainedModel`]) -- The model to be adapted. - **peft_config** ([`AdaLoraConfig`]): The configuration of the AdaLora model. """ # Note: don't redefine prefix here, it should be inherited from LoraModel def __init__(self, model, config, adapter_name, **kwargs): super().__init__(model, config, adapter_name, **kwargs) traininable_mode_counter = 0 for config in self.peft_config.values(): if not config.inference_mode: traininable_mode_counter += 1 if traininable_mode_counter > 1: raise ValueError( "AdaLoraModel supports only 1 trainable adapter. " "When using multiple adapters, set inference_mode to True for all adapters except the one you want to train." ) if self.peft_config[adapter_name].inference_mode: _freeze_adapter(self.model, adapter_name) else: self.trainable_adapter_name = adapter_name self.rankallocator = RankAllocator(self.model, self.peft_config[adapter_name], self.trainable_adapter_name) def _check_new_adapter_config(self, config: LoraConfig) -> None: """ A helper method to check the config when a new adapter is being added. Raise a ValueError if there is something wrong with the config or if it conflicts with existing adapters. 
""" super()._check_new_adapter_config(config) traininable_mode_counter = 0 for config_ in self.peft_config.values(): if not config_.inference_mode: traininable_mode_counter += 1 if traininable_mode_counter > 1: raise ValueError( f"{self.__class__.__name__} supports only 1 trainable adapter. " "When using multiple adapters, set inference_mode to True for all adapters except the one " "you want to train." ) def _create_and_replace( self, lora_config, adapter_name, target, target_name, parent, current_key, ): kwargs = { "r": lora_config.init_r, "lora_alpha": lora_config.lora_alpha, "lora_dropout": lora_config.lora_dropout, "fan_in_fan_out": lora_config.fan_in_fan_out, "init_lora_weights": lora_config.init_lora_weights, "loaded_in_8bit": getattr(self.model, "is_loaded_in_8bit", False), "loaded_in_4bit": getattr(self.model, "is_loaded_in_4bit", False), } if (kwargs["loaded_in_8bit"] or kwargs["loaded_in_4bit"]) and not is_bnb_available(): raise ImportError( "To use AdaLora with 8-bit quantization, please install the `bitsandbytes` package. " "You can install it with `pip install bitsandbytes`." ) quantization_config = get_quantization_config(self.model, method="gptq") if quantization_config is not None: kwargs["gptq_quantization_config"] = quantization_config # If it is not an AdaLoraLayer, create a new module, else update it with new adapters if not isinstance(target, AdaLoraLayer): device_map = self.model.hf_device_map if hasattr(self.model, "hf_device_map") else None new_module = self._create_new_module(lora_config, adapter_name, target, device_map=device_map, **kwargs) if adapter_name not in self.active_adapters: # adding an additional adapter: it is not automatically trainable new_module.requires_grad_(False) self._replace_module(parent, target_name, new_module, target) else: target.update_layer( adapter_name, lora_config.init_r, lora_config.lora_alpha, lora_config.lora_dropout, lora_config.init_lora_weights, ) @staticmethod def _create_new_module(lora_config, adapter_name, target, device_map=None, **kwargs): # avoid eager bnb import if is_bnb_available(): import bitsandbytes as bnb from .bnb import SVDLinear8bitLt if is_bnb_4bit_available(): from .bnb import SVDLinear4bit gptq_quantization_config = kwargs.get("gptq_quantization_config", None) if is_gptqmodel_available(): QuantLinear = get_gptqmodel_quant_linear(gptq_quantization_config, device_map=device_map) else: QuantLinear = get_auto_gptq_quant_linear(gptq_quantization_config) loaded_in_8bit = kwargs.pop("loaded_in_8bit", False) loaded_in_4bit = kwargs.pop("loaded_in_4bit", False) if isinstance(target, BaseTunerLayer): target_base_layer = target.get_base_layer() else: target_base_layer = target if loaded_in_8bit and isinstance(target_base_layer, bnb.nn.Linear8bitLt): kwargs.update( { "has_fp16_weights": target_base_layer.state.has_fp16_weights, "threshold": target_base_layer.state.threshold, "index": target_base_layer.index, } ) new_module = SVDLinear8bitLt(target, adapter_name, **kwargs) elif loaded_in_4bit and is_bnb_4bit_available() and isinstance(target_base_layer, bnb.nn.Linear4bit): fourbit_kwargs = kwargs.copy() fourbit_kwargs.update( { "compute_dtype": target_base_layer.compute_dtype, "compress_statistics": target_base_layer.weight.compress_statistics, "quant_type": target_base_layer.weight.quant_type, } ) new_module = SVDLinear4bit(target, adapter_name, **fourbit_kwargs) elif QuantLinear is not None and isinstance(target, QuantLinear): new_module = SVDQuantLinear(target, adapter_name, **kwargs) else: if 
isinstance(target_base_layer, torch.nn.Linear): if kwargs["fan_in_fan_out"]: warnings.warn( "fan_in_fan_out is set to True but the target module is `torch.nn.Linear`. " "Setting fan_in_fan_out to False." ) kwargs["fan_in_fan_out"] = lora_config.fan_in_fan_out = False elif isinstance(target_base_layer, Conv1D): if not kwargs["fan_in_fan_out"]: warnings.warn( "fan_in_fan_out is set to False but the target module is `Conv1D`. " "Setting fan_in_fan_out to True." ) kwargs["fan_in_fan_out"] = lora_config.fan_in_fan_out = True else: raise ValueError( f"Target module {target} is not supported. " f"Currently, only `torch.nn.Linear` and `Conv1D` are supported." ) new_module = SVDLinear(target, adapter_name, **kwargs) return new_module @staticmethod def _prepare_adapter_config(peft_config, model_config): if peft_config.target_modules is None: if model_config["model_type"] not in TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING: raise ValueError("Please specify `target_modules` in `peft_config`") peft_config.target_modules = TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING[ model_config["model_type"] ] return peft_config def __getattr__(self, name: str): """Forward missing attributes to the wrapped module.""" try: return super().__getattr__(name) # defer to nn.Module's logic except AttributeError: if name == "model": # see #1892: prevent infinite recursion if class is not initialized raise return getattr(self.model, name) def forward(self, *args, **kwargs): outputs = self.model.forward(*args, **kwargs) if (getattr(outputs, "loss", None) is not None) and isinstance(outputs.loss, torch.Tensor): # Calculate the orthogonal regularization orth_reg_weight = self.peft_config[self.trainable_adapter_name].orth_reg_weight if orth_reg_weight <= 0: raise ValueError("orth_reg_weight should be greater than 0. 
") regu_loss = 0 num_param = 0 for n, p in self.model.named_parameters(): if ("lora_A" in n or "lora_B" in n) and self.trainable_adapter_name in n: if p.shape == torch.Size([0]): with gather_params_ctx(p, fwd_module=self): para_cov = p @ p.T if "lora_A" in n else p.T @ p else: para_cov = p @ p.T if "lora_A" in n else p.T @ p I = torch.eye(*para_cov.size(), out=torch.empty_like(para_cov)) # noqa: E741 I.requires_grad = False num_param += 1 regu_loss += torch.norm(para_cov - I, p="fro") if num_param > 0: regu_loss = regu_loss / num_param else: regu_loss = 0 outputs.loss += orth_reg_weight * regu_loss return outputs def resize_modules_by_rank_pattern(self, rank_pattern, adapter_name): lora_config = self.peft_config[adapter_name] for name, rank_idx in rank_pattern.items(): if isinstance(rank_idx, list): rank = sum(rank_idx) elif isinstance(rank_idx, torch.Tensor): rank_idx = rank_idx.view(-1) rank = rank_idx.sum().item() else: raise ValueError("Unexpected type of rank_idx") key = ".".join(name.split(".")[0:-2]) if adapter_name in name else ".".join(name.split(".")[0:-1]) _, target, _ = _get_submodules(self.model, key) lora_E_weights = target.lora_E[adapter_name][rank_idx] lora_A_weights = target.lora_A[adapter_name][rank_idx] lora_B_weights = target.lora_B[adapter_name][:, rank_idx] ranknum = target.ranknum[adapter_name] target.update_layer( adapter_name, rank, lora_config.lora_alpha, lora_config.lora_dropout, lora_config.init_lora_weights, ) with torch.no_grad(): if rank > 0: target.lora_E[adapter_name].copy_(lora_E_weights) target.lora_A[adapter_name].copy_(lora_A_weights) target.lora_B[adapter_name].copy_(lora_B_weights) # The scaling is exactly as the previous target.ranknum[adapter_name].copy_(ranknum) def resize_state_dict_by_rank_pattern(self, rank_pattern, state_dict, adapter_name): for name, rank_idx in rank_pattern.items(): rank = sum(rank_idx) prefix = ".".join(name.split(".")[0:-2]) if adapter_name in name else ".".join(name.split(".")[0:-1]) for layer in ["lora_E", "lora_A", "lora_B"]: key = f"base_model.model.{prefix}.{layer}.{adapter_name}" if layer != "lora_B": state_dict[key] = ( state_dict[key][rank_idx] if rank != state_dict[key].shape[0] else state_dict[key] ) else: state_dict[key] = ( state_dict[key][:, rank_idx] if rank != state_dict[key].shape[1] else state_dict[key] ) return state_dict def update_and_allocate(self, global_step): """ This method updates Adalora budget and mask. This should be called in every training step after `loss.backward()` and before `zero_grad()`. `tinit`, `tfinal` and `deltaT` are handled with in the method. Args: global_step (`int`): The current training step, it is used to calculate adalora budget. 
Example: ```python >>> loss = model(**input).loss >>> loss.backward() >>> optimizer.step() >>> model.base_model.update_and_allocate(i_step) >>> optimizer.zero_grad() ``` """ lora_config = self.peft_config[self.trainable_adapter_name] # Update the importance score and allocate the budget if global_step < lora_config.total_step - lora_config.tfinal: _, rank_pattern = self.rankallocator.update_and_allocate(self.model, global_step) if rank_pattern: lora_config.rank_pattern = rank_pattern # Finalize the budget allocation elif global_step == lora_config.total_step - lora_config.tfinal: _, rank_pattern = self.rankallocator.update_and_allocate(self.model, global_step, force_mask=True) # for some reason, this freezes the trainable parameters and nothing gets updates # self.resize_modules_by_rank_pattern(rank_pattern, self.trainable_adapter_name) lora_config.rank_pattern = rank_pattern self.rankallocator.reset_ipt() # Currently using inefficient way to mask the unimportant weights using the rank pattern # due to problem mentioned above elif global_step > lora_config.total_step - lora_config.tfinal: self.rankallocator.mask_using_rank_pattern(self.model, lora_config.rank_pattern) # Pass the function and do forward propagation else: return None def add_weighted_adapter(self, *args, **kwargs): """This method is not supported for AdaLoRA, use LoRA instead.""" raise TypeError(f"{self.__class__.__name__} does not support add_weighted_adapter method.")
peft/src/peft/tuners/adalora/model.py/0
{ "file_path": "peft/src/peft/tuners/adalora/model.py", "repo_id": "peft", "token_count": 7660 }
243
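A rough AdaLoRA training-loop sketch (the base model, hyperparameter values, and the dummy batch are illustrative placeholders, not part of the source above). The key point is that `update_and_allocate` must run after `loss.backward()` and before `zero_grad()`:

```python
import torch
from transformers import AutoModelForCausalLM
from peft import AdaLoraConfig, get_peft_model

base = AutoModelForCausalLM.from_pretrained("facebook/opt-125m")
config = AdaLoraConfig(init_r=12, target_r=4, tinit=50, tfinal=100, deltaT=10, total_step=1000)
model = get_peft_model(base, config)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)

# Dummy batch only to keep the sketch self-contained; replace with a real dataloader.
batch = {"input_ids": torch.randint(0, 100, (2, 16)), "labels": torch.randint(0, 100, (2, 16))}

for step in range(10):
    loss = model(**batch).loss  # forward() adds the orthogonal regularization term
    loss.backward()
    optimizer.step()
    model.base_model.update_and_allocate(step)  # after backward(), before zero_grad()
    optimizer.zero_grad()
```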
# Copyright 2024-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import warnings from dataclasses import asdict from enum import Enum from typing import Optional import torch from torch import nn from tqdm import tqdm from peft.tuners.tuners_utils import BaseTuner, BaseTunerLayer, check_target_module_exists from peft.utils import ( TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING, ModulesToSaveWrapper, _get_submodules, ) from .config import BoneConfig from .layer import BoneLayer, BoneLinear class BoneModel(BaseTuner): """ Creates Householder reflection adaptation (Bone) model from a pretrained model. The method is described in https://huggingface.co/papers/2409.15371 Args: model (`torch.nn.Module`): The model to which the adapter tuner layers will be attached. config ([`BoneConfig`]): The configuration of the Bone model. adapter_name (`str`): The name of the adapter, defaults to `"default"`. low_cpu_mem_usage (`bool`, `optional`, defaults to `False`): Create empty adapter weights on meta device. Useful to speed up the loading process. Returns: `torch.nn.Module`: The Bone model. Example: ```py >>> from diffusers import StableDiffusionPipeline >>> from peft import BoneModel, BoneConfig >>> config_te = BoneConfig( ... r=8, ... target_modules=["k_proj", "q_proj", "v_proj", "out_proj", "fc1", "fc2"], ... init_weights=True, ... ) >>> config_unet = BoneConfig( ... r=8, ... target_modules=[ ... "proj_in", ... "proj_out", ... "to_k", ... "to_q", ... "to_v", ... "to_out.0", ... "ff.net.0.proj", ... "ff.net.2", ... ], ... init_weights=True, ... ) >>> model = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5") >>> model.text_encoder = BoneModel(model.text_encoder, config_te, "default") >>> model.unet = BoneModel(model.unet, config_unet, "default") ``` **Attributes**: - **model** ([`~torch.nn.Module`]) -- The model to be adapted. - **peft_config** ([`BoneConfig`]): The configuration of the Bone model. """ prefix: str = "bone_" def _check_new_adapter_config(self, config: BoneConfig) -> None: """ A helper method to check the config when a new adapter is being added. Raise a ValueError if there is something wrong with the config or if it conflicts with existing adapters. """ # TODO: there should be a check if any of the existing adapters actually has bias != "none", or else the check # does not fully correspond to the error message. if (len(self.peft_config) > 1) and (config.bias != "none"): raise ValueError( f"{self.__class__.__name__} supports only 1 adapter with bias. When using multiple adapters, " "set bias to 'none' for all adapters." 
) @staticmethod def _check_target_module_exists(bone_config, key): return check_target_module_exists(bone_config, key) def _create_and_replace( self, bone_config, adapter_name, target, target_name, parent, current_key, **optional_kwargs, ): if current_key is None: raise ValueError("Current Key shouldn't be `None`") bias = hasattr(target, "bias") and target.bias is not None kwargs = { "r": bone_config.r, "init_weights": bone_config.init_weights, } kwargs["bias"] = bias # If it is not a BoneLayer, create a new module, else update it with new adapters if not isinstance(target, BoneLayer): new_module = self._create_new_module(bone_config, adapter_name, target, **kwargs) if adapter_name not in self.active_adapters: # adding an additional adapter: it is not automatically trainable new_module.requires_grad_(False) self._replace_module(parent, target_name, new_module, target) else: target.update_layer( adapter_name, r=bone_config.r, init_weights=bone_config.init_weights, ) def _replace_module(self, parent, child_name, new_module, child): setattr(parent, child_name, new_module) # It's not necessary to set requires_grad here, as that is handled by # _mark_only_adapters_as_trainable # child layer wraps the original module, unpack it if hasattr(child, "base_layer"): child = child.base_layer if not hasattr(new_module, "base_layer"): new_module.weight = child.weight if hasattr(child, "bias"): new_module.bias = child.bias if getattr(child, "state", None) is not None: if hasattr(new_module, "base_layer"): new_module.base_layer.state = child.state else: new_module.state = child.state new_module.to(child.weight.device) meta = torch.device("meta") # dispatch to correct device for name, module in new_module.named_modules(): if self.prefix in name: if not any(p.device == meta for p in module.parameters()): module.to(child.weight.device) def _mark_only_adapters_as_trainable(self, model: nn.Module) -> None: for n, p in model.named_parameters(): if self.prefix not in n: p.requires_grad = False for active_adapter in self.active_adapters: bias = self.peft_config[active_adapter].bias if bias == "none": continue if bias == "all": for n, p in model.named_parameters(): if "bias" in n: p.requires_grad = True elif bias == "bone_only": for name, m in model.named_modules(): if isinstance(m, BoneLayer) and hasattr(m, "bias") and m.bias is not None: m.bias.requires_grad = True else: raise NotImplementedError(f"Requested bias: {bias}, is not implemented.") @staticmethod def _create_new_module(bone_config, adapter_name, target, **kwargs): if isinstance(target, BaseTunerLayer): target_base_layer = target.get_base_layer() else: target_base_layer = target if isinstance(target_base_layer, torch.nn.Linear): new_module = BoneLinear(target, adapter_name, **kwargs) else: raise ValueError( f"Target module {target} is not supported. Currently, only `torch.nn.Linear` is supported." 
) return new_module def __getattr__(self, name: str): """Forward missing attributes to the wrapped module.""" try: return super().__getattr__(name) # defer to nn.Module's logic except AttributeError: if name == "base_model": raise return getattr(self.model, name) def get_peft_config_as_dict(self, inference: bool = False): config_dict = {} for key, value in self.peft_config.items(): config = {k: v.value if isinstance(v, Enum) else v for k, v in asdict(value).items()} if inference: config["inference_mode"] = True config_dict[key] = config return config def _set_adapter_layers(self, enabled=True): for module in self.model.modules(): if isinstance(module, (BaseTunerLayer, ModulesToSaveWrapper)): module.enable_adapters(enabled) def enable_adapter_layers(self): self._set_adapter_layers(enabled=True) def disable_adapter_layers(self): for active_adapter in self.active_adapters: val = self.peft_config[active_adapter].bias if val != "none": msg = ( f"Careful, disabling adapter layers with bias configured to be '{val}' does not produce the same " "output as the base model would without adaption." ) warnings.warn(msg) self._set_adapter_layers(enabled=False) def set_adapter(self, adapter_name): for module in self.model.modules(): if isinstance(module, BoneLayer): if module.merged: warnings.warn("Adapter cannot be set when the model is merged. Unmerging the model first.") module.unmerge() module.set_adapter(adapter_name) self.active_adapter = adapter_name @staticmethod def _prepare_adapter_config(peft_config, model_config): if peft_config.target_modules is None: if model_config["model_type"] not in TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING: raise ValueError("Please specify `target_modules` in `peft_config`") peft_config.target_modules = set( TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING[model_config["model_type"]] ) return peft_config def _unload_and_optionally_merge( self, merge=True, progressbar: bool = False, safe_merge: bool = False, adapter_names: Optional[list[str]] = None, ): self._unloading_checks(adapter_names) key_list = [key for key, _ in self.model.named_modules() if self.prefix not in key] desc = "Unloading " + ("and merging " if merge else "") + "model" for key in tqdm(key_list, disable=not progressbar, desc=desc): try: parent, target, target_name = _get_submodules(self.model, key) except AttributeError: continue if hasattr(target, "base_layer"): if merge: target.merge(safe_merge=safe_merge, adapter_names=adapter_names) self._replace_module(parent, target_name, target.get_base_layer(), target) elif isinstance(target, ModulesToSaveWrapper): # save any additional trainable modules part of `modules_to_save` setattr(parent, target_name, target.modules_to_save[target.active_adapter]) return self.model def delete_adapter(self, adapter_name: str) -> None: """ Deletes an existing adapter. Args: adapter_name (str): Name of the adapter to be deleted. 
""" if adapter_name not in list(self.peft_config.keys()): raise ValueError(f"Adapter {adapter_name} does not exist") del self.peft_config[adapter_name] key_list = [key for key, _ in self.model.named_modules() if self.prefix not in key] new_adapter = None for key in key_list: _, target, _ = _get_submodules(self.model, key) if isinstance(target, BoneLayer): target.delete_adapter(adapter_name) if new_adapter is None: new_adapter = target.active_adapters[:] self.active_adapter = new_adapter or [] self._delete_auxiliary_adapter(adapter_name, new_active_adapters=new_adapter) def merge_and_unload( self, progressbar: bool = False, safe_merge: bool = False, adapter_names: Optional[list[str]] = None ) -> torch.nn.Module: r""" This method merges the Bone layers into the base model. This is needed if someone wants to use the base model as a standalone model. Args: progressbar (`bool`): whether to show a progressbar indicating the unload and merge process safe_merge (`bool`): whether to activate the safe merging check to check if there is any potential Nan in the adapter weights adapter_names (`List[str]`, *optional*): The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults to `None`. """ return self._unload_and_optionally_merge( progressbar=progressbar, safe_merge=safe_merge, adapter_names=adapter_names ) def unload(self) -> torch.nn.Module: """ Gets back the base model by removing all the bone modules without merging. This gives back the original base model. """ return self._unload_and_optionally_merge(merge=False)
peft/src/peft/tuners/bone/model.py/0
{ "file_path": "peft/src/peft/tuners/bone/model.py", "repo_id": "peft", "token_count": 5876 }
244
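A hedged sketch of attaching Bone adapters to a causal LM and later merging them back (model name and target modules are illustrative; `merge_and_unload` folds the `bone_` parameters into the base `nn.Linear` weights via `_unload_and_optionally_merge` above):

```python
from transformers import AutoModelForCausalLM
from peft import BoneConfig, get_peft_model

base = AutoModelForCausalLM.from_pretrained("facebook/opt-125m")
config = BoneConfig(r=8, target_modules=["q_proj", "v_proj"])
model = get_peft_model(base, config)
model.print_trainable_parameters()

# ... train as usual, then fold the adapters into the base weights:
merged = model.merge_and_unload()
```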
# Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math from typing import Any, Optional, Union import torch import torch.nn as nn import torch.nn.functional as F from peft.tuners.lycoris_utils import LycorisLayer class LoKrLayer(nn.Module, LycorisLayer): # All names of layers that may contain adapter weights adapter_layer_names = ( "lokr_w1", "lokr_w1_a", "lokr_w1_b", "lokr_w2", "lokr_w2_a", "lokr_w2_b", "lokr_t2", ) # other_param_names is defined on parent class def __init__(self, base_layer: nn.Module) -> None: super().__init__() LycorisLayer.__init__(self, base_layer) # LoKr info self.lokr_w1 = nn.ParameterDict({}) self.lokr_w1_a = nn.ParameterDict({}) self.lokr_w1_b = nn.ParameterDict({}) self.lokr_w2 = nn.ParameterDict({}) self.lokr_w2_a = nn.ParameterDict({}) self.lokr_w2_b = nn.ParameterDict({}) self.lokr_t2 = nn.ParameterDict({}) @property def _available_adapters(self) -> set[str]: return { *self.lokr_w1, *self.lokr_w1_a, *self.lokr_w1_b, *self.lokr_w2, *self.lokr_w2_a, *self.lokr_w2_b, *self.lokr_t2, } def create_adapter_parameters( self, adapter_name: str, r: int, shape, use_w1: bool, use_w2: bool, use_effective_conv2d: bool, ): if use_w1: self.lokr_w1[adapter_name] = nn.Parameter(torch.empty(shape[0][0], shape[1][0])) else: self.lokr_w1_a[adapter_name] = nn.Parameter(torch.empty(shape[0][0], r)) self.lokr_w1_b[adapter_name] = nn.Parameter(torch.empty(r, shape[1][0])) if len(shape) == 4: # Conv2d if use_w2: self.lokr_w2[adapter_name] = nn.Parameter(torch.empty(shape[0][1], shape[1][1], *shape[2:])) elif use_effective_conv2d: self.lokr_t2[adapter_name] = nn.Parameter(torch.empty(r, r, shape[2], shape[3])) self.lokr_w2_a[adapter_name] = nn.Parameter(torch.empty(r, shape[0][1])) # b, 1-mode self.lokr_w2_b[adapter_name] = nn.Parameter(torch.empty(r, shape[1][1])) # d, 2-mode else: self.lokr_w2_a[adapter_name] = nn.Parameter(torch.empty(shape[0][1], r)) self.lokr_w2_b[adapter_name] = nn.Parameter(torch.empty(r, shape[1][1] * shape[2] * shape[3])) else: # Linear if use_w2: self.lokr_w2[adapter_name] = nn.Parameter(torch.empty(shape[0][1], shape[1][1])) else: self.lokr_w2_a[adapter_name] = nn.Parameter(torch.empty(shape[0][1], r)) self.lokr_w2_b[adapter_name] = nn.Parameter(torch.empty(r, shape[1][1])) def reset_adapter_parameters(self, adapter_name: str): if adapter_name in self.lokr_w1: nn.init.zeros_(self.lokr_w1[adapter_name]) else: nn.init.zeros_(self.lokr_w1_a[adapter_name]) nn.init.kaiming_uniform_(self.lokr_w1_b[adapter_name], a=math.sqrt(5)) if adapter_name in self.lokr_w2: nn.init.kaiming_uniform_(self.lokr_w2[adapter_name], a=math.sqrt(5)) else: nn.init.kaiming_uniform_(self.lokr_w2_a[adapter_name], a=math.sqrt(5)) nn.init.kaiming_uniform_(self.lokr_w2_b[adapter_name], a=math.sqrt(5)) if adapter_name in self.lokr_t2: nn.init.kaiming_uniform_(self.lokr_t2[adapter_name], a=math.sqrt(5)) def reset_adapter_parameters_random(self, adapter_name: str): if adapter_name in self.lokr_w1: nn.init.kaiming_uniform_(self.lokr_w1[adapter_name], 
a=math.sqrt(5)) else: nn.init.kaiming_uniform_(self.lokr_w1_a[adapter_name], a=math.sqrt(5)) nn.init.kaiming_uniform_(self.lokr_w1_b[adapter_name], a=math.sqrt(5)) if adapter_name in self.lokr_w2: nn.init.kaiming_uniform_(self.lokr_w2[adapter_name], a=math.sqrt(5)) else: nn.init.kaiming_uniform_(self.lokr_w2_a[adapter_name], a=math.sqrt(5)) nn.init.kaiming_uniform_(self.lokr_w2_b[adapter_name], a=math.sqrt(5)) if adapter_name in self.lokr_t2: nn.init.kaiming_uniform_(self.lokr_t2[adapter_name], a=math.sqrt(5)) # Initializes weight matrices similar to the way initialized in the LyCORIS repository. def reset_adapter_parameters_lycoris_way(self, adapter_name): if adapter_name in self.lokr_w1: nn.init.kaiming_uniform_(self.lokr_w1[adapter_name], a=math.sqrt(5)) else: nn.init.kaiming_uniform_(self.lokr_w1_a[adapter_name], a=math.sqrt(5)) nn.init.kaiming_uniform_(self.lokr_w1_b[adapter_name], a=math.sqrt(5)) if adapter_name in self.lokr_w2: nn.init.zeros_(self.lokr_w2[adapter_name]) else: nn.init.zeros_(self.lokr_w2_b[adapter_name]) nn.init.kaiming_uniform_(self.lokr_w2_a[adapter_name], a=math.sqrt(5)) if adapter_name in self.lokr_t2: nn.init.kaiming_uniform_(self.lokr_t2[adapter_name], a=math.sqrt(5)) def update_layer( self, adapter_name: str, r: int, alpha: float, rank_dropout: float, module_dropout: float, init_weights: bool, use_effective_conv2d: bool, decompose_both: bool, decompose_factor: int, **kwargs, ) -> None: """Internal function to create lokr adapter Args: adapter_name (`str`): Name for the adapter to add. r (`int`): Rank for the added adapter. alpha (`float`): Alpha for the added adapter. rank_dropout (`float`): The dropout probability for rank dimension during training module_dropout (`float`): The dropout probability for disabling adapter during training. init_weights (`bool`): Whether to initialize adapter weights. use_effective_conv2d (`bool`): Use parameter effective decomposition for Conv2d with ksize > 1. decompose_both (`bool`): Perform rank decomposition of left kronecker product matrix. decompose_factor (`int`): Kronecker product decomposition factor. 
""" if r <= 0: raise ValueError(f"`r` should be a positive integer value but the value passed is {r}") self.r[adapter_name] = r self.alpha[adapter_name] = alpha self.scaling[adapter_name] = alpha / r self.rank_dropout[adapter_name] = rank_dropout self.module_dropout[adapter_name] = module_dropout self.rank_dropout_scale[adapter_name] = kwargs["rank_dropout_scale"] base_layer = self.get_base_layer() # Determine shape of LoKr weights if isinstance(base_layer, nn.Linear): in_dim, out_dim = base_layer.in_features, base_layer.out_features in_m, in_n = factorization(in_dim, decompose_factor) out_l, out_k = factorization(out_dim, decompose_factor) shape = ((out_l, out_k), (in_m, in_n)) # ((a, b), (c, d)), out_dim = a*c, in_dim = b*d use_w1 = not (decompose_both and r < max(shape[0][0], shape[1][0]) / 2) use_w2 = not (r < max(shape[0][1], shape[1][1]) / 2) use_effective_conv2d = False elif isinstance(base_layer, nn.Conv2d): in_dim, out_dim = base_layer.in_channels, base_layer.out_channels k_size = base_layer.kernel_size in_m, in_n = factorization(in_dim, decompose_factor) out_l, out_k = factorization(out_dim, decompose_factor) shape = ((out_l, out_k), (in_m, in_n), *k_size) # ((a, b), (c, d), *k_size) use_w1 = not (decompose_both and r < max(shape[0][0], shape[1][0]) / 2) use_w2 = r >= max(shape[0][1], shape[1][1]) / 2 use_effective_conv2d = use_effective_conv2d and base_layer.kernel_size != (1, 1) else: raise TypeError(f"LoKr is not implemented for base layers of type {type(base_layer).__name__}") # Create weights with provided shape self.create_adapter_parameters(adapter_name, r, shape, use_w1, use_w2, use_effective_conv2d) # Initialize weights if init_weights: if init_weights == "lycoris": self.reset_adapter_parameters_lycoris_way(adapter_name) else: self.reset_adapter_parameters(adapter_name) else: self.reset_adapter_parameters_random(adapter_name) # Move new weights to device self._move_adapter_to_device_of_base_layer(adapter_name) self.set_adapter(self.active_adapters) def get_delta_weight(self, adapter_name: str) -> torch.Tensor: # https://github.com/KohakuBlueleaf/LyCORIS/blob/e4259b870d3354a9615a96be61cb5d07455c58ea/lycoris/modules/lokr.py#L224 if adapter_name in self.lokr_w1: w1 = self.lokr_w1[adapter_name] else: w1 = self.lokr_w1_a[adapter_name] @ self.lokr_w1_b[adapter_name] if adapter_name in self.lokr_w2: w2 = self.lokr_w2[adapter_name] elif adapter_name in self.lokr_t2: w2 = make_weight_cp(self.lokr_t2[adapter_name], self.lokr_w2_a[adapter_name], self.lokr_w2_b[adapter_name]) else: w2 = self.lokr_w2_a[adapter_name] @ self.lokr_w2_b[adapter_name] # Make weights with Kronecker product weight = make_kron(w1, w2, self.scaling[adapter_name]) weight = weight.reshape(self.get_base_layer().weight.shape) # Perform rank dropout during training - drop rows of addition weights rank_dropout = self.rank_dropout[adapter_name] if self.training and rank_dropout: drop = (torch.rand(weight.size(0)) > rank_dropout).float() drop = drop.view(-1, *[1] * len(weight.shape[1:])).to(weight.device) if self.rank_dropout_scale[adapter_name]: drop /= drop.mean() weight *= drop return weight def forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor: previous_dtype = x.dtype if self.disable_adapters: if self.merged: self.unmerge() result = self.base_layer(x, *args, **kwargs) elif self.merged: result = self.base_layer(x, *args, **kwargs) else: result = self.base_layer(x, *args, **kwargs) # Execute all the adapters for active_adapter in self.active_adapters: if active_adapter not in 
self._available_adapters: continue module_dropout = self.module_dropout[active_adapter] # Modify current execution weights if (not self.training) or (self.training and torch.rand(1) > module_dropout): result = result + self._get_delta_activations(active_adapter, x, *args, **kwargs) result = result.to(previous_dtype) return result class Linear(LoKrLayer): """LoKr implemented in Linear layer""" def __init__( self, base_layer: nn.Module, device: Optional[Union[str, torch.device]] = None, dtype: Optional[torch.dtype] = None, adapter_name: str = "default", r: int = 0, alpha: float = 0.0, rank_dropout: float = 0.0, module_dropout: float = 0.0, init_weights: bool = True, **kwargs, ): super().__init__(base_layer) # Create adapter and set it active self._active_adapter = adapter_name self.update_layer(adapter_name, r, alpha, rank_dropout, module_dropout, init_weights, **kwargs) def _get_delta_activations( self, adapter_name: str, input: torch.Tensor, *args: Any, **kwargs: Any ) -> torch.Tensor: delta_weight = self.get_delta_weight(adapter_name) input = self._cast_input_dtype(input, delta_weight.dtype) # don't add bias here, because the bias is already included in the output of the base_layer return F.linear(input, delta_weight) def __repr__(self) -> str: rep = super().__repr__() return "lokr." + rep class Conv2d(LoKrLayer): """LoKr implemented in Conv2d layer""" def __init__( self, base_layer: nn.Module, device: Optional[Union[str, torch.device]] = None, dtype: Optional[torch.dtype] = None, adapter_name: str = "default", r: int = 0, alpha: float = 0.0, rank_dropout: float = 0.0, module_dropout: float = 0.0, use_effective_conv2d: bool = False, init_weights: bool = True, **kwargs, ): super().__init__(base_layer) # Create adapter and set it active self._active_adapter = adapter_name self.update_layer( adapter_name, r, alpha, rank_dropout, module_dropout, init_weights, use_effective_conv2d, **kwargs ) def _get_delta_activations( self, adapter_name: str, input: torch.Tensor, *args: Any, **kwargs: Any ) -> torch.Tensor: delta_weight = self.get_delta_weight(adapter_name) input = self._cast_input_dtype(input, delta_weight.dtype) # don't add bias here, because the bias is already included in the output of the base_layer base_layer = self.get_base_layer() return F.conv2d( input, delta_weight, stride=base_layer.stride, padding=base_layer.padding, dilation=base_layer.dilation, groups=base_layer.groups, ) def __repr__(self) -> str: rep = super().__repr__() return "lokr." + rep # Below code is a direct copy from https://github.com/KohakuBlueleaf/LyCORIS/blob/eb460098187f752a5d66406d3affade6f0a07ece/lycoris/modules/lokr.py#L11 def factorization(dimension: int, factor: int = -1) -> tuple[int, int]: """Factorizes the provided number into the product of two numbers Args: dimension (`int`): The number that needs to be factorized. factor (`int`, optional): Factorization divider. The algorithm will try to output two numbers, one of each will be as close to the factor as possible. If -1 is provided, the decomposition algorithm would try to search dividers near the square root of the dimension. Defaults to -1. Returns: Tuple[`int`, `int`]: A tuple of two numbers, whose product is equal to the provided number. The first number is always less than or equal to the second. 
Example: ```py >>> factorization(256, factor=-1) (16, 16) >>> factorization(128, factor=-1) (8, 16) >>> factorization(127, factor=-1) (1, 127) >>> factorization(128, factor=4) (4, 32) ``` """ if factor > 0 and (dimension % factor) == 0: m = factor n = dimension // factor return m, n if factor == -1: factor = dimension m, n = 1, dimension length = m + n while m < n: new_m = m + 1 while dimension % new_m != 0: new_m += 1 new_n = dimension // new_m if new_m + new_n > length or new_m > factor: break else: m, n = new_m, new_n if m > n: n, m = m, n return m, n def make_weight_cp(t, wa, wb): rebuild2 = torch.einsum("i j k l, i p, j r -> p r k l", t, wa, wb) # [c, d, k1, k2] return rebuild2 def make_kron(w1, w2, scale=1.0): if len(w2.shape) == 4: w1 = w1.unsqueeze(2).unsqueeze(2) w2 = w2.contiguous() rebuild = torch.kron(w1, w2) return rebuild * scale
peft/src/peft/tuners/lokr/layer.py/0
{ "file_path": "peft/src/peft/tuners/lokr/layer.py", "repo_id": "peft", "token_count": 8044 }
245
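A small worked shape check (a sketch with illustrative dimensions) for the Kronecker composition computed by `make_kron` above: for a Linear layer with `out_dim=512`, `in_dim=768`, and `decompose_factor=-1`, `factorization` gives `512 = 16 * 32` and `768 = 24 * 32`, so `w1` has shape `(16, 24)`, the (possibly low-rank) `w2` has shape `(32, 32)`, and their Kronecker product recovers the `(512, 768)` base weight shape:

```python
import torch

w1 = torch.randn(16, 24)    # (out_l, in_m) from factorization(512) and factorization(768)
w2 = torch.randn(32, 32)    # (out_k, in_n)
delta = torch.kron(w1, w2)  # what make_kron computes, up to the scaling factor
assert delta.shape == (512, 768)
```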
# Copyright 2024-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import warnings from typing import Any, Optional import torch # from torch import nn from peft.import_utils import is_torchao_available from peft.tuners.tuners_utils import BaseTunerLayer, check_adapters_to_merge from .config import LoraConfig from .layer import Linear class TorchaoLoraLinear(Linear): """LoRA layer implementation for Linear layers using torchao data""" def __init__(self, *args, get_apply_tensor_subclass, **kwargs): # this is not strictly necessary, as kwargs are stored either way, but we want to error early if # get_apply_tensor_subclass is missing. if kwargs.get("lora_bias", False): raise ValueError(f"{self.__class__.__name__} does not support lora_bias yet, set it to False") super().__init__(*args, **kwargs) self.get_apply_tensor_subclass = get_apply_tensor_subclass self._check_dtype_supported() def _check_dtype_supported(self): # TODO: Not required once int4_weight_only is properly supported by torchao base_layer = self.get_base_layer() weight = base_layer.weight # pytest tests/test_gpu_examples.py::PeftTorchaoGPUTests::test_causal_lm_training_single_gpu_torchao_0_int8_weight_only if ( # torchao 0.7.0+ (hasattr(weight, "tensor_impl") and (weight.tensor_impl.data.dtype != torch.int8)) or # torchao < 0.7.0 (hasattr(weight, "layout_tensor") and (weight.layout_tensor.data.dtype != torch.int8)) ): raise ValueError(f"{type(self).__name__} only supports int8 weights for now.") def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None: from torchao import quantize_ adapter_names = check_adapters_to_merge(self, adapter_names) if not adapter_names: # no adapter to merge return self._check_dtype_supported() base_layer = self.get_base_layer() weight = base_layer.weight for active_adapter in adapter_names: try: weight = weight.dequantize() except NotImplementedError as exc: msg = ( f"Weights of type {type(weight).__name__} do not support dequantization (yet), which is needed to " "support merging." ) raise NotImplementedError(msg) from exc if safe_merge and not torch.isfinite(weight).all(): raise ValueError( f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken" ) weight += self.get_delta_weight(active_adapter) # TODO: once (if) torchao supports directly mutating the data, use that instead. del base_layer.weight base_layer.weight = weight quantize_(base_layer, self.get_apply_tensor_subclass()) del weight self.merged_adapters.append(active_adapter) def unmerge(self) -> None: from torchao import quantize_ if not self.merged: warnings.warn("Already unmerged. 
Nothing to do.") return while len(self.merged_adapters) > 0: active_adapter = self.merged_adapters.pop() if active_adapter not in self.lora_A.keys(): continue base_layer = self.get_base_layer() weight = base_layer.weight try: weight = weight.dequantize() except NotImplementedError as exc: msg = ( f"Weights of type {type(weight).__name__} do not support dequantization (yet), which is needed to " "support unmerging." ) raise NotImplementedError(msg) from exc weight -= self.get_delta_weight(active_adapter) # We go through a dummy module because overriding the weight.data does not work, the tensor retains the old # data. Therefore, we need to go through quantize_, which takes a module as input, and we need to delete and # re-assign the weight. # TODO: once (if) torchao supports directly mutating the data, use that instead. del base_layer.weight base_layer.weight = weight quantize_(base_layer, self.get_apply_tensor_subclass()) del weight def __repr__(self) -> str: rep = super().__repr__() return rep.replace("lora.Linear", f"lora.{self.__class__.__name__}") def dispatch_torchao( target: torch.nn.Module, adapter_name: str, lora_config: LoraConfig, **kwargs: Any, ) -> Optional[torch.nn.Module]: new_module = None if isinstance(target, BaseTunerLayer): target_base_layer = target.get_base_layer() else: target_base_layer = target if not hasattr(target_base_layer, "weight"): return new_module if not is_torchao_available(): return new_module from torchao.dtypes import AffineQuantizedTensor from torchao.quantization import LinearActivationQuantizedTensor if isinstance(target_base_layer.weight, (AffineQuantizedTensor, LinearActivationQuantizedTensor)): new_module = TorchaoLoraLinear(target, adapter_name, **kwargs) return new_module
peft/src/peft/tuners/lora/torchao.py/0
{ "file_path": "peft/src/peft/tuners/lora/torchao.py", "repo_id": "peft", "token_count": 2496 }
246
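A hedged sketch of the setup this dispatcher targets: a model quantized with torchao int8 weight-only quantization (the only weight dtype `TorchaoLoraLinear` accepts for now), then wrapped with LoRA. It assumes a recent `transformers` with `TorchAoConfig` support and `torchao` installed; the model name and target modules are illustrative:

```python
from transformers import AutoModelForCausalLM, TorchAoConfig
from peft import LoraConfig, get_peft_model

quant_config = TorchAoConfig(quant_type="int8_weight_only")
base = AutoModelForCausalLM.from_pretrained("facebook/opt-125m", quantization_config=quant_config)

# dispatch_torchao above routes the quantized Linear layers to TorchaoLoraLinear.
peft_model = get_peft_model(base, LoraConfig(r=8, target_modules=["q_proj", "v_proj"]))
```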
# Copyright 2025-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import warnings from typing import Optional import bitsandbytes as bnb import torch from peft.import_utils import is_bnb_4bit_available, is_bnb_available from peft.tuners.tuners_utils import BaseTunerLayer, check_adapters_to_merge from peft.utils.integrations import dequantize_bnb_weight from .layer import OFTLayer if is_bnb_available(): class Linear8bitLt(torch.nn.Module, OFTLayer): # OFT implemented in a dense layer def __init__( self, base_layer: torch.nn.Module, adapter_name: str, r: int = 8, oft_block_size: int = 0, module_dropout: float = 0.0, init_weights: bool = True, coft: bool = False, eps: float = 6e-5, block_share: bool = False, use_cayley_neumann: bool = False, num_cayley_neumann_terms: int = 5, **kwargs, ) -> None: super().__init__() OFTLayer.__init__(self, base_layer) self.fan_in_fan_out = False self._active_adapter = adapter_name self.update_layer( adapter_name, r, oft_block_size=oft_block_size, module_dropout=module_dropout, coft=coft, eps=eps, block_share=block_share, init_weights=init_weights, use_cayley_neumann=use_cayley_neumann, num_cayley_neumann_terms=num_cayley_neumann_terms, ) def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None: """ Merge the active adapter weights into the base weights Args: safe_merge (`bool`, *optional*): If True, the merge operation will be performed in a copy of the original weights and check for NaNs before merging the weights. This is useful if you want to check if the merge operation will produce NaNs. Defaults to `False`. adapter_names (`list[str]`, *optional*): The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults to `None`. """ adapter_names = check_adapters_to_merge(self, adapter_names) if not adapter_names: # no adapter to merge return for active_adapter in adapter_names: if active_adapter not in self.oft_R.keys(): continue warnings.warn("Merge oft module to 8-bit linear may get different generations due to rounding errors.") weight = self.get_base_layer().weight state = self.get_base_layer().state if state.SCB is None: state.SCB = weight.SCB # Dequantize the result of identity matrix and int8 weight because bitsandbytes does not support int8 # dequantization directly output = dequantize_bnb_weight(weight, state=state) oft_data = self.get_delta_weight(active_adapter) output = torch.transpose(output, 0, 1) w_data = torch.mm(oft_data, output.to(oft_data.dtype)) w_data = torch.transpose(w_data, 0, 1) w_data = output.to(oft_data.dtype).to(oft_data.device) if safe_merge and not torch.isfinite(w_data).all(): raise ValueError( f"NaNs detected in the merged weights. 
The adapter {active_adapter} seems to be broken" ) self.get_base_layer().weight = bnb.nn.Int8Params( w_data.to("cpu"), requires_grad=False, has_fp16_weights=weight.has_fp16_weights ).to(weight.device) state.reset_grads() self.merged_adapters.append(active_adapter) def unmerge(self) -> None: """ This method unmerges all merged adapter layers from the base weights. """ if not self.merged: warnings.warn("Already unmerged. Nothing to do.") return while len(self.merged_adapters) > 0: active_adapter = self.merged_adapters.pop() if active_adapter not in self.oft_R.keys(): continue warnings.warn( "Unmerge oft module to 8-bit linear may get different generations due to rounding errors." ) weight = self.get_base_layer().weight state = self.get_base_layer().state if state.SCB is None: state.SCB = weight.SCB output = dequantize_bnb_weight(weight, state=state) oft_data = self.get_delta_weight(active_adapter) output = torch.transpose(output, 0, 1) w_data = torch.mm(oft_data.t(), output.to(oft_data.dtype)) w_data = torch.transpose(w_data, 0, 1) w_data = w_data.to(oft_data.dtype).to(oft_data.device) self.get_base_layer().weight = bnb.nn.Int8Params( w_data.to("cpu"), requires_grad=False, has_fp16_weights=weight.has_fp16_weights ).to(weight.device) state.reset_grads() def get_delta_weight(self, adapter): return self.oft_R[adapter].get_weight() def forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor: if self.disable_adapters: if self.merged: self.unmerge() result = self.base_layer(x, *args, **kwargs) elif self.merged: result = self.base_layer(x, *args, **kwargs) else: for active_adapter in self.active_adapters: if active_adapter not in self.oft_R.keys(): continue oft_R = self.oft_R[active_adapter] requires_conversion = not torch.is_autocast_enabled() if requires_conversion: expected_dtype = x.dtype x = self._cast_input_dtype(x, oft_R.weight.dtype) x = oft_R(x) if requires_conversion: x = x.to(expected_dtype) result = self.base_layer(x, *args, **kwargs) return result def __repr__(self) -> str: rep = super().__repr__() return "oft." 
+ rep def dispatch_bnb_8bit(target: torch.nn.Module, adapter_name: str, **kwargs): new_module = None if isinstance(target, BaseTunerLayer): target_base_layer = target.get_base_layer() else: target_base_layer = target loaded_in_8bit = kwargs.get("loaded_in_8bit", False) if loaded_in_8bit and isinstance(target_base_layer, bnb.nn.Linear8bitLt): eightbit_kwargs = kwargs.copy() eightbit_kwargs.update( { "has_fp16_weights": target.state.has_fp16_weights, "threshold": target.state.threshold, "index": target.index, } ) new_module = Linear8bitLt(target, adapter_name, **eightbit_kwargs) return new_module if is_bnb_4bit_available(): class Linear4bit(torch.nn.Module, OFTLayer): # OFT implemented in a dense layer def __init__( self, base_layer: torch.nn.Module, adapter_name: str, r: int = 8, oft_block_size: int = 0, module_dropout: float = 0.0, coft: bool = False, eps: float = 6e-5, block_share: bool = False, init_weights: bool = True, use_cayley_neumann: bool = False, num_cayley_neumann_terms: int = 5, **kwargs, ) -> None: super().__init__() OFTLayer.__init__(self, base_layer) self.fan_in_fan_out = False self._active_adapter = adapter_name self.update_layer( adapter_name, r, oft_block_size=oft_block_size, module_dropout=module_dropout, coft=coft, eps=eps, block_share=block_share, init_weights=init_weights, use_cayley_neumann=use_cayley_neumann, num_cayley_neumann_terms=num_cayley_neumann_terms, ) def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None: """ Merge the active adapter weights into the base weights Args: safe_merge (`bool`, *optional*): If True, the merge operation will be performed in a copy of the original weights and check for NaNs before merging the weights. This is useful if you want to check if the merge operation will produce NaNs. Defaults to `False`. adapter_names (`list[str]`, *optional*): The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults to `None`. """ adapter_names = check_adapters_to_merge(self, adapter_names) if not adapter_names: # no adapter to merge return for active_adapter in adapter_names: if active_adapter not in self.oft_R.keys(): continue warnings.warn("Merge oft module to 4-bit linear may get different generations due to rounding errors.") # Refer to https://gist.github.com/ChrisHayduk/1a53463331f52dca205e55982baf9930 weight = self.get_base_layer().weight kwargs = weight.__dict__ output = dequantize_bnb_weight(weight, state=weight.quant_state) oft_data = self.get_delta_weight(active_adapter) output = torch.transpose(output, 0, 1) w_data = torch.mm(oft_data, output.to(oft_data.dtype)) w_data = torch.transpose(w_data, 0, 1) w_data = output.to(oft_data.dtype).to(oft_data.device) if safe_merge and not torch.isfinite(w_data).all(): raise ValueError( f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken" ) if "bnb_quantized" in kwargs: kwargs["bnb_quantized"] = False kwargs["requires_grad"] = False kwargs.pop("data", None) # torch.compile can introduce attributes preceded by '_', remove them kwargs = {k: v for k, v in kwargs.items() if not k.startswith("_")} self.get_base_layer().weight = bnb.nn.Params4bit(w_data.to("cpu"), **kwargs).to(weight.device) self.merged_adapters.append(active_adapter) def unmerge(self) -> None: """ This method unmerges all merged adapter layers from the base weights. """ if not self.merged: warnings.warn("Already unmerged. 
Nothing to do.") return while len(self.merged_adapters) > 0: active_adapter = self.merged_adapters.pop() if active_adapter not in self.oft_R.keys(): continue warnings.warn( "Unmerge oft module to 4-bit linear may get different generations due to rounding errors." ) weight = self.get_base_layer().weight kwargs = weight.__dict__ output = dequantize_bnb_weight(weight, state=weight.quant_state) oft_data = self.get_delta_weight(active_adapter) output = torch.transpose(output, 0, 1) w_data = torch.mm(oft_data.t(), output.to(oft_data.dtype)) w_data = torch.transpose(w_data, 0, 1) w_data = output.to(oft_data.dtype).to(oft_data.device) if "bnb_quantized" in kwargs: kwargs["bnb_quantized"] = False kwargs["requires_grad"] = False kwargs.pop("data", None) self.get_base_layer().weight = bnb.nn.Params4bit(w_data.to("cpu"), **kwargs).to(weight.device) def get_delta_weight(self, adapter): return self.oft_R[adapter].get_weight() def forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor: if self.disable_adapters: if self.merged: self.unmerge() result = self.base_layer(x, *args, **kwargs) elif self.merged: result = self.base_layer(x, *args, **kwargs) else: # As per Tim Dettmers, for 4bit, we need to defensively clone here. # The reason is that in some cases, an error can occur that backprop # does not work on a manipulated view. This issue may be solved with # newer PyTorch versions but this would need extensive testing to be # sure. # result = result.clone() for active_adapter in self.active_adapters: if active_adapter not in self.oft_R.keys(): continue oft_R = self.oft_R[active_adapter] requires_conversion = not torch.is_autocast_enabled() if requires_conversion: expected_dtype = x.dtype x = self._cast_input_dtype(x, oft_R.weight.dtype) x = oft_R(x) if requires_conversion: x = x.to(expected_dtype) result = self.base_layer(x, *args, **kwargs) return result def __repr__(self) -> str: rep = super().__repr__() return "oft." + rep def dispatch_bnb_4bit(target: torch.nn.Module, adapter_name: str, **kwargs): new_module = None if isinstance(target, BaseTunerLayer): target_base_layer = target.get_base_layer() else: target_base_layer = target loaded_in_4bit = kwargs.get("loaded_in_4bit", False) if loaded_in_4bit and is_bnb_4bit_available() and isinstance(target_base_layer, bnb.nn.Linear4bit): fourbit_kwargs = kwargs.copy() fourbit_kwargs.update( { "compute_dtype": target_base_layer.compute_dtype, "compress_statistics": target_base_layer.weight.compress_statistics, "quant_type": target_base_layer.weight.quant_type, } ) new_module = Linear4bit(target, adapter_name, **fourbit_kwargs) return new_module
peft/src/peft/tuners/oft/bnb.py/0
{ "file_path": "peft/src/peft/tuners/oft/bnb.py", "repo_id": "peft", "token_count": 8087 }
247
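A hedged sketch of the path these classes serve: a base model loaded in 4-bit with bitsandbytes and then wrapped with OFT, so that `dispatch_bnb_4bit` above picks the `Linear4bit` wrapper (model name and hyperparameters are illustrative only):

```python
from transformers import AutoModelForCausalLM, BitsAndBytesConfig
from peft import OFTConfig, get_peft_model

bnb_config = BitsAndBytesConfig(load_in_4bit=True)
base = AutoModelForCausalLM.from_pretrained("facebook/opt-125m", quantization_config=bnb_config)

config = OFTConfig(r=8, target_modules=["q_proj", "v_proj"])
model = get_peft_model(base, config)  # quantized q/v projections are wrapped by Linear4bit above
```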
# Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import warnings from dataclasses import dataclass, field from typing import Optional, Union from peft.config import PeftConfig from peft.utils import PeftType @dataclass class VeraConfig(PeftConfig): """ This is the configuration class to store the configuration of a [`VeraModel`]. Paper: https://huggingface.co/papers/2310.11454. Args: r (`int`, *optional*, defaults to `256`): VeRA parameter dimension ("rank"). Choose higher values than LoRA ranks here, since VeRA uses far fewer parameters than LoRA (see Table 1). target_modules (`Union[List[str], str]`): The names of the modules to apply Vera to. Only linear layers are supported. projection_prng_key (`int`): Vera PRNG init key. Used for initialising vera_A and vera_B for new models or when loading a checkpoint that did not include these projections. Defaults to `0`. save_projection (`bool`): Whether to save the vera_A / vera_B projections in the state dict alongside per layer lambda_b / lambda_d weights. This will increase the size of the checkpoint, but guarantee that we can reload the checkpoint on all system configurations. Defaults to `True`. vera_dropout (`float`): The dropout probability for Vera layers. d_initial (`float`, *optional*, defaults to `0.1`): Initial init value for `vera_lambda_d` vector used when initializing the VeRA parameters. Small values (<=0.1) are recommended (see Table 6c in the paper). fan_in_fan_out (`bool`): Set this to True if the layer to replace stores weight like (fan_in, fan_out). For example, gpt-2 uses `Conv1D` which stores weights like (fan_in, fan_out) and hence this should be set to `True`. bias (`str`): Bias type for Vera. Can be 'none', 'all' or 'vera_only'. If 'all' or 'vera_only', the corresponding biases will be updated during training. Be aware that this means that, even when disabling the adapters, the model will not produce the same output as the base model would have without adaptation. modules_to_save (`List[str]`): List of modules apart from Vera layers to be set as trainable and saved in the final checkpoint. init_weights (`bool`): Whether to initialize the weights of the Vera layers with their default initialization. Don't change this setting, except if you know exactly what you're doing. layers_to_transform (`Union[List[int],int]`): The layer indexes to transform, if this argument is specified, it will apply the Vera transformations on the layer indexes that are specified in this list. If a single integer is passed, it will apply the Vera transformations on the layer at this index. layers_pattern (`Optional[Union[List[str], str]]`): The layer pattern name, used only if `layers_to_transform` is different from `None`. This should target the `nn.ModuleList` of the model, which is often called `'layers'` or `'h'`. 
""" r: int = field(default=256, metadata={"help": "Vera attention dimension"}) target_modules: Optional[Union[list[str], str]] = field( default=None, metadata={ "help": ( "List of module names or regex expression of the module names to replace with Vera." "For example, ['q', 'v'] or '.*decoder.*(SelfAttention|EncDecAttention).*(q|v)$'. " "Only linear layers are supported." ) }, ) projection_prng_key: int = field( default=0, metadata={ "help": ( "Vera PRNG init key. Used for initialising vera_A and vera_B for new models or when loading a " "checkpoint that did not include these projections." ) }, ) save_projection: bool = field( default=True, metadata={ "help": ( "Whether to save the vera_A / vera_B projections in the state dict alongside per layer lambda_b / " "lambda_d weights. This will increase the size of the checkpoint, but guarantee that we can reload " "the checkpoint on all system configurations." ) }, ) vera_dropout: float = field(default=0.0, metadata={"help": "Vera dropout"}) d_initial: float = field(default=0.1, metadata={"help": "Initial init value for d vector."}) fan_in_fan_out: bool = field( default=False, metadata={"help": "Set this to True if the layer to replace stores weight like (fan_in, fan_out)"}, ) bias: str = field(default="none", metadata={"help": "Bias type for Vera. Can be 'none', 'all' or 'vera_only'"}) modules_to_save: Optional[list[str]] = field( default=None, metadata={ "help": ( "List of modules apart from Vera layers to be set as trainable and saved in the final checkpoint. For" " example, in Sequence Classification or Token Classification tasks, the final layer" " `classifier/score` are randomly initialized and as such need to be trainable and saved." ) }, ) init_weights: bool = field( default=True, metadata={ "help": ( "Whether to initialize the weights of the Vera layers with their default initialization. Don't change " "this setting, except if you know exactly what you're doing." ), }, ) layers_to_transform: Optional[Union[list[int], int]] = field( default=None, metadata={ "help": ( "The layer indexes to transform, is this argument is specified, PEFT will transform only the layers" " indexes that are specified inside this list. If a single integer is passed, PEFT will transform only" " the layer at this index." ) }, ) layers_pattern: Optional[Union[list[str], str]] = field( default=None, metadata={ "help": ( "The layer pattern name, used only if `layers_to_transform` is different to None and if the layer " "pattern is not in the common layers pattern. This should target the `nn.ModuleList` of the " "model, which is often called `'layers'` or `'h'`." ) }, ) def __post_init__(self): super().__post_init__() self.peft_type = PeftType.VERA self.target_modules = ( set(self.target_modules) if isinstance(self.target_modules, list) else self.target_modules ) # check for layers_to_transform and layers_pattern if self.layers_pattern and not self.layers_to_transform: raise ValueError("When `layers_pattern` is specified, `layers_to_transform` must also be specified. ") if not self.save_projection: warnings.warn( "Specified to not save vera_A and vera_B within the state dictionary, instead they will be restored " "using the PRNG key store in `config.projection_prng_key`. Consider setting `config.save_projection` " "to `True` to guarantee restoring the checkpoint correctly on all system configurations." )
peft/src/peft/tuners/vera/config.py/0
{ "file_path": "peft/src/peft/tuners/vera/config.py", "repo_id": "peft", "token_count": 3108 }
248
# Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import enum from typing import Optional class PeftType(str, enum.Enum): """ Enum class for the different types of adapters in PEFT. Supported PEFT types: - PROMPT_TUNING - MULTITASK_PROMPT_TUNING - P_TUNING - PREFIX_TUNING - LORA - ADALORA - BOFT - ADAPTION_PROMPT - IA3 - LOHA - LOKR - OFT - XLORA - POLY - LN_TUNING - VERA - FOURIERFT - HRA - BONE - MISS - RANDLORA - SHIRA - C3A - ROAD """ PROMPT_TUNING = "PROMPT_TUNING" MULTITASK_PROMPT_TUNING = "MULTITASK_PROMPT_TUNING" P_TUNING = "P_TUNING" PREFIX_TUNING = "PREFIX_TUNING" LORA = "LORA" ADALORA = "ADALORA" BOFT = "BOFT" ADAPTION_PROMPT = "ADAPTION_PROMPT" IA3 = "IA3" LOHA = "LOHA" LOKR = "LOKR" OFT = "OFT" POLY = "POLY" LN_TUNING = "LN_TUNING" VERA = "VERA" FOURIERFT = "FOURIERFT" XLORA = "XLORA" HRA = "HRA" VBLORA = "VBLORA" CPT = "CPT" BONE = "BONE" MISS = "MISS" RANDLORA = "RANDLORA" ROAD = "ROAD" TRAINABLE_TOKENS = "TRAINABLE_TOKENS" SHIRA = "SHIRA" C3A = "C3A" class TaskType(str, enum.Enum): """ Enum class for the different types of tasks supported by PEFT. Overview of the supported task types: - SEQ_CLS: Text classification. - SEQ_2_SEQ_LM: Sequence-to-sequence language modeling. - CAUSAL_LM: Causal language modeling. - TOKEN_CLS: Token classification. - QUESTION_ANS: Question answering. - FEATURE_EXTRACTION: Feature extraction. Provides the hidden states which can be used as embeddings or features for downstream tasks. """ SEQ_CLS = "SEQ_CLS" SEQ_2_SEQ_LM = "SEQ_2_SEQ_LM" CAUSAL_LM = "CAUSAL_LM" TOKEN_CLS = "TOKEN_CLS" QUESTION_ANS = "QUESTION_ANS" FEATURE_EXTRACTION = "FEATURE_EXTRACTION" def register_peft_method( *, name: str, config_cls, model_cls, prefix: Optional[str] = None, is_mixed_compatible=False ) -> None: """ Function to register a finetuning method like LoRA to be available in PEFT. This method takes care of registering the PEFT method's configuration class, the model class, and optionally the prefix. Args: name (str): The name of the PEFT method. It must be unique. config_cls: The configuration class of the PEFT method. model_cls: The model class of the PEFT method. prefix (Optional[str], optional): The prefix of the PEFT method. It should be unique. If not provided, the name of the PEFT method is used as the prefix. is_mixed_compatible (bool, optional): Whether the PEFT method is compatible with `PeftMixedModel`. If you're not sure, leave it as False (default). 
Example: ```py # inside of peft/tuners/my_peft_method/__init__.py from peft.utils import register_peft_method register_peft_method(name="my_peft_method", config_cls=MyConfig, model_cls=MyModel) ``` """ from peft.mapping import ( PEFT_TYPE_TO_CONFIG_MAPPING, PEFT_TYPE_TO_MIXED_MODEL_MAPPING, PEFT_TYPE_TO_PREFIX_MAPPING, PEFT_TYPE_TO_TUNER_MAPPING, ) if name.endswith("_"): raise ValueError(f"Please pass the name of the PEFT method without '_' suffix, got {name}.") if not name.islower(): raise ValueError(f"The name of the PEFT method should be in lower case letters, got {name}.") if name.upper() not in list(PeftType): raise ValueError(f"Unknown PEFT type {name.upper()}, please add an entry to peft.utils.peft_types.PeftType.") peft_type = getattr(PeftType, name.upper()) # model_cls can be None for prompt learning methods, which don't have dedicated model classes if prefix is None: prefix = name + "_" if ( (peft_type in PEFT_TYPE_TO_CONFIG_MAPPING) or (peft_type in PEFT_TYPE_TO_TUNER_MAPPING) or (peft_type in PEFT_TYPE_TO_MIXED_MODEL_MAPPING) ): raise KeyError(f"There is already PEFT method called '{name}', please choose a unique name.") if prefix in PEFT_TYPE_TO_PREFIX_MAPPING: raise KeyError(f"There is already a prefix called '{prefix}', please choose a unique prefix.") model_cls_prefix = getattr(model_cls, "prefix", None) if (model_cls_prefix is not None) and (model_cls_prefix != prefix): raise ValueError( f"Inconsistent prefixes found: '{prefix}' and '{model_cls_prefix}' (they should be the same)." ) PEFT_TYPE_TO_PREFIX_MAPPING[peft_type] = prefix PEFT_TYPE_TO_CONFIG_MAPPING[peft_type] = config_cls PEFT_TYPE_TO_TUNER_MAPPING[peft_type] = model_cls if is_mixed_compatible: PEFT_TYPE_TO_MIXED_MODEL_MAPPING[peft_type] = model_cls
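As a quick, hedged illustration of how these enums and the mappings filled by `register_peft_method` surface outside this module (LoRA is used only as a familiar example, and the import locations assume a recent PEFT version):

```python
from peft import LoraConfig
from peft.mapping import PEFT_TYPE_TO_CONFIG_MAPPING, PEFT_TYPE_TO_PREFIX_MAPPING
from peft.utils import PeftType, TaskType

# Every config class records its own peft_type; task_type is what users pass in.
config = LoraConfig(task_type=TaskType.CAUSAL_LM, r=8)
assert config.peft_type == PeftType.LORA
assert config.task_type == TaskType.CAUSAL_LM

# The lookup tables populated by register_peft_method can be inspected directly.
assert PEFT_TYPE_TO_CONFIG_MAPPING[PeftType.LORA] is LoraConfig
assert PEFT_TYPE_TO_PREFIX_MAPPING[PeftType.LORA] == "lora_"
```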
peft/src/peft/utils/peft_types.py/0
{ "file_path": "peft/src/peft/utils/peft_types.py", "repo_id": "peft", "token_count": 2357 }
249
# Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import platform import tempfile from unittest.mock import Mock, call, patch import pytest import torch from safetensors.torch import load_file as safe_load_file from transformers import ( AutoModelForCausalLM, AutoTokenizer, DataCollatorForLanguageModeling, Trainer, TrainingArguments, ) from peft import ( AdaLoraConfig, BOFTConfig, BoneConfig, C3AConfig, CPTConfig, FourierFTConfig, HRAConfig, IA3Config, LoraConfig, MissConfig, OFTConfig, PrefixTuningConfig, PromptEncoderConfig, PromptTuningConfig, PromptTuningInit, RoadConfig, ShiraConfig, VBLoRAConfig, VeraConfig, get_peft_model, ) from .testing_common import PeftCommonTester from .testing_utils import device_count, hub_online_once, load_dataset_english_quotes, set_init_weights_false PEFT_DECODER_MODELS_TO_TEST = [ "hf-internal-testing/tiny-random-OPTForCausalLM", "hf-internal-testing/tiny-random-GPT2LMHeadModel", "hf-internal-testing/tiny-random-BloomForCausalLM", "hf-internal-testing/tiny-random-gpt_neo", "hf-internal-testing/tiny-random-GPTJForCausalLM", "hf-internal-testing/tiny-random-GPTBigCodeForCausalLM", "trl-internal-testing/tiny-random-LlamaForCausalLM", "peft-internal-testing/tiny-dummy-qwen2", "hf-internal-testing/tiny-random-Gemma3ForCausalLM", ] SMALL_GRID_MODELS = [ "hf-internal-testing/tiny-random-gpt2", "hf-internal-testing/tiny-random-OPTForCausalLM", "hf-internal-testing/tiny-random-MistralForCausalLM", "peft-internal-testing/tiny-dummy-qwen2", "trl-internal-testing/tiny-random-LlamaForCausalLM", ] # TODO Missing from this list are LoKr, LoHa, LN Tuning, add them # Note: If the PEFT method offers an initialization option to make it an identity transform (typically via the # init_weights argument), then this option should be set here, if it's not already the default. 
ALL_CONFIGS = [ ( AdaLoraConfig, { "task_type": "CAUSAL_LM", "target_modules": None, "total_step": 1, }, ), ( BOFTConfig, { "task_type": "CAUSAL_LM", "target_modules": None, }, ), ( BoneConfig, { "task_type": "CAUSAL_LM", "target_modules": None, "r": 2, }, ), ( MissConfig, { "task_type": "CAUSAL_LM", "target_modules": None, "r": 2, }, ), ( CPTConfig, { "task_type": "CAUSAL_LM", "cpt_token_ids": [0, 1, 2, 3, 4, 5, 6, 7], # Example token IDs for testing "cpt_mask": [1, 1, 1, 1, 1, 1, 1, 1], "cpt_tokens_type_mask": [1, 2, 2, 2, 3, 3, 4, 4], }, ), ( FourierFTConfig, { "task_type": "CAUSAL_LM", "n_frequency": 10, "target_modules": None, }, ), ( HRAConfig, { "task_type": "CAUSAL_LM", "target_modules": None, }, ), ( IA3Config, { "task_type": "CAUSAL_LM", "target_modules": None, "feedforward_modules": None, }, ), ( LoraConfig, { "task_type": "CAUSAL_LM", "r": 8, "lora_alpha": 32, "target_modules": None, "lora_dropout": 0.05, "bias": "none", }, ), # LoRA + trainable tokens ( LoraConfig, { "task_type": "CAUSAL_LM", "r": 8, "lora_alpha": 32, "target_modules": None, "lora_dropout": 0.05, "bias": "none", "trainable_token_indices": [0, 1, 3], }, ), ( OFTConfig, { "task_type": "CAUSAL_LM", "target_modules": None, }, ), ( PrefixTuningConfig, { "task_type": "CAUSAL_LM", "num_virtual_tokens": 10, }, ), ( PromptEncoderConfig, { "task_type": "CAUSAL_LM", "num_virtual_tokens": 10, "encoder_hidden_size": 32, }, ), ( PromptTuningConfig, { "task_type": "CAUSAL_LM", "num_virtual_tokens": 10, }, ), ( RoadConfig, { "task_type": "CAUSAL_LM", "variant": "road_1", "group_size": 2, }, ), ( ShiraConfig, { "r": 1, "task_type": "CAUSAL_LM", "target_modules": None, "init_weights": False, }, ), ( VBLoRAConfig, { "task_type": "CAUSAL_LM", "target_modules": None, "vblora_dropout": 0.05, "vector_length": 1, "num_vectors": 2, }, ), ( VeraConfig, { "task_type": "CAUSAL_LM", "r": 8, "target_modules": None, "vera_dropout": 0.05, "projection_prng_key": 0xFF, "d_initial": 0.1, "save_projection": True, "bias": "none", }, ), ( C3AConfig, { "task_type": "CAUSAL_LM", "block_size": 1, # Some test cases contain shapes of prime numbers where `block_size` must be 1 "target_modules": None, }, ), ] def _skip_if_not_conv1d_supported(model_id, config_cls): if "GPT2LMHeadModel" in model_id and config_cls in [ BOFTConfig, BoneConfig, HRAConfig, OFTConfig, RoadConfig, ShiraConfig, C3AConfig, MissConfig, ]: pytest.skip("Skipping BOFT/HRA/OFT/Bone/Road/SHiRA/C3A/MiSS for GPT2LMHeadModel") def _skip_adalora_oft_hra_bone_for_gpt2(model_id, config_cls): if "GPT2LMHeadModel" in model_id and config_cls in [ AdaLoraConfig, BOFTConfig, HRAConfig, OFTConfig, BoneConfig, C3AConfig, RoadConfig, MissConfig, ]: pytest.skip("Skipping AdaLora/BOFT/HRA/OFT/Bone/MiSS for GPT2LMHeadModel") class TestDecoderModels(PeftCommonTester): transformers_class = AutoModelForCausalLM def skipTest(self, reason=""): # for backwards compatibility with unittest style test classes pytest.skip(reason) def prepare_inputs_for_testing(self): input_ids = torch.tensor([[1, 1, 1], [1, 2, 1]]).to(self.torch_device) attention_mask = torch.tensor([[1, 1, 1], [1, 0, 1]]).to(self.torch_device) return {"input_ids": input_ids, "attention_mask": attention_mask} @pytest.mark.parametrize("model_id", PEFT_DECODER_MODELS_TO_TEST) @pytest.mark.parametrize("config_cls,config_kwargs", ALL_CONFIGS) def test_attributes_parametrized(self, model_id, config_cls, config_kwargs): _skip_if_not_conv1d_supported(model_id, config_cls) self._test_model_attr(model_id, config_cls, config_kwargs.copy()) 
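The note above about identity initialization can be seen directly: with default settings most adapters start as a no-op, which is why tests that need a real weight delta go through `set_init_weights_false`. A hedged, standalone sketch (not part of the test suite) using LoRA on one of the tiny test models:

```python
import torch
from transformers import AutoModelForCausalLM
from peft import LoraConfig, get_peft_model

model_id = "hf-internal-testing/tiny-random-OPTForCausalLM"
base = AutoModelForCausalLM.from_pretrained(model_id).eval()
input_ids = torch.tensor([[1, 2, 3]])
with torch.no_grad():
    logits_base = base(input_ids).logits

# Default LoRA init keeps lora_B at zero, so the adapted model is an exact
# identity transform of the base model.
peft_model = get_peft_model(base, LoraConfig(task_type="CAUSAL_LM")).eval()
with torch.no_grad():
    logits_peft = peft_model(input_ids).logits

torch.testing.assert_close(logits_base, logits_peft)
```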
@pytest.mark.parametrize("model_id", PEFT_DECODER_MODELS_TO_TEST) @pytest.mark.parametrize("config_cls,config_kwargs", ALL_CONFIGS) def test_adapter_name(self, model_id, config_cls, config_kwargs): _skip_if_not_conv1d_supported(model_id, config_cls) self._test_adapter_name(model_id, config_cls, config_kwargs.copy()) @pytest.mark.parametrize("model_id", PEFT_DECODER_MODELS_TO_TEST) @pytest.mark.parametrize("config_cls,config_kwargs", ALL_CONFIGS) def test_prepare_for_training_parametrized(self, model_id, config_cls, config_kwargs): _skip_if_not_conv1d_supported(model_id, config_cls) self._test_prepare_for_training(model_id, config_cls, config_kwargs.copy()) @pytest.mark.parametrize("model_id", PEFT_DECODER_MODELS_TO_TEST) @pytest.mark.parametrize("config_cls,config_kwargs", ALL_CONFIGS) def test_prompt_tuning_text_prepare_for_training(self, model_id, config_cls, config_kwargs): if config_cls != PromptTuningConfig: pytest.skip(f"This test does not apply to {config_cls}") config_kwargs = config_kwargs.copy() config_kwargs["prompt_tuning_init"] = PromptTuningInit.TEXT config_kwargs["prompt_tuning_init_text"] = "This is a test prompt." config_kwargs["tokenizer_name_or_path"] = model_id self._test_prepare_for_training(model_id, config_cls, config_kwargs.copy()) def test_prompt_tuning_text_tokenizer_kwargs(self): # Allow users to pass additional arguments to Tokenizer.from_pretrained # Fix for #1032 mock = Mock() orig_from_pretrained = AutoTokenizer.from_pretrained def mock_autotokenizer_from_pretrained(*args, **kwargs): mock(*args, **kwargs) return orig_from_pretrained(config.tokenizer_name_or_path) model_id = "hf-internal-testing/tiny-random-OPTForCausalLM" config = PromptTuningConfig( base_model_name_or_path=model_id, tokenizer_name_or_path=model_id, num_virtual_tokens=10, prompt_tuning_init=PromptTuningInit.TEXT, task_type="CAUSAL_LM", prompt_tuning_init_text="This is a test prompt.", tokenizer_kwargs={"trust_remote_code": True, "foo": "bar"}, ) model = self.transformers_class.from_pretrained(model_id).to(self.torch_device) with patch("transformers.AutoTokenizer.from_pretrained", mock_autotokenizer_from_pretrained): _ = get_peft_model(model, config) expected_call = call(model_id, trust_remote_code=True, foo="bar") assert mock.call_args == expected_call def test_prompt_tuning_config_invalid_args(self): # Raise an error when tokenizer_kwargs is used with prompt_tuning_init!='TEXT', because this argument has no # function in that case model_id = "hf-internal-testing/tiny-random-OPTForCausalLM" with pytest.raises(ValueError, match="tokenizer_kwargs only valid when using prompt_tuning_init='TEXT'."): PromptTuningConfig( base_model_name_or_path=model_id, tokenizer_name_or_path=model_id, num_virtual_tokens=10, task_type="CAUSAL_LM", prompt_tuning_init_text="This is a test prompt.", prompt_tuning_init=PromptTuningInit.RANDOM, # <= should not be used together with tokenizer_kwargs tokenizer_kwargs={"trust_remote_code": True, "foo": "bar"}, ) @pytest.mark.parametrize("model_id", PEFT_DECODER_MODELS_TO_TEST) @pytest.mark.parametrize("config_cls,config_kwargs", ALL_CONFIGS) def test_save_pretrained(self, model_id, config_cls, config_kwargs): _skip_if_not_conv1d_supported(model_id, config_cls) self._test_save_pretrained(model_id, config_cls, config_kwargs.copy()) @pytest.mark.parametrize("model_id", PEFT_DECODER_MODELS_TO_TEST) @pytest.mark.parametrize("config_cls,config_kwargs", ALL_CONFIGS) def test_save_pretrained_pickle(self, model_id, config_cls, config_kwargs): 
_skip_if_not_conv1d_supported(model_id, config_cls) self._test_save_pretrained(model_id, config_cls, config_kwargs.copy(), safe_serialization=False) @pytest.mark.parametrize("model_id", PEFT_DECODER_MODELS_TO_TEST) @pytest.mark.parametrize("config_cls,config_kwargs", ALL_CONFIGS) def test_save_pretrained_selected_adapters(self, model_id, config_cls, config_kwargs): _skip_if_not_conv1d_supported(model_id, config_cls) self._test_save_pretrained_selected_adapters(model_id, config_cls, config_kwargs.copy()) @pytest.mark.parametrize("model_id", PEFT_DECODER_MODELS_TO_TEST) @pytest.mark.parametrize("config_cls,config_kwargs", ALL_CONFIGS) def test_save_pretrained_selected_adapters_pickle(self, model_id, config_cls, config_kwargs): _skip_if_not_conv1d_supported(model_id, config_cls) self._test_save_pretrained_selected_adapters( model_id, config_cls, config_kwargs.copy(), safe_serialization=False ) @pytest.mark.parametrize("model_id", PEFT_DECODER_MODELS_TO_TEST) @pytest.mark.parametrize("config_cls,config_kwargs", ALL_CONFIGS) def test_from_pretrained_config_construction(self, model_id, config_cls, config_kwargs): _skip_if_not_conv1d_supported(model_id, config_cls) self._test_from_pretrained_config_construction(model_id, config_cls, config_kwargs.copy()) @pytest.mark.parametrize("model_id", PEFT_DECODER_MODELS_TO_TEST) @pytest.mark.parametrize("config_cls,config_kwargs", ALL_CONFIGS) def test_merge_layers(self, model_id, config_cls, config_kwargs): config_kwargs = set_init_weights_false(config_cls, config_kwargs) self._test_merge_layers(model_id, config_cls, config_kwargs.copy()) @pytest.mark.parametrize("model_id", PEFT_DECODER_MODELS_TO_TEST) @pytest.mark.parametrize("config_cls,config_kwargs", ALL_CONFIGS) def test_merge_layers_multi(self, model_id, config_cls, config_kwargs): _skip_if_not_conv1d_supported(model_id, config_cls) config_kwargs = set_init_weights_false(config_cls, config_kwargs) self._test_merge_layers_multi(model_id, config_cls, config_kwargs.copy()) @pytest.mark.parametrize("model_id", PEFT_DECODER_MODELS_TO_TEST) @pytest.mark.parametrize("config_cls,config_kwargs", ALL_CONFIGS) def test_merge_layers_nan(self, model_id, config_cls, config_kwargs): config_kwargs = set_init_weights_false(config_cls, config_kwargs) self._test_merge_layers_nan(model_id, config_cls, config_kwargs.copy()) @pytest.mark.parametrize("model_id", PEFT_DECODER_MODELS_TO_TEST) @pytest.mark.parametrize("config_cls,config_kwargs", ALL_CONFIGS) def test_mixed_adapter_batches(self, model_id, config_cls, config_kwargs): if config_cls != LoraConfig: pytest.skip("Mixed adapter batches not supported for this config.") config_kwargs = set_init_weights_false(config_cls, config_kwargs) self._test_mixed_adapter_batches(model_id, config_cls, config_kwargs.copy()) @pytest.mark.parametrize("model_id", PEFT_DECODER_MODELS_TO_TEST) @pytest.mark.parametrize("config_cls,config_kwargs", ALL_CONFIGS) def test_generate_with_mixed_adapter_batches(self, model_id, config_cls, config_kwargs): if config_cls != LoraConfig: pytest.skip("Mixed adapter batches not supported for this config.") config_kwargs = set_init_weights_false(config_cls, config_kwargs) self._test_generate_with_mixed_adapter_batches_and_beam_search(model_id, config_cls, config_kwargs.copy()) @pytest.mark.parametrize("model_id", PEFT_DECODER_MODELS_TO_TEST) @pytest.mark.parametrize("config_cls,config_kwargs", ALL_CONFIGS) def test_generate(self, model_id, config_cls, config_kwargs): _skip_if_not_conv1d_supported(model_id, config_cls) self._test_generate(model_id, 
config_cls, config_kwargs.copy()) @pytest.mark.parametrize("model_id", PEFT_DECODER_MODELS_TO_TEST) @pytest.mark.parametrize("config_cls,config_kwargs", ALL_CONFIGS) def test_generate_pos_args(self, model_id, config_cls, config_kwargs): _skip_if_not_conv1d_supported(model_id, config_cls) self._test_generate_pos_args(model_id, config_cls, config_kwargs.copy(), raises_err=False) @pytest.mark.parametrize("model_id", PEFT_DECODER_MODELS_TO_TEST) @pytest.mark.parametrize("config_cls,config_kwargs", ALL_CONFIGS) def test_merge_layers_fp16(self, model_id, config_cls, config_kwargs): self._test_merge_layers_fp16(model_id, config_cls, config_kwargs.copy()) @pytest.mark.parametrize("model_id", PEFT_DECODER_MODELS_TO_TEST) @pytest.mark.parametrize("config_cls,config_kwargs", ALL_CONFIGS) def test_generate_half_prec(self, model_id, config_cls, config_kwargs): self._test_generate_half_prec(model_id, config_cls, config_kwargs.copy()) @pytest.mark.parametrize("model_id", PEFT_DECODER_MODELS_TO_TEST) @pytest.mark.parametrize("config_cls,config_kwargs", ALL_CONFIGS) def test_prefix_tuning_half_prec_conversion(self, model_id, config_cls, config_kwargs): self._test_prefix_tuning_half_prec_conversion(model_id, config_cls, config_kwargs.copy()) @pytest.mark.parametrize("model_id", PEFT_DECODER_MODELS_TO_TEST) @pytest.mark.parametrize("config_cls,config_kwargs", ALL_CONFIGS) def test_training_decoders(self, model_id, config_cls, config_kwargs): _skip_if_not_conv1d_supported(model_id, config_cls) self._test_training(model_id, config_cls, config_kwargs.copy()) @pytest.mark.parametrize("model_id", PEFT_DECODER_MODELS_TO_TEST) @pytest.mark.parametrize("config_cls,config_kwargs", ALL_CONFIGS) def test_training_decoders_layer_indexing(self, model_id, config_cls, config_kwargs): self._test_training_layer_indexing(model_id, config_cls, config_kwargs.copy()) @pytest.mark.parametrize("model_id", PEFT_DECODER_MODELS_TO_TEST) @pytest.mark.parametrize("config_cls,config_kwargs", ALL_CONFIGS) def test_training_decoders_gradient_checkpointing(self, model_id, config_cls, config_kwargs): _skip_if_not_conv1d_supported(model_id, config_cls) self._test_training_gradient_checkpointing(model_id, config_cls, config_kwargs.copy()) @pytest.mark.parametrize("model_id", PEFT_DECODER_MODELS_TO_TEST) @pytest.mark.parametrize("config_cls,config_kwargs", ALL_CONFIGS) def test_inference_safetensors(self, model_id, config_cls, config_kwargs): _skip_if_not_conv1d_supported(model_id, config_cls) self._test_inference_safetensors(model_id, config_cls, config_kwargs.copy()) @pytest.mark.parametrize("model_id", PEFT_DECODER_MODELS_TO_TEST) @pytest.mark.parametrize("config_cls,config_kwargs", ALL_CONFIGS) def test_peft_model_device_map(self, model_id, config_cls, config_kwargs): self._test_peft_model_device_map(model_id, config_cls, config_kwargs.copy()) @pytest.mark.parametrize("model_id", PEFT_DECODER_MODELS_TO_TEST) @pytest.mark.parametrize("config_cls,config_kwargs", ALL_CONFIGS) def test_delete_adapter(self, model_id, config_cls, config_kwargs): _skip_if_not_conv1d_supported(model_id, config_cls) self._test_delete_adapter(model_id, config_cls, config_kwargs.copy()) @pytest.mark.parametrize("model_id", PEFT_DECODER_MODELS_TO_TEST) @pytest.mark.parametrize("config_cls,config_kwargs", ALL_CONFIGS) def test_delete_inactive_adapter(self, model_id, config_cls, config_kwargs): _skip_if_not_conv1d_supported(model_id, config_cls) self._test_delete_inactive_adapter(model_id, config_cls, config_kwargs.copy()) @pytest.mark.parametrize("model_id", 
PEFT_DECODER_MODELS_TO_TEST) @pytest.mark.parametrize("config_cls,config_kwargs", ALL_CONFIGS) def test_adding_multiple_adapters_with_bias_raises(self, model_id, config_cls, config_kwargs): _skip_if_not_conv1d_supported(model_id, config_cls) self._test_adding_multiple_adapters_with_bias_raises(model_id, config_cls, config_kwargs.copy()) @pytest.mark.parametrize("model_id", PEFT_DECODER_MODELS_TO_TEST) @pytest.mark.parametrize("config_cls,config_kwargs", ALL_CONFIGS) def test_unload_adapter(self, model_id, config_cls, config_kwargs): _skip_adalora_oft_hra_bone_for_gpt2(model_id, config_cls) _skip_if_not_conv1d_supported(model_id, config_cls) config_kwargs = set_init_weights_false(config_cls, config_kwargs) self._test_unload_adapter(model_id, config_cls, config_kwargs.copy()) @pytest.mark.parametrize("model_id", PEFT_DECODER_MODELS_TO_TEST) @pytest.mark.parametrize("config_cls,config_kwargs", ALL_CONFIGS) def test_weighted_combination_of_adapters(self, model_id, config_cls, config_kwargs): config_kwargs = set_init_weights_false(config_cls, config_kwargs) self._test_weighted_combination_of_adapters(model_id, config_cls, config_kwargs.copy()) @pytest.mark.parametrize("model_id", PEFT_DECODER_MODELS_TO_TEST) @pytest.mark.parametrize("config_cls,config_kwargs", ALL_CONFIGS) def test_training_prompt_learning_tasks(self, model_id, config_cls, config_kwargs): self._test_training_prompt_learning_tasks(model_id, config_cls, config_kwargs.copy()) @pytest.mark.parametrize("model_id", PEFT_DECODER_MODELS_TO_TEST) @pytest.mark.parametrize("config_cls,config_kwargs", ALL_CONFIGS) def test_disable_adapter(self, model_id, config_cls, config_kwargs): _skip_if_not_conv1d_supported(model_id, config_cls) config_kwargs = set_init_weights_false(config_cls, config_kwargs) self._test_disable_adapter(model_id, config_cls, config_kwargs.copy()) def test_generate_adalora_no_dropout(self): # test for issue #730 model_id = "hf-internal-testing/tiny-random-OPTForCausalLM" config_kwargs = { "target_modules": None, "task_type": "CAUSAL_LM", "lora_dropout": 0.0, "total_step": 1, } self._test_generate(model_id, AdaLoraConfig, config_kwargs.copy()) @pytest.mark.parametrize("model_id", PEFT_DECODER_MODELS_TO_TEST) @pytest.mark.parametrize("config_cls,config_kwargs", ALL_CONFIGS) def test_passing_input_embeds_works(self, model_id, config_cls, config_kwargs): _skip_if_not_conv1d_supported(model_id, config_cls) if (platform.system() == "Darwin") and (config_cls == PrefixTuningConfig): # the error is: # > RuntimeError: unsupported operation: more than one element of the written-to tensor refers to a single # > memory location. Please clone() the tensor before performing the operation. # in transformers sdpa_mask_older_torch. As we (currently) cannot upgrade PyTorch on MacOS GH runners, we're # stuck with this error. # TODO: remove if torch can be upgraded on MacOS or if MacOS CI is removed pytest.skip("Prefix tuning fails on MacOS in this case, not worth fixing") self._test_passing_input_embeds_works("", model_id, config_cls, config_kwargs.copy()) def test_lora_layer_replication(self): model_id = "trl-internal-testing/tiny-random-LlamaForCausalLM" config_kwargs = { "target_modules": ["down_proj", "up_proj"], "task_type": "CAUSAL_LM", "lora_dropout": 0.0, "layer_replication": [[0, 1], [0, 2], [1, 2]], } model = self.transformers_class.from_pretrained(model_id).to(self.torch_device) config = LoraConfig(base_model_name_or_path=model_id, **config_kwargs) assert len(model.model.layers), "Expected 2 layers in original model." 
== 2 model = get_peft_model(model, config) layers = model.base_model.model.model.layers assert len(layers) == 4, "Expected 4 layers in adapted model." assert ( layers[0].mlp.up_proj.base_layer.weight.data.storage().data_ptr() == layers[1].mlp.up_proj.base_layer.weight.data.storage().data_ptr() and layers[2].mlp.up_proj.base_layer.weight.data.storage().data_ptr() == layers[3].mlp.up_proj.base_layer.weight.data.storage().data_ptr() ), "Expected layers 0-1 and 2-3 to share weights" assert ( layers[0].mlp.up_proj.base_layer.weight.data.storage().data_ptr() != layers[2].mlp.up_proj.base_layer.weight.data.storage().data_ptr() ), "Expected layers 0 and 2 to have different weights" assert ( layers[0].mlp.up_proj.lora_A.default.weight.data.storage().data_ptr() != layers[1].mlp.up_proj.lora_A.default.weight.data.storage().data_ptr() and layers[2].mlp.up_proj.lora_A.default.weight.data.storage().data_ptr() != layers[3].mlp.up_proj.lora_A.default.weight.data.storage().data_ptr() ), "Expected all LoRA adapters to have distinct weights" assert len([n for n, _ in model.named_parameters() if ".lora_A." in n]) == 8, ( "Expected 8 LoRA adapters since we are adding one each for up and down." ) self._test_prepare_for_training(model_id, LoraConfig, config_kwargs.copy()) self._test_generate(model_id, LoraConfig, config_kwargs.copy()) def test_prompt_learning_with_grouped_query_attention(self): # See 1901, fixes a bug with handling GQA model_id = "peft-internal-testing/tiny-dummy-qwen2" base_model = AutoModelForCausalLM.from_pretrained(model_id) peft_config = PrefixTuningConfig(num_virtual_tokens=10, task_type="CAUSAL_LM") model = get_peft_model(base_model, peft_config) x = torch.tensor([[1, 2, 3]]) # does not raise model(x) def test_prefix_tuning_mistral(self): # See issue 869, 1962 model_id = "hf-internal-testing/tiny-random-MistralForCausalLM" base_model = AutoModelForCausalLM.from_pretrained(model_id) peft_config = PrefixTuningConfig(num_virtual_tokens=10, task_type="CAUSAL_LM") model = get_peft_model(base_model, peft_config) tokenizer = AutoTokenizer.from_pretrained(model_id) tokenizer.pad_token = tokenizer.eos_token def process(samples): tokenized = tokenizer(samples["quote"], truncation=True, max_length=128) return tokenized data = load_dataset_english_quotes() data = data.map(process, batched=True) with tempfile.TemporaryDirectory() as tmp_dirname: trainer = Trainer( model=model, train_dataset=data["train"], args=TrainingArguments( num_train_epochs=1, max_steps=5, per_device_train_batch_size=4, output_dir=tmp_dirname, ), data_collator=DataCollatorForLanguageModeling(tokenizer, mlm=False), ) trainer.train() @pytest.mark.parametrize("model_id", SMALL_GRID_MODELS) @pytest.mark.parametrize( "config_cls,config_kwargs", [ ( PromptTuningConfig, { "num_virtual_tokens": 10, "task_type": "CAUSAL_LM", }, ), ( PrefixTuningConfig, { "num_virtual_tokens": 10, "task_type": "CAUSAL_LM", }, ), ( PromptEncoderConfig, { "num_virtual_tokens": 10, "encoder_hidden_size": 32, "task_type": "CAUSAL_LM", }, ), ( CPTConfig, { "cpt_token_ids": [0, 1, 2, 3, 4, 5, 6, 7], # Example token IDs for testing "cpt_mask": [1, 1, 1, 1, 1, 1, 1, 1], "cpt_tokens_type_mask": [1, 2, 2, 2, 3, 3, 4, 4], }, ), ], ) def test_prompt_learning_with_gradient_checkpointing(self, model_id, config_cls, config_kwargs): # See issue 869 # Test prompt learning methods with gradient checkpointing in a semi realistic setting. # Prefix tuning does not work if the model uses the new caching implementation. In that case, a helpful error # should be raised. 
# skip if multi GPU, since this results in DataParallel usage by Trainer, which fails with "CUDA device # assertion", breaking subsequent tests if device_count > 1: pytest.skip("Skip on multi-GPU setups") peft_config = config_cls(base_model_name_or_path=model_id, **config_kwargs) base_model = self.transformers_class.from_pretrained(model_id) base_model.gradient_checkpointing_enable() try: model = get_peft_model(base_model, peft_config) except ValueError as exc: # Some methods will raise a helpful error. After this, exit the test, as training would fail. assert config_cls == PrefixTuningConfig assert "Prefix tuning does not work with gradient checkpointing" in str(exc) return tokenizer = AutoTokenizer.from_pretrained(model_id) tokenizer.pad_token = tokenizer.eos_token def process(samples): tokenized = tokenizer(samples["quote"], truncation=True, max_length=128) return tokenized data = load_dataset_english_quotes() data = data.map(process, batched=True) with tempfile.TemporaryDirectory() as tmp_dirname: trainer = Trainer( model=model, train_dataset=data["train"], args=TrainingArguments( num_train_epochs=1, max_steps=3, per_device_train_batch_size=4, output_dir=tmp_dirname, ), data_collator=DataCollatorForLanguageModeling(tokenizer, mlm=False), ) trainer.train() @pytest.mark.parametrize("save_embedding_layers", ["auto", True, False]) @pytest.mark.parametrize( "peft_config", [ (LoraConfig(target_modules=["lin0", "embed_tokens"], init_lora_weights=False)), (LoraConfig(target_modules=r".*\.embed_tokens", init_lora_weights=False)), ], ) def test_save_pretrained_targeting_lora_to_embedding_layer(self, save_embedding_layers, tmp_path, peft_config): model_id = "trl-internal-testing/tiny-random-LlamaForCausalLM" with hub_online_once(model_id): model = AutoModelForCausalLM.from_pretrained(model_id) model = get_peft_model(model, peft_config) if save_embedding_layers == "auto": # assert warning msg_start = "Setting `save_embedding_layers` to `True` as embedding layers found in `target_modules`." with pytest.warns(UserWarning, match=msg_start): model.save_pretrained(tmp_path, save_embedding_layers=save_embedding_layers) else: model.save_pretrained(tmp_path, save_embedding_layers=save_embedding_layers) state_dict = safe_load_file(tmp_path / "adapter_model.safetensors") contains_embedding = "base_model.model.model.embed_tokens.base_layer.weight" in state_dict if save_embedding_layers in ["auto", True]: assert contains_embedding assert torch.allclose( model.base_model.model.model.embed_tokens.base_layer.weight, state_dict["base_model.model.model.embed_tokens.base_layer.weight"], ) else: assert not contains_embedding
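Outside the test harness, the behaviour exercised by `test_save_pretrained_targeting_lora_to_embedding_layer` looks roughly like this hedged sketch; the output path is illustrative.

```python
from transformers import AutoModelForCausalLM
from peft import LoraConfig, get_peft_model

model_id = "trl-internal-testing/tiny-random-LlamaForCausalLM"
model = AutoModelForCausalLM.from_pretrained(model_id)

# Target the embedding layer alongside attention projections.
config = LoraConfig(target_modules=["q_proj", "v_proj", "embed_tokens"], init_lora_weights=False)
peft_model = get_peft_model(model, config)

# With "auto" (or True), the base embedding weights are written into
# adapter_model.safetensors alongside the LoRA weights, emitting the warning
# that the test above asserts on.
peft_model.save_pretrained("/tmp/lora-with-embeddings", save_embedding_layers="auto")
```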
peft/tests/test_decoder_models.py/0
{ "file_path": "peft/tests/test_decoder_models.py", "repo_id": "peft", "token_count": 14938 }
250
# Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import itertools import os import platform import re import tempfile import unittest import pytest import torch from parameterized import parameterized from torch import nn from transformers import AutoModelForCausalLM from peft import ( AdaLoraConfig, LoHaConfig, LoKrConfig, LoraConfig, PeftMixedModel, PrefixTuningConfig, get_peft_model, ) from peft.tuners.tuners_utils import BaseTunerLayer from peft.utils import infer_device class SimpleNet(nn.Module): def __init__(self, bias=True): super().__init__() # note: out_features must be > rank or else OFT will be an identity transform self.lin0 = nn.Linear(10, 20, bias=bias) self.relu = nn.ReLU() self.lin1 = nn.Linear(20, 16, bias=bias) def forward(self, X): X = X.float() X = self.lin0(X) X = self.relu(X) X = self.lin1(X) return X def _param_name_func(testcase_func, param_num, params): # for parameterized tests in TextMixedAdapterTypes config0, config1 = params[0] name0 = config0.__class__.__name__[: -len("Config")] name1 = config1.__class__.__name__[: -len("Config")] if name0 != name1: return f"{testcase_func.__name__}_{param_num}_{name0}_{name1}" return f"{testcase_func.__name__}_{param_num}_{name0}_x2" class TestMixedAdapterTypes(unittest.TestCase): torch_device = infer_device() def _get_model(self, model_cls, peft_config=None, adapter_name=None, seed=0, mixed=True): torch.manual_seed(0) # always use seed 0 for base model, seed for adapters may differ base_model = model_cls().eval().to(self.torch_device) if peft_config is None: return base_model torch.manual_seed(seed) assert adapter_name is not None peft_model = get_peft_model(base_model, peft_config, adapter_name=adapter_name, mixed=mixed) return peft_model.eval().to(self.torch_device) def _check_mixed_outputs(self, model_cls, config0, config1, input, *, is_commutative): # This test checks different combinations of adapter0, adapter1, or combinations of the two, and whether # outputs are the same/different, depending on context. If we pass is_commutative=True, it means that the order # of adapters does not matter, and we expect the same output regardless of the order in which adapters are # applied. # We have to very careful with resetting the random seed each time it is used, otherwise the adapters may be # initialized with different values, and the test will fail. 
atol = 1e-5 rtol = 1e-5 seed0 = 0 seed1 = 1 # base model base_model = self._get_model(model_cls) output_base = base_model(input) assert torch.isfinite(output_base).all() # adapter 0 peft_model_0 = self._get_model(model_cls, config0, "adapter0", seed=seed0) output_config0 = peft_model_0(input) assert torch.isfinite(output_config0).all() assert not torch.allclose(output_base, output_config0, atol=atol, rtol=rtol) # adapter 1 peft_model_1 = self._get_model(model_cls, config1, "adapter1", seed=seed1) output_config1 = peft_model_1(input) assert torch.isfinite(output_config1).all() assert not torch.allclose(output_base, output_config1, atol=atol, rtol=rtol) assert not torch.allclose(output_config0, output_config1, atol=atol, rtol=rtol) # adapter 0 + 1 peft_model_01 = self._get_model(model_cls, config0, "adapter0", seed=seed0) torch.manual_seed(seed1) peft_model_01.add_adapter("adapter1", config1) peft_model_01.set_adapter(["adapter0", "adapter1"]) output_mixed_01 = peft_model_01(input) # check the number of tuner layer types tuner_layers = [mod for mod in peft_model_01.modules() if isinstance(mod, BaseTunerLayer)] tuner_types = {type(tuner_layer) for tuner_layer in tuner_layers} if type(config0) is type(config1): assert len(tuner_types) == 1 else: assert len(tuner_types) == 2 assert peft_model_01.active_adapters == ["adapter0", "adapter1"] assert torch.isfinite(output_mixed_01).all() assert not torch.allclose(output_config0, output_mixed_01, atol=atol, rtol=rtol) assert not torch.allclose(output_config1, output_mixed_01, atol=atol, rtol=rtol) if is_commutative: delta0 = output_config0 - output_base delta1 = output_config1 - output_base delta_mixed_01 = output_mixed_01 - output_base assert torch.allclose((delta0 + delta1), delta_mixed_01, atol=atol, rtol=rtol) # adapter 1 + 0 peft_model_10 = self._get_model(model_cls, config1, "adapter1", seed=seed1) torch.manual_seed(seed0) peft_model_10.add_adapter("adapter0", config0) peft_model_10.set_adapter(["adapter1", "adapter0"]) output_mixed_10 = peft_model_10(input) # check the number of tuner layer types tuner_layers = [mod for mod in peft_model_10.modules() if isinstance(mod, BaseTunerLayer)] tuner_types = {type(tuner_layer) for tuner_layer in tuner_layers} if type(config0) is type(config1): assert len(tuner_types) == 1 else: assert len(tuner_types) == 2 assert peft_model_10.active_adapters == ["adapter1", "adapter0"] assert torch.isfinite(output_mixed_10).all() assert not torch.allclose(output_config0, output_mixed_10, atol=atol, rtol=rtol) assert not torch.allclose(output_config1, output_mixed_10, atol=atol, rtol=rtol) if is_commutative: assert torch.allclose(output_mixed_01, output_mixed_10, atol=atol, rtol=rtol) # turn around the order of the adapters of the 0 + 1 mixed model, should behave like the 0 + 1 mixed model peft_model_10.set_adapter(["adapter0", "adapter1"]) output_mixed_reversed = peft_model_10(input) # check the number of tuner layer types tuner_layers = [mod for mod in peft_model_10.modules() if isinstance(mod, BaseTunerLayer)] tuner_types = {type(tuner_layer) for tuner_layer in tuner_layers} if type(config0) is type(config1): assert len(tuner_types) == 1 else: assert len(tuner_types) == 2 assert peft_model_10.active_adapters == ["adapter0", "adapter1"] assert torch.isfinite(output_mixed_reversed).all() assert not torch.allclose(output_mixed_reversed, output_config0, atol=atol, rtol=rtol) assert not torch.allclose(output_mixed_reversed, output_config1, atol=atol, rtol=rtol) if is_commutative: assert 
torch.allclose(output_mixed_reversed, output_mixed_01, atol=atol, rtol=rtol) assert torch.allclose(output_mixed_reversed, output_mixed_10, atol=atol, rtol=rtol) def _check_merging(self, model_cls, config0, config1, input): # Ensure that when merging mixed adapters, the result is the same as when applying the adapters separately. # Merging requires a bit higher tolerance for some adapters, which can also vary depending on CPU vs GPU. atol = 1e-4 rtol = 1e-4 seed0 = 0 seed1 = 1 # adapter 0 + 1 peft_model_01 = self._get_model(model_cls, config0, "adapter0", seed=seed0) torch.manual_seed(seed1) peft_model_01.add_adapter("adapter1", config1) peft_model_01.set_adapter(["adapter0", "adapter1"]) output_mixed_01 = peft_model_01(input) model_merged_01 = peft_model_01.merge_and_unload() output_merged_01 = model_merged_01(input) assert torch.allclose(output_mixed_01, output_merged_01, atol=atol, rtol=rtol) # adapter 1 + 0 peft_model_10 = self._get_model(model_cls, config1, "adapter1", seed=seed1) torch.manual_seed(seed0) peft_model_10.add_adapter("adapter0", config0) peft_model_10.set_adapter(["adapter1", "adapter0"]) output_mixed_10 = peft_model_10(input) model_merged_10 = peft_model_10.merge_and_unload() output_merged_10 = model_merged_10(input) assert torch.allclose(output_mixed_10, output_merged_10, atol=atol, rtol=rtol) def _check_unload(self, model_cls, config0, config1, input): # Ensure that we can unload the base model without merging atol = 1e-5 rtol = 1e-5 seed0 = 0 seed1 = 1 base_model = self._get_model(model_cls) output_base = base_model(input) # adapter 0 + 1 peft_model_01 = self._get_model(model_cls, config0, "adapter0", seed=seed0) torch.manual_seed(seed1) peft_model_01.add_adapter("adapter1", config1) peft_model_01.set_adapter(["adapter0", "adapter1"]) output_mixed = peft_model_01(input) # unload model_unloaded = peft_model_01.unload() output_unloaded = model_unloaded(input) assert not torch.allclose(output_mixed, output_unloaded, atol=atol, rtol=rtol) assert torch.allclose(output_base, output_unloaded, atol=atol, rtol=rtol) def _check_disable(self, model_cls, config0, config1, input): # Ensure that we can disable adapters atol = 1e-5 rtol = 1e-5 seed0 = 0 seed1 = 1 # base model base_model = self._get_model(model_cls) output_base = base_model(input) # adapter 0 peft_model_0 = self._get_model(model_cls, config0, "adapter0", seed=seed0) output_config0 = peft_model_0(input) with peft_model_0.disable_adapter(): output_disabled0 = peft_model_0(input) assert not torch.allclose(output_base, output_config0, atol=atol, rtol=rtol) assert torch.allclose(output_base, output_disabled0, atol=atol, rtol=rtol) # adapter 1 peft_model_1 = self._get_model(model_cls, config1, "adapter1", seed=seed1) output_config1 = peft_model_1(input) with peft_model_1.disable_adapter(): output_disabled1 = peft_model_1(input) assert not torch.allclose(output_base, output_config1, atol=atol, rtol=rtol) assert torch.allclose(output_base, output_disabled1, atol=atol, rtol=rtol) # adapter 0 + 1 peft_model_01 = self._get_model(model_cls, config0, "adapter0", seed=seed0) torch.manual_seed(seed1) peft_model_01.add_adapter("adapter1", config1) peft_model_01.set_adapter(["adapter0", "adapter1"]) output_mixed_01 = peft_model_01(input) with peft_model_01.disable_adapter(): output_disabled01 = peft_model_01(input) assert not torch.allclose(output_base, output_mixed_01, atol=atol, rtol=rtol) assert torch.allclose(output_base, output_disabled01, atol=atol, rtol=rtol) # adapter 1 + 0 peft_model_10 = self._get_model(model_cls, config1, 
"adapter1", seed=seed1) torch.manual_seed(seed0) peft_model_10.add_adapter("adapter0", config0) peft_model_10.set_adapter(["adapter1", "adapter0"]) output_mixed_10 = peft_model_10(input) with peft_model_10.disable_adapter(): output_disabled10 = peft_model_10(input) assert not torch.allclose(output_base, output_mixed_10, atol=atol, rtol=rtol) assert torch.allclose(output_base, output_disabled10, atol=atol, rtol=rtol) def _check_loading(self, model_cls, config0, config1, input, *, is_commutative): # Check that we can load two adapters into the same model # Note that we save the adapters using a normal PeftModel because PeftMixModel doesn't support saving yet atol = 1e-5 rtol = 1e-5 seed0 = 0 seed1 = 1 with tempfile.TemporaryDirectory() as tmp_dirname: # SAVING # adapter 0: note that we set mixed=False because mixed models don't support saving (yet) peft_model_0 = self._get_model(model_cls, config0, "adapter0", seed=seed0, mixed=False) output_config0 = peft_model_0(input) peft_model_0.save_pretrained(os.path.join(tmp_dirname, "adapter0")) # adapter 1: note that we set mixed=False because mixed models don't support saving (yet) peft_model_1 = self._get_model(model_cls, config1, "adapter1", seed=seed1, mixed=False) output_config1 = peft_model_1(input) peft_model_1.save_pretrained(os.path.join(tmp_dirname, "adapter1")) # adapter 0 + 1 peft_model_01 = self._get_model(model_cls, config0, "adapter0", seed=seed0) torch.manual_seed(seed1) peft_model_01.add_adapter("adapter1", config1) peft_model_01.set_adapter(["adapter0", "adapter1"]) output_mixed_01 = peft_model_01(input) # adapter 1 + 0 peft_model_10 = self._get_model(model_cls, config1, "adapter1", seed=seed1) torch.manual_seed(seed0) peft_model_10.add_adapter("adapter0", config0) peft_model_10.set_adapter(["adapter1", "adapter0"]) output_mixed_10 = peft_model_10(input) # LOADING # adapter 0 base_model = self._get_model(model_cls) # Notes: # Path is tmp_dirname/adapter0/adapter0 because non-default adapters are saved in a subfolder. # As a sanity check, we should set a completely different seed here. That way, we ensure that the the # weights are not just randomly initialized exactly to the same values as before. 
torch.manual_seed(123456) peft_model_loaded0 = PeftMixedModel.from_pretrained( base_model, os.path.join(tmp_dirname, "adapter0", "adapter0"), "adapter0" ) output_loaded0 = peft_model_loaded0(input) assert torch.allclose(output_config0, output_loaded0, atol=atol, rtol=rtol) # adapter 1 base_model = self._get_model(model_cls) torch.manual_seed(654321) # setting a completely different seed here should not affect the result peft_model_loaded1 = PeftMixedModel.from_pretrained( base_model, os.path.join(tmp_dirname, "adapter1", "adapter1"), "adapter1" ) output_loaded1 = peft_model_loaded1(input) assert torch.allclose(output_config1, output_loaded1, atol=atol, rtol=rtol) # adapter 0 + 1 base_model = self._get_model(model_cls) torch.manual_seed(97531) # setting a completely different seed here should not affect the result peft_model_loaded_01 = PeftMixedModel.from_pretrained( base_model, os.path.join(tmp_dirname, "adapter0", "adapter0"), "adapter0" ) peft_model_loaded_01.load_adapter(os.path.join(tmp_dirname, "adapter1", "adapter1"), "adapter1") # at this point, "adapter0" should still be active assert peft_model_loaded_01.active_adapters == ["adapter0"] output_loaded01_0 = peft_model_loaded_01(input) assert torch.allclose(output_config0, output_loaded01_0, atol=atol, rtol=rtol) # activate adapter1 peft_model_loaded_01.set_adapter(["adapter1"]) assert peft_model_loaded_01.active_adapters == ["adapter1"] output_loaded01_1 = peft_model_loaded_01(input) assert torch.allclose(output_config1, output_loaded01_1, atol=atol, rtol=rtol) # activate both adapters peft_model_loaded_01.set_adapter(["adapter0", "adapter1"]) output_loaded01 = peft_model_loaded_01(input) assert torch.allclose(output_mixed_01, output_loaded01, atol=atol, rtol=rtol) # adapter 1 + 0 base_model = self._get_model(model_cls) torch.manual_seed(445566) # setting a completely different seed here should not affect the result peft_model_loaded_10 = PeftMixedModel.from_pretrained( base_model, os.path.join(tmp_dirname, "adapter1", "adapter1"), "adapter1" ) peft_model_loaded_10.load_adapter(os.path.join(tmp_dirname, "adapter0", "adapter0"), "adapter0") # at this point, "adapter1" should still be active assert peft_model_loaded_10.active_adapters == ["adapter1"] output_loaded10_1 = peft_model_loaded_10(input) assert torch.allclose(output_config1, output_loaded10_1, atol=atol, rtol=rtol) # activate adapter1 peft_model_loaded_10.set_adapter(["adapter0"]) assert peft_model_loaded_10.active_adapters == ["adapter0"] output_loaded10_0 = peft_model_loaded_10(input) assert torch.allclose(output_config0, output_loaded10_0, atol=atol, rtol=rtol) # activate both adapters peft_model_loaded_10.set_adapter(["adapter1", "adapter0"]) output_loaded10 = peft_model_loaded_10(input) assert torch.allclose(output_mixed_10, output_loaded10, atol=atol, rtol=rtol) if is_commutative: assert torch.allclose(output_loaded01, output_loaded10, atol=atol, rtol=rtol) assert torch.allclose(output_loaded10, output_mixed_01, atol=atol, rtol=rtol) @parameterized.expand( itertools.combinations( [ LoraConfig(target_modules=["lin0"], init_lora_weights=False), LoHaConfig(target_modules=["lin0"], init_weights=False), LoKrConfig(target_modules=["lin0"], init_weights=False), AdaLoraConfig(target_modules=["lin0"], init_lora_weights=False, total_step=1), ], r=2, ), name_func=_param_name_func, ) def test_target_first_layer(self, config0, config1): input = torch.arange(90).reshape(9, 10).to(self.torch_device) self._check_mixed_outputs(SimpleNet, config0, config1, input, is_commutative=False) 
self._check_merging(SimpleNet, config0, config1, input) self._check_unload(SimpleNet, config0, config1, input) self._check_disable(SimpleNet, config1, config0, input) self._check_loading(SimpleNet, config0, config1, input, is_commutative=False) @parameterized.expand( itertools.combinations( [ LoraConfig(target_modules=["lin1"], init_lora_weights=False), LoHaConfig(target_modules=["lin1"], init_weights=False), LoKrConfig(target_modules=["lin1"], init_weights=False), AdaLoraConfig(target_modules=["lin1"], init_lora_weights=False, total_step=1), ], r=2, ), name_func=_param_name_func, ) def test_target_last_layer(self, config0, config1): # We are targeting the last layer of the SimpleNet. Therefore, since the adapters only add their activations # to the output, the results should be commutative. This would *not* work if the adapters do something more # complex or if we target an earlier layer, because of the non-linearity would destroy the commutativity. input = torch.arange(90).reshape(9, 10).to(self.torch_device) self._check_mixed_outputs(SimpleNet, config0, config1, input, is_commutative=True) self._check_merging(SimpleNet, config0, config1, input) self._check_unload(SimpleNet, config0, config1, input) self._check_disable(SimpleNet, config1, config0, input) self._check_loading(SimpleNet, config0, config1, input, is_commutative=True) @parameterized.expand( itertools.combinations( [ LoraConfig(init_lora_weights=False), LoHaConfig(init_weights=False), LoKrConfig(init_weights=False), AdaLoraConfig(init_lora_weights=False, total_step=1), ], r=2, ), name_func=_param_name_func, ) def test_target_different_layers(self, config0, config1): input = torch.arange(90).reshape(9, 10).to(self.torch_device) config0.target_modules = ["lin0"] config1.target_modules = ["lin1"] self._check_mixed_outputs(SimpleNet, config0, config1, input, is_commutative=False) self._check_merging(SimpleNet, config0, config1, input) self._check_unload(SimpleNet, config0, config1, input) self._check_disable(SimpleNet, config0, config1, input) self._check_loading(SimpleNet, config0, config1, input, is_commutative=False) # same, but switch target_modules around config0.target_modules = ["lin1"] config1.target_modules = ["lin0"] self._check_mixed_outputs(SimpleNet, config1, config0, input, is_commutative=False) self._check_merging(SimpleNet, config1, config0, input) self._check_unload(SimpleNet, config1, config0, input) self._check_disable(SimpleNet, config1, config0, input) self._check_loading(SimpleNet, config1, config0, input, is_commutative=False) @parameterized.expand( [ ( LoraConfig(target_modules=["lin1"], init_lora_weights=False), LoraConfig(target_modules=["lin1"], init_lora_weights=False), ), ( LoHaConfig(target_modules=["lin1"], init_weights=False), LoHaConfig(target_modules=["lin1"], init_weights=False), ), ( LoKrConfig(target_modules=["lin1"], init_weights=False), LoKrConfig(target_modules=["lin1"], init_weights=False), ), ( AdaLoraConfig(target_modules=["lin1"], init_lora_weights=False, total_step=1), AdaLoraConfig(target_modules=["lin1"], init_lora_weights=False, total_step=1), ), ], name_func=_param_name_func, ) def test_target_last_layer_same_type(self, config0, config1): input = torch.arange(90).reshape(9, 10).to(self.torch_device) self._check_mixed_outputs(SimpleNet, config0, config1, input, is_commutative=True) self._check_merging(SimpleNet, config0, config1, input) self._check_unload(SimpleNet, config0, config1, input) self._check_disable(SimpleNet, config1, config0, input) @parameterized.expand( [ ( 
LoraConfig(target_modules=["lin0"], init_lora_weights=False), LoraConfig(target_modules=["lin0"], init_lora_weights=False), ), ( LoHaConfig(target_modules=["lin0"], init_weights=False), LoHaConfig(target_modules=["lin0"], init_weights=False), ), ( LoKrConfig(target_modules=["lin0"], init_weights=False), LoKrConfig(target_modules=["lin0"], init_weights=False), ), ( AdaLoraConfig(target_modules=["lin0"], init_lora_weights=False, total_step=1), AdaLoraConfig(target_modules=["lin0"], init_lora_weights=False, total_step=1), ), ], name_func=_param_name_func, ) def test_target_first_layer_same_type(self, config0, config1): input = torch.arange(90).reshape(9, 10).to(self.torch_device) self._check_mixed_outputs(SimpleNet, config0, config1, input, is_commutative=False) self._check_merging(SimpleNet, config0, config1, input) self._check_unload(SimpleNet, config0, config1, input) self._check_disable(SimpleNet, config1, config0, input) self._check_loading(SimpleNet, config0, config1, input, is_commutative=False) def test_deeply_nested(self): # a somewhat absurdly nested model using different adapter types if platform.system() == "Linux": self.skipTest("This test fails but only on GitHub CI with Linux systems.") atol = 1e-5 rtol = 1e-5 torch.manual_seed(0) model = SimpleNet().eval().to(self.torch_device) input = torch.arange(90).reshape(9, 10).to(self.torch_device) output_base = model(input) config0 = LoraConfig(r=4, lora_alpha=4, target_modules=["lin0", "lin1"], init_lora_weights=False) peft_model = get_peft_model(model, config0, "adapter0", mixed=True) config1 = LoHaConfig(r=4, alpha=4, target_modules=["lin0"], init_weights=False) peft_model.add_adapter("adapter1", config1) config2 = AdaLoraConfig(r=4, lora_alpha=4, target_modules=["lin1"], init_lora_weights=False, total_step=1) peft_model.add_adapter("adapter2", config2) config3 = LoKrConfig(r=4, alpha=4, target_modules=["lin0", "lin1"], init_weights=False) peft_model.add_adapter("adapter3", config3) peft_model.set_adapter(["adapter0", "adapter1", "adapter2", "adapter3"]) output_mixed = peft_model(input) assert torch.isfinite(output_base).all() assert not torch.allclose(output_base, output_mixed, atol=atol, rtol=rtol) # test disabling all adapters with peft_model.disable_adapter(): output_disabled = peft_model(input) assert torch.isfinite(output_disabled).all() assert torch.allclose(output_base, output_disabled, atol=atol, rtol=rtol) assert not torch.allclose(output_mixed, output_disabled, atol=atol, rtol=rtol) # merge and unload all adapters model_copy = copy.deepcopy(peft_model) model = model_copy.merge_and_unload() output_merged = model(input) assert torch.isfinite(output_merged).all() assert torch.allclose(output_mixed, output_merged, atol=atol, rtol=rtol) # merge and unload only adapter1 and adapter3 model_copy = copy.deepcopy(peft_model) model_copy.set_adapter(["adapter1", "adapter3"]) output_13 = model_copy(input) assert torch.isfinite(output_13).all() assert not torch.allclose(output_mixed, output_13, atol=atol, rtol=rtol) model_copy.set_adapter(["adapter0", "adapter1", "adapter2", "adapter3"]) model_merged_unloaded = model_copy.merge_and_unload(adapter_names=["adapter1", "adapter3"]) output_merged_13 = model_merged_unloaded(input) assert torch.isfinite(output_merged_13).all() assert torch.allclose(output_13, output_merged_13, atol=atol, rtol=rtol) # test unloading model_copy = copy.deepcopy(peft_model) model_unloaded = model_copy.unload() output_unloaded = model_unloaded(input) assert torch.isfinite(output_unloaded).all() assert 
torch.allclose(output_base, output_unloaded, atol=atol, rtol=rtol) def test_delete_adapter(self): atol = 1e-5 rtol = 1e-5 torch.manual_seed(0) model = SimpleNet().eval().to(self.torch_device) input = torch.arange(90).reshape(9, 10).to(self.torch_device) output_base = model(input) # create adapter0 torch.manual_seed(0) config0 = LoraConfig(r=4, lora_alpha=4, target_modules=["lin0", "lin1"], init_lora_weights=False) peft_model = get_peft_model(model, config0, "adapter0", mixed=True) output_0 = peft_model(input) assert not torch.allclose(output_base, output_0, atol=atol, rtol=rtol) # add adapter1 torch.manual_seed(1) config1 = LoHaConfig(r=4, alpha=4, target_modules=["lin0"], init_weights=False) peft_model.add_adapter("adapter1", config1) peft_model.set_adapter(["adapter0", "adapter1"]) output_01 = peft_model(input) assert not torch.allclose(output_base, output_01, atol=atol, rtol=rtol) assert not torch.allclose(output_0, output_01, atol=atol, rtol=rtol) # delete adapter1 peft_model.delete_adapter("adapter1") assert peft_model.active_adapters == ["adapter0"] output_deleted_1 = peft_model(input) assert torch.allclose(output_0, output_deleted_1, atol=atol, rtol=rtol) msg = re.escape("Adapter(s) ['adapter1'] not found, available adapters: ['adapter0']") with pytest.raises(ValueError, match=msg): peft_model.set_adapter(["adapter0", "adapter1"]) # re-add adapter1 torch.manual_seed(1) peft_model.add_adapter("adapter1", config1) peft_model.set_adapter(["adapter0", "adapter1"]) output_01_readded = peft_model(input) assert not torch.allclose(output_base, output_01_readded, atol=atol, rtol=rtol) # same as above, but this time delete adapter0 first torch.manual_seed(0) model = SimpleNet().eval().to(self.torch_device) torch.manual_seed(0) peft_model = get_peft_model(model, config0, "adapter0", mixed=True) torch.manual_seed(1) peft_model.add_adapter("adapter1", config1) peft_model.delete_adapter("adapter0") assert peft_model.active_adapters == ["adapter1"] output_deleted_0 = peft_model(input) assert not torch.allclose(output_deleted_0, output_base, atol=atol, rtol=rtol) assert not torch.allclose(output_deleted_0, output_01, atol=atol, rtol=rtol) msg = re.escape("Adapter(s) ['adapter0'] not found, available adapters: ['adapter1']") with pytest.raises(ValueError, match=msg): peft_model.set_adapter(["adapter0", "adapter1"]) peft_model.delete_adapter("adapter1") assert peft_model.active_adapters == [] output_deleted_01 = peft_model(input) assert torch.allclose(output_deleted_01, output_base, atol=atol, rtol=rtol) def test_modules_to_save(self): model = SimpleNet().eval().to(self.torch_device) config0 = LoraConfig(target_modules=["lin0"], modules_to_save=["lin1"]) peft_model = get_peft_model(model, config0, "adapter0", mixed=True) # adding a second adapter with same modules_to_save is not allowed # TODO: theoretically, we could allow this if it's the same target layer config1 = LoHaConfig(target_modules=["lin0"], modules_to_save=["lin1"]) peft_model.add_adapter("adapter1", config1) with pytest.raises(ValueError, match="Only one adapter can be set at a time for modules_to_save"): peft_model.set_adapter(["adapter0", "adapter1"]) def test_get_nb_trainable_parameters(self): model = SimpleNet().eval().to(self.torch_device) params_base = sum(p.numel() for p in model.parameters()) config0 = LoraConfig(target_modules=["lin0"]) peft_model = get_peft_model(model, config0, "adapter0", mixed=True) trainable_params0, all_param0 = peft_model.get_nb_trainable_parameters() params_lora = sum(p.numel() for n, p in 
model.named_parameters() if "adapter0" in n) assert trainable_params0 == params_lora assert all_param0 == (params_base + params_lora) config1 = LoHaConfig(target_modules=["lin1"]) peft_model.add_adapter("adapter1", config1) peft_model.set_adapter(["adapter0", "adapter1"]) params_loha = sum(p.numel() for n, p in model.named_parameters() if "adapter1" in n) trainable_params1, all_param1 = peft_model.get_nb_trainable_parameters() assert trainable_params1 == (params_lora + params_loha) assert all_param1 == ((params_base + params_lora) + params_loha) config2 = AdaLoraConfig(target_modules=["lin0", "lin1"], total_step=1) peft_model.add_adapter("adapter2", config2) peft_model.set_adapter(["adapter0", "adapter1", "adapter2"]) params_adalora = sum(p.numel() for n, p in model.named_parameters() if "adapter2" in n) trainable_params2, all_param2 = peft_model.get_nb_trainable_parameters() # remove 2 params because we need to exclude "ranknum" for AdaLora trainable params assert trainable_params2 == (((params_lora + params_loha) + params_adalora) - 2) assert all_param2 == (((params_base + params_lora) + params_loha) + params_adalora) def test_incompatible_config_raises(self): model = SimpleNet().eval().to(self.torch_device) config0 = LoraConfig(target_modules=["lin0"]) peft_model = get_peft_model(model, config0, "adapter0", mixed=True) config1 = PrefixTuningConfig() msg = "The provided `peft_type` 'PREFIX_TUNING' is not compatible with the `PeftMixedModel`." with pytest.raises(ValueError, match=msg): peft_model.add_adapter("adapter1", config1) def test_decoder_model(self): # test a somewhat realistic model instead of a toy model torch.manual_seed(0) model_id = "hf-internal-testing/tiny-random-OPTForCausalLM" model = AutoModelForCausalLM.from_pretrained(model_id).eval().to(self.torch_device) input_ids = torch.tensor([[1, 1, 1], [1, 2, 1]]).to(self.torch_device) attention_mask = torch.tensor([[1, 1, 1], [1, 0, 1]]).to(self.torch_device) input_dict = { "input_ids": input_ids, "attention_mask": attention_mask, } output_base = model.generate(**input_dict) torch.manual_seed(0) config0 = LoraConfig(task_type="CAUSAL_LM", init_lora_weights=False) peft_model = get_peft_model(model, config0, "adapter0", mixed=True) output0 = peft_model.generate(**input_dict) assert torch.isfinite(output0).all() assert not torch.allclose(output_base, output0) torch.manual_seed(1) config1 = LoHaConfig(task_type="CAUSAL_LM", target_modules=["q_proj", "v_proj"], init_weights=False) peft_model.add_adapter("adapter1", config1) peft_model.set_adapter(["adapter0", "adapter1"]) output1 = peft_model.generate(**input_dict) assert torch.isfinite(output1).all() assert not torch.allclose(output0, output1) torch.manual_seed(2) config2 = AdaLoraConfig(task_type="CAUSAL_LM", init_lora_weights=False, total_step=1) peft_model.add_adapter("adapter2", config2) peft_model.set_adapter(["adapter0", "adapter1", "adapter2"]) output2 = peft_model.generate(**input_dict) assert torch.isfinite(output2).all() assert not torch.allclose(output1, output2) torch.manual_seed(3) config3 = LoKrConfig(task_type="CAUSAL_LM", target_modules=["q_proj", "v_proj"], init_weights=False) peft_model.add_adapter("adapter3", config3) peft_model.set_adapter(["adapter0", "adapter1", "adapter2", "adapter3"]) output3 = peft_model.generate(**input_dict) assert torch.isfinite(output3).all() assert not torch.allclose(output2, output3) torch.manual_seed(4) peft_model.set_adapter(["adapter0", "adapter1", "adapter2", "adapter3"]) with peft_model.disable_adapter(): output_disabled = 
peft_model.generate(**input_dict) assert torch.isfinite(output_disabled).all() assert torch.allclose(output_base, output_disabled) model_unloaded = peft_model.merge_and_unload() output_unloaded = model_unloaded.generate(**input_dict) assert torch.isfinite(output_unloaded).all() with tempfile.TemporaryDirectory() as tmp_dir: # save adapter0 (use normal PeftModel, because PeftMixedModel does not support saving) torch.manual_seed(0) model = AutoModelForCausalLM.from_pretrained(model_id).eval().to(self.torch_device) torch.manual_seed(0) peft_model = get_peft_model(model, config0, "adapter0") output0_save = peft_model(**input_dict).logits assert torch.isfinite(output0_save).all() peft_model.save_pretrained(tmp_dir) # save adapter1 torch.manual_seed(0) model = AutoModelForCausalLM.from_pretrained(model_id).eval().to(self.torch_device) torch.manual_seed(1) peft_model = get_peft_model(model, config1, "adapter1") output1_save = peft_model(**input_dict).logits assert torch.isfinite(output1_save).all() peft_model.save_pretrained(tmp_dir) # load adapter0 and adapter1 model = AutoModelForCausalLM.from_pretrained(model_id).eval().to(self.torch_device) peft_model = PeftMixedModel.from_pretrained(model, os.path.join(tmp_dir, "adapter0"), "adapter0") peft_model.load_adapter(os.path.join(tmp_dir, "adapter1"), "adapter1") peft_model.set_adapter(["adapter0", "adapter1"]) output01_loaded = peft_model(**input_dict).logits atol, rtol = 1e-3, 1e-3 assert torch.isfinite(output01_loaded).all() assert not torch.allclose(output0_save, output01_loaded, atol=atol, rtol=rtol) assert not torch.allclose(output1_save, output01_loaded, atol=atol, rtol=rtol)
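The tests above all exercise the same `PeftMixedModel` lifecycle: the first adapter is created with `mixed=True`, further adapters of other PEFT types are added and activated together, and the result can have adapters deleted or be merged and unloaded. A minimal sketch of that flow, assuming a two-layer toy module in place of the `SimpleNet` fixture used by the tests (layer names, ranks, and shapes here are illustrative):

```python
import torch
from torch import nn

from peft import LoHaConfig, LoraConfig, get_peft_model


class TinyNet(nn.Module):
    """Stand-in for the SimpleNet fixture: two linear layers named lin0 and lin1."""

    def __init__(self):
        super().__init__()
        self.lin0 = nn.Linear(10, 20)
        self.relu = nn.ReLU()
        self.lin1 = nn.Linear(20, 2)

    def forward(self, x):
        return self.lin1(self.relu(self.lin0(x)))


model = TinyNet().eval()
x = torch.rand(9, 10)

# mixed=True returns a PeftMixedModel, which accepts adapters of different PEFT types
config0 = LoraConfig(r=8, target_modules=["lin0"], init_lora_weights=False)
peft_model = get_peft_model(model, config0, "adapter0", mixed=True)

# add a second adapter of a different type and activate both at once
config1 = LoHaConfig(r=8, alpha=8, target_modules=["lin1"], init_weights=False)
peft_model.add_adapter("adapter1", config1)
peft_model.set_adapter(["adapter0", "adapter1"])

with torch.inference_mode():
    out_mixed = peft_model(x)

# adapters can be deleted again, or merged into the base weights and unloaded
peft_model.delete_adapter("adapter1")
merged_model = peft_model.merge_and_unload()  # plain TinyNet with adapter0 baked in
```

Saving is deliberately left out of this sketch: as the decoder-model test shows, individual adapters are saved through a regular `PeftModel` and only reassembled into a mixed model at load time.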
peft/tests/test_mixed.py/0
{ "file_path": "peft/tests/test_mixed.py", "repo_id": "peft", "token_count": 16988 }
251
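The tail of `test_decoder_model` above also pins down that persistence contract: `PeftMixedModel` does not support saving, so each adapter is written out from a regular `PeftModel` and the mixed model is reassembled when loading. A hedged sketch of that roundtrip (model id and adapter names mirror the test; the output directory is illustrative):

```python
import os

from transformers import AutoModelForCausalLM

from peft import LoHaConfig, LoraConfig, PeftMixedModel, get_peft_model

model_id = "hf-internal-testing/tiny-random-OPTForCausalLM"
save_dir = "saved_adapters"  # illustrative path

# save each adapter via a regular PeftModel; non-default adapter names become subdirectories
base = AutoModelForCausalLM.from_pretrained(model_id)
get_peft_model(base, LoraConfig(task_type="CAUSAL_LM"), "adapter0").save_pretrained(save_dir)

base = AutoModelForCausalLM.from_pretrained(model_id)
loha_config = LoHaConfig(task_type="CAUSAL_LM", target_modules=["q_proj", "v_proj"])
get_peft_model(base, loha_config, "adapter1").save_pretrained(save_dir)

# reassemble a mixed model from the saved adapters
base = AutoModelForCausalLM.from_pretrained(model_id)
mixed = PeftMixedModel.from_pretrained(base, os.path.join(save_dir, "adapter0"), "adapter0")
mixed.load_adapter(os.path.join(save_dir, "adapter1"), "adapter1")
mixed.set_adapter(["adapter0", "adapter1"])
```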
# Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import json import os import pickle import platform import re import shutil import tempfile import warnings from dataclasses import replace import pytest import torch import yaml from diffusers import StableDiffusionPipeline from packaging import version from safetensors.torch import save_file from peft import ( AdaLoraConfig, BOFTConfig, BoneConfig, CPTConfig, FourierFTConfig, HRAConfig, IA3Config, LNTuningConfig, LoHaConfig, LoKrConfig, LoraConfig, MissConfig, OFTConfig, PeftModel, PeftType, PrefixTuningConfig, PromptEncoderConfig, PromptLearningConfig, PromptTuningConfig, RandLoraConfig, VBLoRAConfig, VeraConfig, get_peft_model, get_peft_model_state_dict, inject_adapter_in_model, prepare_model_for_kbit_training, ) from peft.tuners._buffer_dict import BufferDict from peft.tuners.lora import LoraLayer from peft.tuners.tuners_utils import BaseTunerLayer from peft.utils import ( AuxiliaryTrainingWrapper, ModulesToSaveWrapper, TrainableTokensWrapper, _get_submodules, infer_device, ) from .testing_utils import get_state_dict, hub_online_once CONFIG_TESTING_KWARGS = ( # IA³ { "target_modules": None, "feedforward_modules": None, }, # LoRA { "r": 8, "lora_alpha": 32, "target_modules": None, "lora_dropout": 0.05, "bias": "none", }, # prefix tuning { "num_virtual_tokens": 10, }, # prompt encoder { "num_virtual_tokens": 10, "encoder_hidden_size": 32, }, # prompt tuning { "num_virtual_tokens": 10, }, # AdaLoRA { "target_modules": None, "total_step": 1, }, # BOFT { "target_modules": None, }, # VeRA { "r": 8, "target_modules": None, "vera_dropout": 0.05, "projection_prng_key": 0xFF, "d_initial": 0.1, "save_projection": True, "bias": "none", }, # FourierFT { "n_frequency": 10, "target_modules": None, }, # HRA { "target_modules": None, }, # VBLoRA {"target_modules": None, "vblora_dropout": 0.05, "vector_length": 1, "num_vectors": 2}, # OFT { "target_modules": None, }, # Bone { "target_modules": None, "r": 2, }, # MiSS { "target_modules": None, "r": 2, }, # LoRA + trainable_tokens { "r": 8, "lora_alpha": 32, "target_modules": None, "lora_dropout": 0.05, "bias": "none", "trainable_token_indices": [0, 1, 3], }, # RandLoRA { "r": 32, "randlora_alpha": 64, "target_modules": None, "randlora_dropout": 0.05, "projection_prng_key": 0xFF, "save_projection": True, "bias": "none", }, # CPT tuninig { "cpt_token_ids": [0, 1, 2, 3, 4, 5, 6, 7], # Example token IDs for testing "cpt_mask": [1, 1, 1, 1, 1, 1, 1, 1], "cpt_tokens_type_mask": [1, 2, 2, 2, 3, 3, 4, 4], }, ) CLASSES_MAPPING = { "ia3": (IA3Config, CONFIG_TESTING_KWARGS[0]), "lora": (LoraConfig, CONFIG_TESTING_KWARGS[1]), "prefix_tuning": (PrefixTuningConfig, CONFIG_TESTING_KWARGS[2]), "prompt_encoder": (PromptEncoderConfig, CONFIG_TESTING_KWARGS[3]), "prompt_tuning": (PromptTuningConfig, CONFIG_TESTING_KWARGS[4]), "adalora": (AdaLoraConfig, CONFIG_TESTING_KWARGS[5]), "boft": (BOFTConfig, CONFIG_TESTING_KWARGS[6]), "vera": (VeraConfig, CONFIG_TESTING_KWARGS[7]), 
"fourierft": (FourierFTConfig, CONFIG_TESTING_KWARGS[8]), "hra": (HRAConfig, CONFIG_TESTING_KWARGS[9]), "vblora": (VBLoRAConfig, CONFIG_TESTING_KWARGS[10]), "oft": (OFTConfig, CONFIG_TESTING_KWARGS[11]), "bone": (BoneConfig, CONFIG_TESTING_KWARGS[12]), "miss": (MissConfig, CONFIG_TESTING_KWARGS[12]), "lora+trainable_tokens": (LoraConfig, CONFIG_TESTING_KWARGS[13]), "randlora": (RandLoraConfig, CONFIG_TESTING_KWARGS[14]), } DECODER_MODELS_EXTRA = {"cpt": (CPTConfig, CONFIG_TESTING_KWARGS[15])} class PeftCommonTester: r""" A large testing suite for testing common functionality of the PEFT models. Attributes: torch_device (`torch.device`): The device on which the tests will be run. transformers_class (`transformers.PreTrainedModel`): The transformers class that is being tested. """ torch_device = infer_device() transformers_class = None def prepare_inputs_for_common(self): raise NotImplementedError def check_modelcard(self, tmp_dirname, model): # check the generated README.md filename = os.path.join(tmp_dirname, "README.md") assert os.path.exists(filename) with open(filename, encoding="utf-8") as f: readme = f.read() metainfo = re.search(r"---\n(.*?)\n---", readme, re.DOTALL).group(1) dct = yaml.safe_load(metainfo) assert dct["library_name"] == "peft" if hasattr(model, "config"): assert dct["base_model"] == model.config.to_dict()["_name_or_path"] else: # a custom model assert "base_model" not in dct # The Hub expects the lora tag to be set for PEFT LoRA models since they # have explicit support for things like inference. if model.active_peft_config.peft_type.value == "LORA": assert "lora" in dct["tags"] def check_config_json(self, tmp_dirname, model): # check the generated config.json filename = os.path.join(tmp_dirname, "adapter_config.json") assert os.path.exists(filename) with open(filename, encoding="utf-8") as f: config = json.load(f) if hasattr(model, "config"): # custom models don't have a config attribute assert config["base_model_name_or_path"] == model.config.to_dict()["_name_or_path"] def perturb_trainable_token_weights_if_used(self, model, config_kwargs, adapter_name="default", scale=1.0): """TrainableTokensLayer is initialized to be a no-op by default. Since there's currently no way to pass `init_weights=False` to the trainable tokens layer when used in conjunction with LoRA, we have to do it like this to make sure that it is *not* a no-op (essentially simulating "training" of the adapter). """ if "trainable_token_indices" not in config_kwargs: return token_wrapper = None if hasattr(model, "get_input_embeddings"): token_wrapper = model.get_input_embeddings() else: for module in model.modules(): if isinstance(module, TrainableTokensWrapper): token_wrapper = module break # for a model with trainable_token_indices there should always be a trainable token wrapper somewhere. # if not, then there's something broken. 
assert token_wrapper is not None token_wrapper.token_adapter.trainable_tokens_delta[adapter_name].data = ( torch.rand_like(token_wrapper.token_adapter.trainable_tokens_delta[adapter_name].data) * scale ) def _test_model_attr(self, model_id, config_cls, config_kwargs): with hub_online_once(model_id): model = self.transformers_class.from_pretrained(model_id) config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = get_peft_model(model, config) assert hasattr(model, "save_pretrained") assert hasattr(model, "from_pretrained") assert hasattr(model, "push_to_hub") def _test_adapter_name(self, model_id, config_cls, config_kwargs): with hub_online_once(model_id): model = self.transformers_class.from_pretrained(model_id) config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = get_peft_model(model, config, adapter_name="test-adapter") correctly_converted = False for n, _ in model.named_parameters(): if "test-adapter" in n: correctly_converted = True break assert correctly_converted def _test_prepare_for_training(self, model_id, config_cls, config_kwargs): if config_kwargs.get("trainable_token_indices", None) is not None: # incompatible because trainable tokens is marking embeddings as trainable self.skipTest("Trainable tokens is incompatible with this test.") # some tests require specific tokenizers, make sure that they can be fetched as well with hub_online_once(model_id + config_kwargs.get("tokenizer_name_or_path", "")): model = self.transformers_class.from_pretrained(model_id).to(self.torch_device) config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = get_peft_model(model, config) dummy_input = self.prepare_inputs_for_testing() dummy_output = model.get_input_embeddings()(dummy_input["input_ids"]) assert not dummy_output.requires_grad # load with `prepare_model_for_kbit_training` model = self.transformers_class.from_pretrained(model_id).to(self.torch_device) model = prepare_model_for_kbit_training(model) for param in model.parameters(): assert not param.requires_grad config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = get_peft_model(model, config) # For backward compatibility if hasattr(model, "enable_input_require_grads"): model.enable_input_require_grads() else: def make_inputs_require_grad(module, input, output): output.requires_grad_(True) model.get_input_embeddings().register_forward_hook(make_inputs_require_grad) dummy_input = self.prepare_inputs_for_testing() dummy_output = model.get_input_embeddings()(dummy_input["input_ids"]) assert dummy_output.requires_grad def _test_load_model_low_cpu_mem_usage(self, model_id, config_cls, config_kwargs): # Ensure that low_cpu_mem_usage=True works for from_pretrained and load_adapter and that the resulting model's # parameters are on the correct device. 
with hub_online_once(model_id): model = self.transformers_class.from_pretrained(model_id).to(self.torch_device) config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = get_peft_model(model, config) # note: not using the context manager here because it fails on Windows CI for some reason tmp_dirname = tempfile.mkdtemp() try: model.save_pretrained(tmp_dirname) model = self.transformers_class.from_pretrained(model_id).to(self.torch_device) model = PeftModel.from_pretrained( model, tmp_dirname, torch_device=self.torch_device, low_cpu_mem_usage=True ) assert {p.device.type for p in model.parameters()} == {self.torch_device} model.load_adapter(tmp_dirname, adapter_name="other", low_cpu_mem_usage=True) assert {p.device.type for p in model.parameters()} == {self.torch_device} finally: try: shutil.rmtree(tmp_dirname) except PermissionError: # windows error pass # also test injecting directly del model model = self.transformers_class.from_pretrained(model_id).to(self.torch_device) inject_adapter_in_model(config, model, low_cpu_mem_usage=True) # check that there is no error if not isinstance(config, LNTuningConfig): # LN tuning does not add adapter layers that could be on meta device, it only changes the requires_grad. # Therefore, there is no meta device for LN tuning. assert "meta" in {p.device.type for p in model.parameters()} def _test_save_pretrained(self, model_id, config_cls, config_kwargs, safe_serialization=True): # ensure that the weights are randomly initialized if issubclass(config_cls, LoraConfig): config_kwargs = config_kwargs.copy() config_kwargs["init_lora_weights"] = False if issubclass(config_cls, IA3Config): config_kwargs = config_kwargs.copy() config_kwargs["init_ia3_weights"] = False if hasattr(config_cls, "init_weights"): config_kwargs = config_kwargs.copy() config_kwargs["init_weights"] = False with hub_online_once(model_id): model = self.transformers_class.from_pretrained(model_id) config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = get_peft_model(model, config) model = model.to(self.torch_device) with tempfile.TemporaryDirectory() as tmp_dirname: if safe_serialization: model.save_pretrained(tmp_dirname) else: model.save_pretrained(tmp_dirname, safe_serialization=False) model_from_pretrained = self.transformers_class.from_pretrained(model_id) with warnings.catch_warnings(record=True) as recs: model_from_pretrained = PeftModel.from_pretrained(model_from_pretrained, tmp_dirname) # ensure that there is no warning assert not any("Found missing adapter keys" in str(rec.message) for rec in recs) # check if the state dicts are equal if issubclass(config_cls, PromptEncoderConfig): # For prompt encoding, when loading the whole state_dict, there are differences, therefore, only load # adapter-specific weights for comparison. # TODO: is this expected? 
state_dict = get_peft_model_state_dict(model, unwrap_compiled=True) state_dict_from_pretrained = get_peft_model_state_dict(model_from_pretrained, unwrap_compiled=True) else: state_dict = get_state_dict(model, unwrap_compiled=True) state_dict_from_pretrained = get_state_dict(model_from_pretrained, unwrap_compiled=True) # check if tensors equal for key in state_dict.keys(): assert torch.allclose( state_dict[key].to(self.torch_device), state_dict_from_pretrained[key].to(self.torch_device) ) target_adapter_filename = "adapter_model.safetensors" if safe_serialization else "adapter_model.bin" # check if `adapter_model.safetensors` is present assert os.path.exists(os.path.join(tmp_dirname, target_adapter_filename)) # check if `adapter_config.json` is present assert os.path.exists(os.path.join(tmp_dirname, "adapter_config.json")) # check if `model.safetensors` is not present assert not os.path.exists(os.path.join(tmp_dirname, "model.safetensors")) # check if `config.json` is not present assert not os.path.exists(os.path.join(tmp_dirname, "config.json")) self.check_modelcard(tmp_dirname, model) self.check_config_json(tmp_dirname, model) def _test_save_pretrained_selected_adapters(self, model_id, config_cls, config_kwargs, safe_serialization=True): if issubclass(config_cls, AdaLoraConfig): # AdaLora does not support adding more than 1 adapter return pytest.skip(f"Test not applicable for {config_cls}") # ensure that the weights are randomly initialized if issubclass(config_cls, LoraConfig): config_kwargs = config_kwargs.copy() config_kwargs["init_lora_weights"] = False elif issubclass(config_cls, IA3Config): config_kwargs = config_kwargs.copy() config_kwargs["init_ia3_weights"] = False elif hasattr(config_cls, "init_weights"): config_kwargs["init_weights"] = False with hub_online_once(model_id): model = self.transformers_class.from_pretrained(model_id) config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = get_peft_model(model, config) model = model.to(self.torch_device) new_adapter_config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model.add_adapter("new_adapter", new_adapter_config) with tempfile.TemporaryDirectory() as tmp_dirname: if safe_serialization: model.save_pretrained(tmp_dirname) else: model.save_pretrained(tmp_dirname, safe_serialization=False) model_from_pretrained = self.transformers_class.from_pretrained(model_id) model_from_pretrained = PeftModel.from_pretrained(model_from_pretrained, tmp_dirname) new_adapter_dir = os.path.join(tmp_dirname, "new_adapter") model_from_pretrained.load_adapter(new_adapter_dir, "new_adapter") # check if the state dicts are equal if issubclass(config_cls, PromptEncoderConfig): # For prompt encoding, when loading the whole state_dict, there are differences, therefore, only load # adapter-specific weights for comparison. # TODO: is this expected? 
state_dict = get_peft_model_state_dict(model, unwrap_compiled=True) state_dict_from_pretrained = get_peft_model_state_dict(model_from_pretrained, unwrap_compiled=True) else: state_dict = get_state_dict(model, unwrap_compiled=True) state_dict_from_pretrained = get_state_dict(model_from_pretrained, unwrap_compiled=True) # check if same keys assert state_dict.keys() == state_dict_from_pretrained.keys() # check if tensors equal for key in state_dict.keys(): assert torch.allclose( state_dict[key].to(self.torch_device), state_dict_from_pretrained[key].to(self.torch_device) ) target_adapter_filename = "adapter_model.safetensors" if safe_serialization else "adapter_model.bin" # check if `adapter_model.safetensors` is present assert os.path.exists(os.path.join(tmp_dirname, target_adapter_filename)) assert os.path.exists(os.path.join(new_adapter_dir, target_adapter_filename)) # check if `adapter_config.json` is present assert os.path.exists(os.path.join(tmp_dirname, "adapter_config.json")) assert os.path.exists(os.path.join(new_adapter_dir, "adapter_config.json")) # check if `model.safetensors` is not present assert not os.path.exists(os.path.join(tmp_dirname, "model.safetensors")) assert not os.path.exists(os.path.join(new_adapter_dir, "model.safetensors")) # check if `config.json` is not present assert not os.path.exists(os.path.join(tmp_dirname, "config.json")) assert not os.path.exists(os.path.join(new_adapter_dir, "config.json")) self.check_modelcard(tmp_dirname, model) self.check_config_json(tmp_dirname, model) with tempfile.TemporaryDirectory() as tmp_dirname: model.save_pretrained(tmp_dirname, selected_adapters=["default"]) model_from_pretrained = self.transformers_class.from_pretrained(model_id) model_from_pretrained = PeftModel.from_pretrained(model_from_pretrained, tmp_dirname) assert "default" in model_from_pretrained.peft_config.keys() assert "new_adapter" not in model_from_pretrained.peft_config.keys() def _test_from_pretrained_config_construction(self, model_id, config_cls, config_kwargs): with hub_online_once(model_id): model = self.transformers_class.from_pretrained(model_id) config = config_cls(base_model_name_or_path=model_id, **config_kwargs) model = get_peft_model(model, config) model = model.to(self.torch_device) with tempfile.TemporaryDirectory() as tmp_dirname: model.save_pretrained(tmp_dirname) model_from_pretrained = self.transformers_class.from_pretrained(model_id) model_from_pretrained = PeftModel.from_pretrained( model_from_pretrained, tmp_dirname, is_trainable=False, config=config ) assert model_from_pretrained.peft_config["default"].inference_mode assert model_from_pretrained.peft_config["default"] is config def _test_load_multiple_adapters(self, model_id, config_cls, config_kwargs): # just ensure that this works and raises no error with hub_online_once(model_id): model = self.transformers_class.from_pretrained(model_id) config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = get_peft_model(model, config) with tempfile.TemporaryDirectory() as tmp_dirname: model.save_pretrained(tmp_dirname) del model model = self.transformers_class.from_pretrained(model_id).to(self.torch_device) model = PeftModel.from_pretrained(model, tmp_dirname, torch_device=self.torch_device) load_result1 = model.load_adapter(tmp_dirname, adapter_name="other") load_result2 = model.load_adapter(tmp_dirname, adapter_name="yet-another") # VBLoRA uses a shared "vblora_vector_bank" across all layers, causing it to appear # in the missing keys list, which leads to failed test 
cases. So # skipping the missing keys check for VBLoRA. if config.peft_type != "VBLORA": assert load_result1.missing_keys == [] assert load_result2.missing_keys == [] def _test_merge_layers_fp16(self, model_id, config_cls, config_kwargs): if config_cls not in (LoraConfig, IA3Config, AdaLoraConfig, LoHaConfig, LoKrConfig, VBLoRAConfig): # Merge layers only supported for LoRA and IA³ return pytest.skip(f"Test not applicable for {config_cls}") if ("gpt2" in model_id.lower()) and (config_cls != LoraConfig): self.skipTest("Merging GPT2 adapters not supported for IA³ (yet)") if (self.torch_device in ["cpu"]) and (version.parse(torch.__version__) <= version.parse("2.1")): self.skipTest("PyTorch 2.1 not supported for Half of addmm_impl_cpu_ ") with hub_online_once(model_id): model = self.transformers_class.from_pretrained(model_id, torch_dtype=torch.float16) config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = get_peft_model(model, config) model = model.to(device=self.torch_device, dtype=torch.float16) model.eval() # This should simply work _ = model.merge_and_unload() def _test_merge_layers_nan(self, model_id, config_cls, config_kwargs): if config_cls not in ( LoraConfig, IA3Config, AdaLoraConfig, LoHaConfig, LoKrConfig, VeraConfig, FourierFTConfig, ): # Merge layers only supported for LoRA and IA³ return if ("gpt2" in model_id.lower()) and (config_cls != LoraConfig): self.skipTest("Merging GPT2 adapters not supported for IA³ (yet)") if "gemma" in model_id.lower(): # TODO: could be related to tied weights self.skipTest("Merging currently fails with gemma") with hub_online_once(model_id): model = self.transformers_class.from_pretrained(model_id) config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = get_peft_model(model, config) model = model.to(self.torch_device) self.perturb_trainable_token_weights_if_used(model, config_kwargs) dummy_input = self.prepare_inputs_for_testing() model.eval() # This should work logits_unmerged = model(**dummy_input)[0] model = model.merge_and_unload() logits_merged = model(**dummy_input)[0] assert torch.allclose(logits_unmerged, logits_merged, atol=1e-3, rtol=1e-3) model = self.transformers_class.from_pretrained(model_id) config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = get_peft_model(model, config) model = model.to(self.torch_device) for name, module in model.named_parameters(): if ( "lora_A" in name or "ia3" in name or "lora_E" in name or "lora_B" in name or "vera_lambda" in name or "fourierft_spectrum" in name ): module.data[0] = torch.nan with pytest.raises( ValueError, match="NaNs detected in the merged weights. The adapter default seems to be broken" ): model = model.merge_and_unload(safe_merge=True) for name, module in model.named_parameters(): if ( "lora_A" in name or "ia3" in name or "lora_E" in name or "lora_B" in name or "vera_lambda" in name or "fourierft_spectrum" in name ): module.data[0] = torch.inf with pytest.raises( ValueError, match="NaNs detected in the merged weights. 
The adapter default seems to be broken" ): model = model.merge_and_unload(safe_merge=True) def _test_merge_layers(self, model_id, config_cls, config_kwargs): if issubclass(config_cls, PromptLearningConfig): return pytest.skip(f"Test not applicable for {config_cls}") if issubclass(config_cls, (OFTConfig, BOFTConfig)): return pytest.skip(f"Test not applicable for {config_cls}") if ("gpt2" in model_id.lower()) and (config_cls != LoraConfig): self.skipTest("Merging GPT2 adapters not supported for IA³ (yet)") if "gemma" in model_id.lower(): # TODO: could be related to tied weights self.skipTest("Merging currently fails with gemma") with hub_online_once(model_id): model = self.transformers_class.from_pretrained(model_id) config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = get_peft_model(model, config) model = model.to(self.torch_device) self.perturb_trainable_token_weights_if_used(model, config_kwargs) dummy_input = self.prepare_inputs_for_testing() model.eval() logits = model(**dummy_input)[0] model.merge_adapter() logits_merged = model(**dummy_input)[0] model.unmerge_adapter() logits_unmerged = model(**dummy_input)[0] model = model.merge_and_unload() # check that PEFT layers are completely removed assert not any(isinstance(module, BaseTunerLayer) for module in model.modules()) logits_merged_unloaded = model(**dummy_input)[0] conv_ids = ["Conv2d", "Conv3d", "Conv2d2"] atol, rtol = 1e-4, 1e-4 if self.torch_device in ["mlu"]: atol, rtol = 1e-3, 1e-3 # MLU if config.peft_type == "ADALORA": # AdaLoRA is a bit flaky on CI, but this cannot be reproduced locally atol, rtol = 1e-2, 1e-2 if (config.peft_type in {"IA3", "LORA"}) and (model_id in conv_ids): # for some reason, the Conv introduces a larger error atol, rtol = 0.3, 0.01 if model_id == "trl-internal-testing/tiny-Llama4ForCausalLM": # also getting larger errors here, not exactly sure why atol, rtol = 0.3, 0.01 assert torch.allclose(logits, logits_merged, atol=atol, rtol=rtol) assert torch.allclose(logits, logits_unmerged, atol=atol, rtol=rtol) assert torch.allclose(logits, logits_merged_unloaded, atol=atol, rtol=rtol) # For this test to work, weights should not be initialized to identity transform (e.g. # init_lora_weights should be False). 
transformers_model = self.transformers_class.from_pretrained(model_id).to(self.torch_device) logits_transformers = transformers_model(**dummy_input)[0] assert not torch.allclose(logits_merged, logits_transformers, atol=1e-10, rtol=1e-10) # test that the logits are identical after a save-load-roundtrip if hasattr(model, "save_pretrained"): # model is a transformers model tmp_dirname = tempfile.mkdtemp() # note: not using the context manager here because it fails on Windows CI for some reason try: model.save_pretrained(tmp_dirname) model_from_pretrained = self.transformers_class.from_pretrained(tmp_dirname).to(self.torch_device) finally: try: shutil.rmtree(tmp_dirname) except PermissionError: # windows error pass else: # model is not a transformers model model_from_pretrained = pickle.loads(pickle.dumps(model)) logits_merged_from_pretrained = model_from_pretrained(**dummy_input)[0] assert torch.allclose(logits_merged, logits_merged_from_pretrained, atol=atol, rtol=rtol) def _test_merge_layers_multi(self, model_id, config_cls, config_kwargs): supported_peft_types = [ PeftType.LORA, PeftType.LOHA, PeftType.LOKR, PeftType.IA3, PeftType.OFT, PeftType.BOFT, PeftType.HRA, PeftType.BONE, PeftType.MISS, ] if ("gpt2" in model_id.lower()) and (config_cls == IA3Config): self.skipTest("Merging GPT2 adapters not supported for IA³ (yet)") if config_kwargs.get("trainable_token_indices", None) is not None: self.skipTest( "Merging two adapters with trainable tokens is tested elsewhere since adapters with " "the same token indices cannot be merged." ) config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) if config.peft_type not in supported_peft_types: return with hub_online_once(model_id): model = self.transformers_class.from_pretrained(model_id) model = get_peft_model(model, config) model = model.to(self.torch_device) dummy_input = self.prepare_inputs_for_testing() model.eval() with torch.inference_mode(): logits_adapter_1 = model(**dummy_input)[0] model.add_adapter("adapter-2", config) model.set_adapter("adapter-2") model.eval() # sanity check: each adapter layer with a 'default' adapter should also have 'adapter-2' containers = (torch.nn.ModuleDict, torch.nn.ParameterDict, BufferDict) num_default = len([m for m in model.modules() if isinstance(m, containers) and "default" in m]) num_adapter2 = len([m for m in model.modules() if isinstance(m, containers) and "adapter-2" in m]) assert num_default > 0 assert num_default == num_adapter2 with torch.inference_mode(): logits_adapter_2 = model(**dummy_input)[0] assert not torch.allclose(logits_adapter_1, logits_adapter_2, atol=1e-3, rtol=1e-3) model.set_adapter("default") with torch.inference_mode(): logits_adapter_1_after_set = model(**dummy_input)[0] assert torch.allclose(logits_adapter_1_after_set, logits_adapter_1, atol=1e-3, rtol=1e-3) model_copy = copy.deepcopy(model) model_copy_2 = copy.deepcopy(model) model_merged_all = model.merge_and_unload(adapter_names=["adapter-2", "default"]) with torch.inference_mode(): logits_merged_all = model_merged_all(**dummy_input)[0] assert not torch.allclose(logits_merged_all, logits_adapter_2, atol=1e-3, rtol=1e-3) assert not torch.allclose(logits_merged_all, logits_adapter_1, atol=1e-3, rtol=1e-3) model_merged_adapter_2 = model_copy.merge_and_unload(adapter_names=["adapter-2"]) with torch.inference_mode(): logits_merged_adapter_2 = model_merged_adapter_2(**dummy_input)[0] assert torch.allclose(logits_merged_adapter_2, logits_adapter_2, atol=1e-3, rtol=1e-3) model_merged_adapter_default = 
model_copy_2.merge_and_unload(adapter_names=["default"]) with torch.inference_mode(): logits_merged_adapter_default = model_merged_adapter_default(**dummy_input)[0] assert torch.allclose(logits_merged_adapter_default, logits_adapter_1, atol=1e-3, rtol=1e-3) def _test_merge_layers_is_idempotent(self, model_id, config_cls, config_kwargs): with hub_online_once(model_id): model = self.transformers_class.from_pretrained(model_id) config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = get_peft_model(model, config) model = model.to(self.torch_device) model.eval() torch.manual_seed(0) model.merge_adapter() logits_0 = model(**self.prepare_inputs_for_testing())[0] # merging again should not change anything # also check warning: with pytest.warns(UserWarning, match="All adapters are already merged, nothing to do"): model.merge_adapter() logits_1 = model(**self.prepare_inputs_for_testing())[0] assert torch.allclose(logits_0, logits_1, atol=1e-6, rtol=1e-6) def _test_safe_merge(self, model_id, config_cls, config_kwargs): torch.manual_seed(0) with hub_online_once(model_id): model = self.transformers_class.from_pretrained(model_id) config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = model.to(self.torch_device).eval() inputs = self.prepare_inputs_for_testing() logits_base = model(**inputs)[0] model = get_peft_model(model, config).eval() logits_peft = model(**inputs)[0] atol, rtol = 1e-6, 1e-6 # default # Initializing with LN tuning cannot be configured to change the outputs (unlike init_lora_weights=False) if not issubclass(config_cls, LNTuningConfig): # sanity check that the logits are different assert not torch.allclose(logits_base, logits_peft, atol=atol, rtol=rtol) model_unloaded = model.merge_and_unload(safe_merge=True) logits_unloaded = model_unloaded(**inputs)[0] if self.torch_device in ["mlu"]: atol, rtol = 1e-3, 1e-3 # MLU conv_ids = ["Conv2d", "Conv3d", "Conv2d2"] if issubclass(config_cls, (IA3Config, LoraConfig)) and model_id in conv_ids: # more instability with Conv atol, rtol = 1e-3, 1e-3 # check that the logits are the same after unloading assert torch.allclose(logits_peft, logits_unloaded, atol=atol, rtol=rtol) # Ensure that serializing with safetensors works, there was an error when weights were not contiguous with tempfile.TemporaryDirectory() as tmp_dirname: # serializing with torch.save works torch.save(model_unloaded.state_dict(), os.path.join(tmp_dirname, "model.bin")) # serializing with safetensors works save_file(model_unloaded.state_dict(), os.path.join(tmp_dirname, "model.safetensors")) def _test_mixed_adapter_batches(self, model_id, config_cls, config_kwargs): # Test for mixing different adapters in a single batch by passing the adapter_names argument if config_cls not in (LoraConfig,): return pytest.skip(f"Mixed adapter batches not supported for {config_cls}") config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) torch.manual_seed(0) with hub_online_once(model_id): model = self.transformers_class.from_pretrained(model_id) model = get_peft_model(model, config, adapter_name="adapter0").eval() model.add_adapter("adapter1", config) model = model.to(self.torch_device).eval() self.perturb_trainable_token_weights_if_used(model, config_kwargs, adapter_name="adapter0") self.perturb_trainable_token_weights_if_used(model, config_kwargs, adapter_name="adapter1") dummy_input = self.prepare_inputs_for_testing() # ensure that we have at least 3 samples for this test dummy_input = {k: torch.cat([v for _ in range(3)]) 
for k, v in dummy_input.items()} with torch.inference_mode(): with model.disable_adapter(): output_base = model(**dummy_input)[0] logits_base = model.generate(**dummy_input, return_dict_in_generate=True, output_scores=True).scores[0] model.set_adapter("adapter0") with torch.inference_mode(): output_adapter0 = model(**dummy_input)[0] logits_adapter0 = model.generate(**dummy_input, return_dict_in_generate=True, output_scores=True).scores[0] model.set_adapter("adapter1") with torch.inference_mode(): output_adapter1 = model(**dummy_input)[0] logits_adapter1 = model.generate(**dummy_input, return_dict_in_generate=True, output_scores=True).scores[0] atol, rtol = 1e-4, 1e-4 # sanity check that there are enough outputs and that they are different assert len(output_base) == len(output_adapter0) == len(output_adapter1) >= 3 assert len(logits_base) == len(logits_adapter0) == len(logits_adapter1) >= 3 assert not torch.allclose(output_base, output_adapter0, atol=atol, rtol=rtol) assert not torch.allclose(output_base, output_adapter1, atol=atol, rtol=rtol) assert not torch.allclose(output_adapter0, output_adapter1, atol=atol, rtol=rtol) assert not torch.allclose(logits_base, logits_adapter0, atol=atol, rtol=rtol) assert not torch.allclose(logits_base, logits_adapter1, atol=atol, rtol=rtol) assert not torch.allclose(logits_adapter0, logits_adapter1, atol=atol, rtol=rtol) # alternate between base model, adapter0, and adapter1 adapters = ["__base__", "adapter0", "adapter1"] dummy_input["adapter_names"] = [adapters[i % 3] for i in (range(len(dummy_input["input_ids"])))] with torch.inference_mode(): output_mixed = model(**dummy_input)[0] logits_mixed = model.generate(**dummy_input, return_dict_in_generate=True, output_scores=True).scores[0] assert torch.allclose(output_base[::3], output_mixed[::3], atol=atol, rtol=rtol) assert torch.allclose(output_adapter0[1::3], output_mixed[1::3], atol=atol, rtol=rtol) assert torch.allclose(output_adapter1[2::3], output_mixed[2::3], atol=atol, rtol=rtol) assert torch.allclose(logits_base[::3], logits_mixed[::3], atol=atol, rtol=rtol) assert torch.allclose(logits_adapter0[1::3], logits_mixed[1::3], atol=atol, rtol=rtol) assert torch.allclose(logits_adapter1[2::3], logits_mixed[2::3], atol=atol, rtol=rtol) def _test_generate_with_mixed_adapter_batches_and_beam_search(self, model_id, config_cls, config_kwargs): # Test generating with beam search and with mixing different adapters in a single batch by passing the # adapter_names argument. See #2283. if config_cls not in (LoraConfig,): return pytest.skip(f"Mixed adapter batches not supported for {config_cls}") if config_kwargs.get("trainable_token_indices", None) is not None: # for some configurations this test will fail since the adapter values don't differ. # this is probably a problem with the test setup and not with the implementation. return pytest.skip("Trainable token indices is not supported here (yet).") config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) torch.manual_seed(0) with hub_online_once(model_id): model = self.transformers_class.from_pretrained(model_id) model = get_peft_model(model, config, adapter_name="adapter0").eval() model.add_adapter("adapter1", config) # In contrast to forward, for generate, it can sometimes happen that we get the same results as the base model # even with LoRA applied because the impact of LoRA is not big enough. Therefore, use this "trick" to make LoRA # stronger. 
for name, param in model.named_parameters(): if model.base_model.prefix in name: param.data.mul_(10.0) model = model.to(self.torch_device).eval() dummy_input = self.prepare_inputs_for_testing() # ensure that we have at least 3 samples for this test dummy_input = {k: torch.cat([v for _ in range(3)]) for k, v in dummy_input.items()} gen_kwargs = {**dummy_input, "max_length": 20, "num_beams": 10, "early_stopping": True} with torch.inference_mode(): with model.disable_adapter(): gen_base = model.generate(**gen_kwargs) model.set_adapter("adapter0") with torch.inference_mode(): gen_adapter0 = model.generate(**gen_kwargs) model.set_adapter("adapter1") with torch.inference_mode(): gen_adapter1 = model.generate(**gen_kwargs) def remove_padding(seq, pad_value): lst = list(seq) while lst and (lst[-1] == pad_value): lst.pop() return lst def gens_are_same(gen0, gen1): # Special function to compare generations. We cannot use torch.allclose because it will raise an error when sequence # lengths differ. Moreover, we need to remove the padding from the sequences. This is because, even though # normally identical sequences should have the same length, when we do mixed adapter batches, each sample # will be padded to the longest sequence in that mixed batch, which can be different from the longest # sequence without mixed adapter batches. pad_value = model.config.eos_token_id for sample0, sample1 in zip(gen0, gen1): sample0 = remove_padding(sample0, pad_value) sample1 = remove_padding(sample1, pad_value) if (len(sample0) != len(sample1)) or (sample0 != sample1): # at least one sample differs, the generations are not identical return False return True # sanity check that there are enough outputs and that they are different assert len(gen_base) == len(gen_adapter0) == len(gen_adapter1) assert len(gen_adapter1) >= 3 assert not gens_are_same(gen_base, gen_adapter0) assert not gens_are_same(gen_base, gen_adapter1) assert not gens_are_same(gen_adapter0, gen_adapter1) # alternate between base model, adapter0, and adapter1 adapters = ["__base__", "adapter0", "adapter1"] gen_kwargs["adapter_names"] = [adapters[i % 3] for i in (range(len(dummy_input["input_ids"])))] with torch.inference_mode(): gen_mixed = model.generate(**gen_kwargs) assert gens_are_same(gen_base[::3], gen_mixed[::3]) assert gens_are_same(gen_adapter0[1::3], gen_mixed[1::3]) assert gens_are_same(gen_adapter1[2::3], gen_mixed[2::3]) def _test_generate(self, model_id, config_cls, config_kwargs): with hub_online_once(model_id): model = self.transformers_class.from_pretrained(model_id) config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = get_peft_model(model, config) model = model.to(self.torch_device) inputs = self.prepare_inputs_for_testing() # check if `generate` works _ = model.generate(**inputs) def _test_generate_pos_args(self, model_id, config_cls, config_kwargs, raises_err: bool): with hub_online_once(model_id): model = self.transformers_class.from_pretrained(model_id) config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = get_peft_model(model, config) model = model.to(self.torch_device) inputs = self.prepare_inputs_for_testing() if raises_err: with pytest.raises(TypeError): # check if `generate` raises an error if positional arguments are passed _ = model.generate(inputs["input_ids"]) else: # check if `generate` works if positional arguments are passed _ = model.generate(inputs["input_ids"]) def _test_generate_half_prec(self, model_id, config_cls, config_kwargs): if config_cls not in (IA3Config,
LoraConfig, PrefixTuningConfig): return pytest.skip(f"Test not applicable for {config_cls}") if self.torch_device == "mps": # BFloat16 is not supported on MPS return pytest.skip("BFloat16 is not supported on MPS") with hub_online_once(model_id): model = self.transformers_class.from_pretrained(model_id, torch_dtype=torch.bfloat16) config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = get_peft_model(model, config) model = model.to(self.torch_device) input_ids = torch.LongTensor([[1, 1, 1], [2, 1, 2]]).to(self.torch_device) attention_mask = torch.LongTensor([[1, 1, 1], [1, 0, 1]]).to(self.torch_device) # check if `generate` works _ = model.generate(input_ids=input_ids, attention_mask=attention_mask) def _test_prefix_tuning_half_prec_conversion(self, model_id, config_cls, config_kwargs): if config_cls not in (PrefixTuningConfig,): return pytest.skip(f"Test not applicable for {config_cls}") config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) with hub_online_once(model_id): model = self.transformers_class.from_pretrained(model_id) model = get_peft_model(model, config) model = model.half() assert model.base_model_torch_dtype == torch.float16 def _test_training(self, model_id, config_cls, config_kwargs): if issubclass(config_cls, PromptLearningConfig): return pytest.skip(f"Test not applicable for {config_cls}") if (config_cls == AdaLoraConfig) and ("roberta" in model_id.lower()): # TODO: no gradients on the "dense" layer, other layers work, not sure why self.skipTest("AdaLora with RoBERTa does not work correctly") with hub_online_once(model_id): model = self.transformers_class.from_pretrained(model_id) config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = get_peft_model(model, config) model = model.to(self.torch_device) inputs = self.prepare_inputs_for_testing() # check if `training` works output = model(**inputs)[0] loss = output.sum() loss.backward() parameter_prefix = model.prefix for n, param in model.named_parameters(): if (parameter_prefix in n) or ("modules_to_save" in n) or ("token_adapter.trainable_tokens" in n): assert param.grad is not None else: assert param.grad is None def _test_inference_safetensors(self, model_id, config_cls, config_kwargs): if (config_cls == PrefixTuningConfig) and ("deberta" in model_id.lower()): # TODO: raises an error: # TypeError: DebertaModel.forward() got an unexpected keyword argument 'past_key_values' self.skipTest("DeBERTa with PrefixTuning does not work correctly") config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) with hub_online_once(model_id): model = self.transformers_class.from_pretrained(model_id) model = get_peft_model(model, config) model = model.to(self.torch_device) inputs = self.prepare_inputs_for_testing() # check if `training` works output = model(**inputs)[0] logits = output[0] loss = output.sum() loss.backward() # set to eval mode, since things like dropout can affect the output otherwise model.eval() logits = model(**inputs)[0][0] with tempfile.TemporaryDirectory() as tmp_dirname: model.save_pretrained(tmp_dirname, safe_serialization=True) assert "adapter_model.safetensors" in os.listdir(tmp_dirname) assert "adapter_model.bin" not in os.listdir(tmp_dirname) model_from_pretrained = self.transformers_class.from_pretrained(model_id) model_from_pretrained = PeftModel.from_pretrained(model_from_pretrained, tmp_dirname).to( self.torch_device ) logits_from_pretrained = model_from_pretrained(**inputs)[0][0] assert torch.allclose(logits, 
logits_from_pretrained, atol=1e-4, rtol=1e-4) def _test_training_layer_indexing(self, model_id, config_cls, config_kwargs): if config_cls not in (LoraConfig,): return pytest.skip(f"Test not applicable for {config_cls}") config = config_cls( base_model_name_or_path=model_id, layers_to_transform=[0], **config_kwargs, ) with hub_online_once(model_id): model = self.transformers_class.from_pretrained(model_id) model = get_peft_model(model, config) model = model.to(self.torch_device) inputs = self.prepare_inputs_for_testing() # check if `training` works output = model(**inputs)[0] logits = output[0] loss = output.sum() loss.backward() has_trainable_tokens = config_kwargs.get("trainable_token_indices", None) is not None nb_trainable = 0 for n, param in model.named_parameters(): if model.prefix in n or (has_trainable_tokens and "trainable_tokens" in n): assert param.grad is not None nb_trainable += 1 else: assert param.grad is None with tempfile.TemporaryDirectory() as tmp_dirname: model.save_pretrained(tmp_dirname) model_from_pretrained = self.transformers_class.from_pretrained(model_id) model_from_pretrained = PeftModel.from_pretrained(model_from_pretrained, tmp_dirname).to( self.torch_device ) logits_from_pretrained = model_from_pretrained(**inputs)[0][0] assert torch.allclose(logits, logits_from_pretrained, atol=1e-4, rtol=1e-4) # check the nb of trainable params again but without layers_to_transform model = self.transformers_class.from_pretrained(model_id) config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = get_peft_model(model, config) nb_trainable_all = 0 for n, param in model.named_parameters(): if model.prefix in n or (has_trainable_tokens and "trainable_tokens" in n): nb_trainable_all += 1 mod_list = next((m for m in model.modules() if isinstance(m, torch.nn.ModuleList)), None) if mod_list and len(mod_list) == 1: # there is only a single layer assert nb_trainable == nb_trainable_all else: # more than 1 layer, i.e. setting layers_to_transform=[0] should target fewer layers assert nb_trainable < nb_trainable_all def _test_training_gradient_checkpointing(self, model_id, config_cls, config_kwargs): if config_cls == PrefixTuningConfig: return pytest.skip(f"Test not applicable for {config_cls}") if (config_cls == AdaLoraConfig) and ("roberta" in model_id.lower()): # TODO: no gradients on the "dense" layer, other layers work, not sure why self.skipTest("AdaLora with RoBERTa does not work correctly") if (config_cls == OFTConfig) and ("deberta" in model_id.lower()): # TODO: no gradients on the "dense" layer, other layers work, not sure why self.skipTest("OFT with Deberta does not work correctly") with hub_online_once(model_id): model = self.transformers_class.from_pretrained(model_id) if not getattr(model, "supports_gradient_checkpointing", False): return pytest.skip(f"Model {model_id} does not support gradient checkpointing") model.gradient_checkpointing_enable() config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = get_peft_model(model, config) model = model.to(self.torch_device) inputs = self.prepare_inputs_for_testing() # check if `training` works output = model(**inputs)[0] loss = output.sum() loss.backward() for n, param in model.named_parameters(): if "prompt_encoder." 
in n: # prompt tuning methods if not issubclass(config_cls, CPTConfig): assert param.grad is not None elif ( "delta_embedding" in n ): # delta_embedding is the embedding that should be updated with grads in CPT assert param.grad is not None elif hasattr(model, "prefix") and (model.prefix in n): # non-prompt tuning methods assert param.grad is not None elif "trainable_tokens_" in n: # trainable tokens layer assert param.grad is not None else: assert param.grad is None def _test_peft_model_device_map(self, model_id, config_cls, config_kwargs): if config_cls not in (LoraConfig, VBLoRAConfig): return pytest.skip(f"Test not applicable for {config_cls}") config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) with hub_online_once(model_id): model = self.transformers_class.from_pretrained(model_id) model = get_peft_model(model, config) model = model.to(self.torch_device) with tempfile.TemporaryDirectory() as tmp_dirname: model.save_pretrained(tmp_dirname) model_from_pretrained = self.transformers_class.from_pretrained(model_id) _ = PeftModel.from_pretrained(model_from_pretrained, tmp_dirname, device_map={"": "cpu"}).to( self.torch_device ) def _test_training_prompt_learning_tasks(self, model_id, config_cls, config_kwargs): if not issubclass(config_cls, PromptLearningConfig): return pytest.skip(f"Test not applicable for {config_cls}") with hub_online_once(model_id): model = self.transformers_class.from_pretrained(model_id) config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = get_peft_model(model, config) model = model.to(self.torch_device) inputs = self.prepare_inputs_for_testing() # check if `training` works output = model(**inputs)[0] loss = output.sum() loss.backward() if issubclass(config_cls, CPTConfig): parameters = [] for name, param in model.prompt_encoder.named_parameters(): if name != "default.embedding.weight": parameters.append(param) else: parameters = model.prompt_encoder.parameters() # check that prompt encoder has grads for param in parameters: assert param.grad is not None def _test_delete_adapter(self, model_id, config_cls, config_kwargs): supported_peft_types = [ PeftType.LORA, PeftType.LOHA, PeftType.LOKR, PeftType.IA3, PeftType.OFT, PeftType.BOFT, PeftType.VERA, PeftType.FOURIERFT, PeftType.HRA, PeftType.VBLORA, PeftType.BONE, PeftType.MISS, ] # IA3 does not support deleting adapters yet, but it just needs to be added # AdaLora does not support multiple adapters config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) if config.peft_type not in supported_peft_types: return pytest.skip(f"Test not applicable for {config.peft_type}") with hub_online_once(model_id): model = self.transformers_class.from_pretrained(model_id) adapter_to_delete = "delete_me" model = get_peft_model(model, config) model.add_adapter(adapter_to_delete, config) model.set_adapter(adapter_to_delete) model = model.to(self.torch_device) model.delete_adapter(adapter_to_delete) assert adapter_to_delete not in model.peft_config assert model.active_adapters == ["default"] key_list = [key for key, _ in model.named_modules()] for key in key_list: _, target, _ = _get_submodules(model, key) attributes_to_check = getattr(target, "adapter_layer_names", []) + getattr( target, "other_param_names", [] ) for attr in attributes_to_check: assert adapter_to_delete not in getattr(target, attr) # check auxiliary modules for module in model.modules(): if isinstance(module, AuxiliaryTrainingWrapper): assert adapter_to_delete not in module._adapters assert 
module.active_adapters == ["default"] if isinstance(module, ModulesToSaveWrapper): assert adapter_to_delete not in module.modules_to_save elif isinstance(module, TrainableTokensWrapper): assert adapter_to_delete not in module.token_adapter.trainable_tokens_delta assert adapter_to_delete not in module.token_adapter.trainable_tokens_original # check that we can also delete the last remaining adapter model.delete_adapter("default") assert "default" not in model.peft_config assert model.active_adapters == [] for module in model.modules(): if isinstance(module, AuxiliaryTrainingWrapper): assert "default" not in module._adapters assert module.active_adapters == [] if isinstance(module, ModulesToSaveWrapper): assert "default" not in module.modules_to_save elif isinstance(module, TrainableTokensWrapper): assert "default" not in module.token_adapter.trainable_tokens_delta assert "default" not in module.token_adapter.trainable_tokens_original input = self.prepare_inputs_for_testing() # note: we cannot call model(**input) because PeftModel always expects there to be at least one adapter model.base_model(**input) # should not raise an error def _test_delete_inactive_adapter(self, model_id, config_cls, config_kwargs): # same as test_delete_adapter, but this time an inactive adapter is deleted supported_peft_types = [ PeftType.LORA, PeftType.LOHA, PeftType.LOKR, PeftType.IA3, PeftType.OFT, PeftType.BOFT, PeftType.FOURIERFT, PeftType.HRA, PeftType.VBLORA, PeftType.BONE, PeftType.MISS, ] # IA3 does not support deleting adapters yet, but it just needs to be added # AdaLora does not support multiple adapters config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) if config.peft_type not in supported_peft_types: return pytest.skip(f"Test not applicable for {config.peft_type}") with hub_online_once(model_id): model = self.transformers_class.from_pretrained(model_id) adapter_to_delete = "delete_me" model = get_peft_model(model, config) model.add_adapter(adapter_to_delete, config) # "delete_me" is added but not activated model = model.to(self.torch_device) model.delete_adapter(adapter_to_delete) assert adapter_to_delete not in model.peft_config assert model.active_adapters == ["default"] key_list = [key for key, _ in model.named_modules()] for key in key_list: _, target, _ = _get_submodules(model, key) attributes_to_check = getattr(target, "adapter_layer_names", []) + getattr( target, "other_param_names", [] ) for attr in attributes_to_check: assert adapter_to_delete not in getattr(target, attr) # check auxiliary modules for module in model.modules(): if isinstance(module, AuxiliaryTrainingWrapper): assert adapter_to_delete not in module._adapters assert module.active_adapters == ["default"] if isinstance(module, ModulesToSaveWrapper): assert adapter_to_delete not in module.modules_to_save elif isinstance(module, TrainableTokensWrapper): assert adapter_to_delete not in module.token_adapter.trainable_tokens_delta assert adapter_to_delete not in module.token_adapter.trainable_tokens_original # check that we can also delete the last remaining adapter model.delete_adapter("default") assert "default" not in model.peft_config assert model.active_adapters == [] for module in model.modules(): if isinstance(module, AuxiliaryTrainingWrapper): assert "default" not in module._adapters assert module.active_adapters == [] if isinstance(module, ModulesToSaveWrapper): assert "default" not in module.modules_to_save elif isinstance(module, TrainableTokensWrapper): assert "default" not in 
module.token_adapter.trainable_tokens_delta assert "default" not in module.token_adapter.trainable_tokens_original input = self.prepare_inputs_for_testing() # note: we cannot call model(**input) because PeftModel always expects there to be at least one adapter model.base_model(**input) # should not raise an error def _test_delete_unknown_adapter_raises(self, model_id, config_cls, config_kwargs): # Check that we get a nice error message when trying to delete an adapter that does not exist. config = config_cls(base_model_name_or_path=model_id, **config_kwargs) with hub_online_once(model_id): model = self.transformers_class.from_pretrained(model_id) adapter_to_delete = "delete_me" model = get_peft_model(model, config) msg = "Adapter unknown-adapter does not exist" with pytest.raises(ValueError, match=msg): model.delete_adapter("unknown-adapter") def _test_unload_adapter(self, model_id, config_cls, config_kwargs): with hub_online_once(model_id): model = self.transformers_class.from_pretrained(model_id).to(self.torch_device) num_params_base = len(model.state_dict()) dummy_input = self.prepare_inputs_for_testing() with torch.inference_mode(): logits_transformers = model(**dummy_input)[0] config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = get_peft_model(model, config) model = model.to(self.torch_device) if isinstance(config, PromptLearningConfig): # prompt learning does not support unloading with pytest.raises(AttributeError): model = model.unload() else: self.perturb_trainable_token_weights_if_used(model, config_kwargs) with torch.inference_mode(): logits_with_adapter = model(**dummy_input)[0] model.eval() model = model.unload() num_params_unloaded = len(model.state_dict()) with torch.inference_mode(): logits_unload = model(**dummy_input)[0] # check that PEFT layers are completely removed assert not any(isinstance(module, BaseTunerLayer) for module in model.modules()) assert not torch.allclose(logits_with_adapter, logits_unload, atol=1e-10, rtol=1e-10) assert torch.allclose(logits_transformers, logits_unload, atol=1e-4, rtol=1e-4) assert num_params_base == num_params_unloaded def _test_weighted_combination_of_adapters_lora(self, model, config, adapter_list, weight_list): model.add_adapter(adapter_list[1], config) model.add_adapter(adapter_list[2], replace(config, r=20)) model = model.to(self.torch_device) # test re-weighting single adapter model.add_weighted_adapter([adapter_list[0]], [weight_list[0]], "single_adapter_reweighting") # test svd re-weighting with multiple adapters model.add_weighted_adapter(adapter_list[1:], weight_list[1:], "multi_adapter_svd_reweighting") # test ties_svd re-weighting with multiple adapters model.add_weighted_adapter( adapter_list[1:], weight_list[1:], "multi_adapter_ties_svd_reweighting", combination_type="ties_svd", density=0.5, ) # test dare_linear_svd re-weighting with multiple adapters model.add_weighted_adapter( adapter_list[1:], weight_list[1:], "multi_adapter_dare_linear_svd_reweighting", combination_type="dare_linear_svd", density=0.5, ) # test dare_ties_svd re-weighting with multiple adapters model.add_weighted_adapter( adapter_list[1:], weight_list[1:], "multi_adapter_dare_ties_svd_reweighting", combination_type="dare_ties_svd", density=0.5, ) # test magnitude_prune_svd re-weighting with multiple adapters model.add_weighted_adapter( adapter_list[1:], weight_list[1:], "multi_adapter_magnitude_prune_svd_reweighting", combination_type="magnitude_prune_svd", density=0.5, ) # test cat re-weighting with multiple adapters 
model.add_weighted_adapter( adapter_list[1:], weight_list[1:], "multi_adapter_cat_reweighting", combination_type="cat" ) # test linear re-weighting with multiple adapters model.add_weighted_adapter( adapter_list[:2], weight_list[:2], "multi_adapter_linear_reweighting", combination_type="linear" ) # test ties re-weighting with multiple adapters model.add_weighted_adapter( adapter_list[:2], weight_list[:2], "multi_adapter_ties_reweighting", combination_type="ties", density=0.5 ) # test dare_linear re-weighting with multiple adapters model.add_weighted_adapter( adapter_list[:2], weight_list[:2], "multi_adapter_dare_linear_reweighting", combination_type="dare_linear", density=0.5, ) # test dare_ties re-weighting with multiple adapters model.add_weighted_adapter( adapter_list[:2], weight_list[:2], "multi_adapter_dare_ties_reweighting", combination_type="dare_ties", density=0.5, ) # test magnitude_prune re-weighting with multiple adapters model.add_weighted_adapter( adapter_list[:2], weight_list[:2], "multi_adapter_magnitude_prune_reweighting", combination_type="magnitude_prune", density=0.5, ) # test linear re-weighting with multiple adapters with only first adapter having non zero weight model.add_weighted_adapter( adapter_list[:2], [weight_list[0], 0], "multi_adapter_linear_reweighting_single_enabled", combination_type="linear", ) with pytest.raises(ValueError): model.add_weighted_adapter( adapter_list[1:], weight_list[1:], "multi_adapter_linear_reweighting_uneven_r", combination_type="linear", ) with pytest.raises(ValueError): model.add_weighted_adapter( adapter_list[1:], weight_list[1:], "multi_adapter_ties_reweighting_uneven_r", combination_type="ties", density=0.5, ) with pytest.raises(ValueError): model.add_weighted_adapter( adapter_list[1:], weight_list[1:], "multi_adapter_dare_linear_reweighting_uneven_r", combination_type="dare_linear", density=0.5, ) with pytest.raises(ValueError): model.add_weighted_adapter( adapter_list[1:], weight_list[1:], "multi_adapter_dare_ties_reweighting_uneven_r", combination_type="dare_ties", density=0.5, ) with pytest.raises(ValueError): model.add_weighted_adapter( adapter_list[1:], weight_list[1:], "multi_adapter_magnitude_prune_reweighting_uneven_r", combination_type="magnitude_prune", density=0.5, ) new_adapters = [ "single_adapter_reweighting", "multi_adapter_svd_reweighting", "multi_adapter_ties_svd_reweighting", "multi_adapter_dare_linear_svd_reweighting", "multi_adapter_dare_ties_svd_reweighting", "multi_adapter_magnitude_prune_svd_reweighting", "multi_adapter_cat_reweighting", "multi_adapter_linear_reweighting", "multi_adapter_linear_reweighting_single_enabled", "multi_adapter_ties_reweighting", "multi_adapter_dare_linear_reweighting", "multi_adapter_dare_ties_reweighting", "multi_adapter_magnitude_prune_reweighting", ] for new_adapter in new_adapters: assert new_adapter in model.peft_config key_list = [key for key, _ in model.named_modules()] for key in key_list: _, target, _ = _get_submodules(model, key) if isinstance(target, LoraLayer): for adapter_name in new_adapters: if "single" in adapter_name: new_delta_weight = target.get_delta_weight(adapter_name) weighted_original_delta_weights = target.get_delta_weight(adapter_list[0]) * weight_list[0] assert torch.allclose(new_delta_weight, weighted_original_delta_weights, atol=1e-4, rtol=1e-4) elif "svd" in adapter_name: assert target.r[adapter_name] == 20 elif "linear" in adapter_name: assert target.r[adapter_name] == 8 elif "cat" in adapter_name: assert target.r[adapter_name] == 28 dummy_input = 
self.prepare_inputs_for_testing() model.eval() for adapter_name in new_adapters: # ensuring new adapters pass the forward loop model.set_adapter(adapter_name) assert model.active_adapter == adapter_name assert model.active_adapters == [adapter_name] model(**dummy_input)[0] def _test_weighted_combination_of_adapters_ia3(self, model, config, adapter_list, weight_list): model.add_adapter(adapter_list[1], config) model.add_adapter(adapter_list[2], config) model = model.to(self.torch_device) # test re-weighting single adapter model.add_weighted_adapter([adapter_list[0]], [weight_list[0]], "single_adapter_reweighting") # test re-weighting with multiple adapters model.add_weighted_adapter(adapter_list[1:], weight_list[1:], "multi_adapter_reweighting") new_adapters = [ "single_adapter_reweighting", "multi_adapter_reweighting", ] for new_adapter in new_adapters: assert new_adapter in model.peft_config dummy_input = self.prepare_inputs_for_testing() model.eval() for adapter_name in new_adapters: # ensuring new adapters pass the forward loop model.set_adapter(adapter_name) assert model.active_adapter == adapter_name assert model.active_adapters == [adapter_name] model(**dummy_input)[0] def _test_weighted_combination_of_adapters(self, model_id, config_cls, config_kwargs): if issubclass(config_cls, AdaLoraConfig): # AdaLora does not support adding more than 1 adapter return pytest.skip(f"Test not applicable for {config_cls}") if model_id.endswith("qwen2"): # Qwen2 fails with weighted adapter combinations using SVD return pytest.skip(f"Test does not work with model {model_id}") if "gemma" in model_id.lower(): return pytest.skip("Combining Gemma adapters with SVD is currently failing") adapter_list = ["adapter1", "adapter_2", "adapter_3"] weight_list = [0.5, 1.5, 1.5] # Initialize the config config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) if not isinstance(config, (LoraConfig, IA3Config)): # This test is only applicable for Lora and IA3 configs return pytest.skip(f"Test not applicable for {config}") with hub_online_once(model_id): model = self.transformers_class.from_pretrained(model_id) model = get_peft_model(model, config, adapter_list[0]) if isinstance(config, LoraConfig): self._test_weighted_combination_of_adapters_lora(model, config, adapter_list, weight_list) elif isinstance(config, IA3Config): self._test_weighted_combination_of_adapters_ia3(model, config, adapter_list, weight_list) else: pytest.skip(f"Test not applicable for {config}") def _test_disable_adapter(self, model_id, config_cls, config_kwargs): task_type = config_kwargs.get("task_type") if (task_type == "SEQ_2_SEQ_LM") and (config_cls in (PromptTuningConfig, PromptEncoderConfig)): self.skipTest("Seq2Seq + prompt tuning/prompt encoder does not work with disabling adapters") def get_output(model): # helper function that works with different model types torch.manual_seed(0) if hasattr(model, "generate"): # let's check the scores, not the output ids, since the latter can easily be identical even if the # weights are slightly changed output = model.generate(**input, return_dict_in_generate=True, output_scores=True).scores[0] # take element 0, as output is a tuple else: output = model(**input) if hasattr(output, "images"): # for SD import numpy as np img = output.images[0] return torch.from_numpy(np.array(img)) return output # initialize model with hub_online_once(model_id): model = self.transformers_class.from_pretrained(model_id).to(self.torch_device) # output from BASE MODEL input = 
self.prepare_inputs_for_testing() output_before = get_output(model) # output from PEFT MODEL if hasattr(self, "instantiate_sd_peft"): # SD models are instantiated differently peft_model = self.instantiate_sd_peft(model_id, config_cls, config_kwargs) else: config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) peft_model = get_peft_model(model, config) # trainable_token_indices doesn't have support for `init_weights` so we have to do this manually self.perturb_trainable_token_weights_if_used(model, config_kwargs) output_peft = get_output(peft_model) # first check trivial case is not true that peft does not affect the output; for this to work, init_weight # must be False (if the config supports it) if isinstance(peft_model, StableDiffusionPipeline): # for SD, check that most pixels have different values assert (output_before != output_peft).float().mean() > 0.8 else: assert not torch.allclose(output_before, output_peft) # output with DISABLED ADAPTER if isinstance(peft_model, StableDiffusionPipeline): with peft_model.unet.disable_adapter(): with peft_model.text_encoder.disable_adapter(): output_peft_disabled = get_output(peft_model) # for SD, very rarely, a pixel can differ assert (output_before != output_peft_disabled).float().mean() < 1e-4 else: atol, rtol = 1e-6, 1e-6 if (platform.system() == "Windows") and (model_id == "trl-internal-testing/tiny-Llama4ForCausalLM"): # for some reason, Windows CI fails with stricter tolerance atol, rtol = 1e-5, 1e-5 with peft_model.disable_adapter(): output_peft_disabled = get_output(peft_model) assert torch.allclose(output_before, output_peft_disabled, atol=atol, rtol=rtol) # after leaving the disable_adapter context, the output should be the same as with enabled adapter again # see #1501 output_peft_after_disabled = get_output(peft_model) assert torch.allclose(output_peft, output_peft_after_disabled, atol=atol, rtol=rtol) # TODO: add tests to check if disabling adapters works after calling merge_adapter def _test_adding_multiple_adapters_with_bias_raises(self, model_id, config_cls, config_kwargs): # When trying to add multiple adapters with bias in Lora, AdaLora or BOFTConfig, an error should be # raised. Also, the peft model should not be left in a half-initialized state. 
if not issubclass(config_cls, (LoraConfig, AdaLoraConfig, BOFTConfig)): return pytest.skip(f"Test not applicable for {config_cls}") with hub_online_once(model_id): config_kwargs = config_kwargs.copy() config_kwargs["bias"] = "all" config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = self.transformers_class.from_pretrained(model_id) model = get_peft_model(model, config, "adapter0") if config_cls == LoraConfig or config_cls == AdaLoraConfig: with pytest.raises(ValueError): model.add_adapter("adapter1", replace(config, r=20)) if config_cls == BOFTConfig: with pytest.raises(ValueError): model.add_adapter("adapter1", replace(config, boft_block_num=1, boft_block_size=0)) # (superficial) test that the model is not left in a half-initialized state when adding an adapter fails assert "adapter1" not in model.peft_config assert "adapter1" not in model.base_model.peft_config def _test_passing_input_embeds_works(self, test_name, model_id, config_cls, config_kwargs): # https://github.com/huggingface/peft/issues/727 with hub_online_once(model_id): model = self.transformers_class.from_pretrained(model_id) config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = get_peft_model(model, config, adapter_name="test-adapter").to(self.torch_device) dummy_input = self.prepare_inputs_for_testing() inputs_embeds = model.get_input_embeddings()(dummy_input["input_ids"]) # just check that no error is raised model.forward(inputs_embeds=inputs_embeds)
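# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the test suite): a stand-alone version of the
# adapter-combination flow exercised by _test_weighted_combination_of_adapters_lora
# above. The model id, target module and adapter names are assumptions chosen
# purely for illustration.
def _demo_weighted_adapter_combination():
    import torch
    from transformers import AutoModelForCausalLM

    from peft import LoraConfig, get_peft_model

    base = AutoModelForCausalLM.from_pretrained("gpt2")  # assumed example model
    model = get_peft_model(
        base, LoraConfig(r=8, target_modules=["c_attn"]), adapter_name="adapter_a"
    )
    model.add_adapter("adapter_b", LoraConfig(r=8, target_modules=["c_attn"]))

    # "linear" requires all combined adapters to share the same rank; the result is
    # stored under a new adapter name and activated like any other adapter.
    model.add_weighted_adapter(
        ["adapter_a", "adapter_b"], [0.7, 0.3], "merged", combination_type="linear"
    )
    model.set_adapter("merged")
    with torch.inference_mode():
        return model(input_ids=torch.tensor([[1, 2, 3]])).logits.shape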
peft/tests/testing_common.py/0
{ "file_path": "peft/tests/testing_common.py", "repo_id": "peft", "token_count": 40862 }
252
#!/usr/bin/env python3 """ Checkpoint Averaging Script This script averages all model weights for checkpoints in specified path that match the specified filter wildcard. All checkpoints must be from the exact same model. For any hope of decent results, the checkpoints should be from the same or child (via resumes) training session. This can be viewed as similar to maintaining running EMA (exponential moving average) of the model weights or performing SWA (stochastic weight averaging), but post-training. Hacked together by / Copyright 2020 Ross Wightman (https://github.com/rwightman) """ import torch import argparse import os import glob import hashlib from timm.models import load_state_dict try: import safetensors.torch _has_safetensors = True except ImportError: _has_safetensors = False DEFAULT_OUTPUT = "./averaged.pth" DEFAULT_SAFE_OUTPUT = "./averaged.safetensors" parser = argparse.ArgumentParser(description='PyTorch Checkpoint Averager') parser.add_argument('--input', default='', type=str, metavar='PATH', help='path to base input folder containing checkpoints') parser.add_argument('--filter', default='*.pth.tar', type=str, metavar='WILDCARD', help='checkpoint filter (path wildcard)') parser.add_argument('--output', default=DEFAULT_OUTPUT, type=str, metavar='PATH', help=f'Output filename. Defaults to {DEFAULT_SAFE_OUTPUT} when passing --safetensors.') parser.add_argument('--no-use-ema', dest='no_use_ema', action='store_true', help='Force not using ema version of weights (if present)') parser.add_argument('--no-sort', dest='no_sort', action='store_true', help='Do not sort and select by checkpoint metric, also makes "n" argument irrelevant') parser.add_argument('-n', type=int, default=10, metavar='N', help='Number of checkpoints to average') parser.add_argument('--safetensors', action='store_true', help='Save weights using safetensors instead of the default torch way (pickle).') def checkpoint_metric(checkpoint_path): if not checkpoint_path or not os.path.isfile(checkpoint_path): return {} print("=> Extracting metric from checkpoint '{}'".format(checkpoint_path)) checkpoint = torch.load(checkpoint_path, map_location='cpu') metric = None if 'metric' in checkpoint: metric = checkpoint['metric'] elif 'metrics' in checkpoint and 'metric_name' in checkpoint: metrics = checkpoint['metrics'] print(metrics) metric = metrics[checkpoint['metric_name']] return metric def main(): args = parser.parse_args() # by default use the EMA weights (if present) args.use_ema = not args.no_use_ema # by default sort by checkpoint metric (if present) and avg top n checkpoints args.sort = not args.no_sort if args.safetensors and args.output == DEFAULT_OUTPUT: # Default path changes if using safetensors args.output = DEFAULT_SAFE_OUTPUT output, output_ext = os.path.splitext(args.output) if not output_ext: output_ext = ('.safetensors' if args.safetensors else '.pth') output = output + output_ext if args.safetensors and not output_ext == ".safetensors": print( "Warning: saving weights as safetensors but output file extension is not " f"set to '.safetensors': {args.output}" ) if os.path.exists(output): print("Error: Output filename ({}) already exists.".format(output)) exit(1) pattern = args.input if not args.input.endswith(os.path.sep) and not args.filter.startswith(os.path.sep): pattern += os.path.sep pattern += args.filter checkpoints = glob.glob(pattern, recursive=True) if args.sort: checkpoint_metrics = [] for c in checkpoints: metric = checkpoint_metric(c) if metric is not None: checkpoint_metrics.append((metric, 
c)) checkpoint_metrics = list(sorted(checkpoint_metrics)) checkpoint_metrics = checkpoint_metrics[-args.n:] if checkpoint_metrics: print("Selected checkpoints:") [print(m, c) for m, c in checkpoint_metrics] avg_checkpoints = [c for m, c in checkpoint_metrics] else: avg_checkpoints = checkpoints if avg_checkpoints: print("Selected checkpoints:") [print(c) for c in checkpoints] if not avg_checkpoints: print('Error: No checkpoints found to average.') exit(1) avg_state_dict = {} avg_counts = {} for c in avg_checkpoints: new_state_dict = load_state_dict(c, args.use_ema) if not new_state_dict: print(f"Error: Checkpoint ({c}) doesn't exist") continue for k, v in new_state_dict.items(): if k not in avg_state_dict: avg_state_dict[k] = v.clone().to(dtype=torch.float64) avg_counts[k] = 1 else: avg_state_dict[k] += v.to(dtype=torch.float64) avg_counts[k] += 1 for k, v in avg_state_dict.items(): v.div_(avg_counts[k]) # float32 overflow seems unlikely based on weights seen to date, but who knows float32_info = torch.finfo(torch.float32) final_state_dict = {} for k, v in avg_state_dict.items(): v = v.clamp(float32_info.min, float32_info.max) final_state_dict[k] = v.to(dtype=torch.float32) if args.safetensors: assert _has_safetensors, "`pip install safetensors` to use .safetensors" safetensors.torch.save_file(final_state_dict, output) else: torch.save(final_state_dict, output) with open(output, 'rb') as f: sha_hash = hashlib.sha256(f.read()).hexdigest() print(f"=> Saved state_dict to '{output}, SHA256: {sha_hash}'") if __name__ == '__main__': main()
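# Example invocation (the paths and filter below are illustrative; adjust them to
# your own checkpoint layout -- each flag corresponds to an argparse option defined
# at the top of this script):
#
#   python avg_checkpoints.py --input ./output/train/ --filter 'checkpoint-*.pth.tar' \
#       -n 5 --safetensors --output ./averaged.safetensors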
pytorch-image-models/avg_checkpoints.py/0
{ "file_path": "pytorch-image-models/avg_checkpoints.py", "repo_id": "pytorch-image-models", "token_count": 2377 }
253
# AdvProp (EfficientNet) **AdvProp** is an adversarial training scheme which treats adversarial examples as additional examples, to prevent overfitting. Key to the method is the usage of a separate auxiliary batch norm for adversarial examples, as they have different underlying distributions to normal examples. The weights from this model were ported from [Tensorflow/TPU](https://github.com/tensorflow/tpu). ## How do I use this model on an image? To load a pretrained model: ```py >>> import timm >>> model = timm.create_model('tf_efficientnet_b0_ap', pretrained=True) >>> model.eval() ``` To load and preprocess the image: ```py >>> import urllib >>> from PIL import Image >>> from timm.data import resolve_data_config >>> from timm.data.transforms_factory import create_transform >>> config = resolve_data_config({}, model=model) >>> transform = create_transform(**config) >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") >>> urllib.request.urlretrieve(url, filename) >>> img = Image.open(filename).convert('RGB') >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension ``` To get the model predictions: ```py >>> import torch >>> with torch.inference_mode(): ... out = model(tensor) >>> probabilities = torch.nn.functional.softmax(out[0], dim=0) >>> print(probabilities.shape) >>> # prints: torch.Size([1000]) ``` To get the top-5 predictions class names: ```py >>> # Get imagenet class mappings >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") >>> urllib.request.urlretrieve(url, filename) >>> with open("imagenet_classes.txt", "r") as f: ... categories = [s.strip() for s in f.readlines()] >>> # Print top categories per image >>> top5_prob, top5_catid = torch.topk(probabilities, 5) >>> for i in range(top5_prob.size(0)): ... print(categories[top5_catid[i]], top5_prob[i].item()) >>> # prints class names and probabilities like: >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] ``` Replace the model name with the variant you want to use, e.g. `tf_efficientnet_b0_ap`. You can find the IDs in the model summaries at the top of this page. To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use. ## How do I finetune this model? You can finetune any of the pre-trained models just by changing the classifier (the last layer). ```py >>> model = timm.create_model('tf_efficientnet_b0_ap', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) ``` To finetune on your own dataset, you have to write a training loop or adapt [timm's training script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. ## How do I train this model? You can follow the [timm recipe scripts](../training_script) for training a new model afresh. ## Citation ```BibTeX @misc{xie2020adversarial, title={Adversarial Examples Improve Image Recognition}, author={Cihang Xie and Mingxing Tan and Boqing Gong and Jiang Wang and Alan Yuille and Quoc V. 
Le}, year={2020}, eprint={1911.09665}, archivePrefix={arXiv}, primaryClass={cs.CV} } ``` <!-- Type: model-index Collections: - Name: AdvProp Paper: Title: Adversarial Examples Improve Image Recognition URL: https://paperswithcode.com/paper/adversarial-examples-improve-image Models: - Name: tf_efficientnet_b0_ap In Collection: AdvProp Metadata: FLOPs: 488688572 Parameters: 5290000 File Size: 21385973 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Techniques: - AdvProp - AutoAugment - Label Smoothing - RMSProp - Stochastic Depth - Weight Decay Training Data: - ImageNet ID: tf_efficientnet_b0_ap LR: 0.256 Epochs: 350 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 2048 Image Size: '224' Weight Decay: 1.0e-05 Interpolation: bicubic RMSProp Decay: 0.9 Label Smoothing: 0.1 BatchNorm Momentum: 0.99 Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1334 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b0_ap-f262efe1.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 77.1% Top 5 Accuracy: 93.26% - Name: tf_efficientnet_b1_ap In Collection: AdvProp Metadata: FLOPs: 883633200 Parameters: 7790000 File Size: 31515350 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Techniques: - AdvProp - AutoAugment - Label Smoothing - RMSProp - Stochastic Depth - Weight Decay Training Data: - ImageNet ID: tf_efficientnet_b1_ap LR: 0.256 Epochs: 350 Crop Pct: '0.882' Momentum: 0.9 Batch Size: 2048 Image Size: '240' Weight Decay: 1.0e-05 Interpolation: bicubic RMSProp Decay: 0.9 Label Smoothing: 0.1 BatchNorm Momentum: 0.99 Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1344 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b1_ap-44ef0a3d.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 79.28% Top 5 Accuracy: 94.3% - Name: tf_efficientnet_b2_ap In Collection: AdvProp Metadata: FLOPs: 1234321170 Parameters: 9110000 File Size: 36800745 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Techniques: - AdvProp - AutoAugment - Label Smoothing - RMSProp - Stochastic Depth - Weight Decay Training Data: - ImageNet ID: tf_efficientnet_b2_ap LR: 0.256 Epochs: 350 Crop Pct: '0.89' Momentum: 0.9 Batch Size: 2048 Image Size: '260' Weight Decay: 1.0e-05 Interpolation: bicubic RMSProp Decay: 0.9 Label Smoothing: 0.1 BatchNorm Momentum: 0.99 Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1354 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b2_ap-2f8e7636.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 80.3% Top 5 Accuracy: 95.03% - Name: tf_efficientnet_b3_ap In Collection: AdvProp Metadata: FLOPs: 2275247568 
Parameters: 12230000 File Size: 49384538 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Techniques: - AdvProp - AutoAugment - Label Smoothing - RMSProp - Stochastic Depth - Weight Decay Training Data: - ImageNet ID: tf_efficientnet_b3_ap LR: 0.256 Epochs: 350 Crop Pct: '0.904' Momentum: 0.9 Batch Size: 2048 Image Size: '300' Weight Decay: 1.0e-05 Interpolation: bicubic RMSProp Decay: 0.9 Label Smoothing: 0.1 BatchNorm Momentum: 0.99 Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1364 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b3_ap-aad25bdd.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 81.82% Top 5 Accuracy: 95.62% - Name: tf_efficientnet_b4_ap In Collection: AdvProp Metadata: FLOPs: 5749638672 Parameters: 19340000 File Size: 77993585 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Techniques: - AdvProp - AutoAugment - Label Smoothing - RMSProp - Stochastic Depth - Weight Decay Training Data: - ImageNet ID: tf_efficientnet_b4_ap LR: 0.256 Epochs: 350 Crop Pct: '0.922' Momentum: 0.9 Batch Size: 2048 Image Size: '380' Weight Decay: 1.0e-05 Interpolation: bicubic RMSProp Decay: 0.9 Label Smoothing: 0.1 BatchNorm Momentum: 0.99 Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1374 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b4_ap-dedb23e6.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 83.26% Top 5 Accuracy: 96.39% - Name: tf_efficientnet_b5_ap In Collection: AdvProp Metadata: FLOPs: 13176501888 Parameters: 30390000 File Size: 122403150 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Techniques: - AdvProp - AutoAugment - Label Smoothing - RMSProp - Stochastic Depth - Weight Decay Training Data: - ImageNet ID: tf_efficientnet_b5_ap LR: 0.256 Epochs: 350 Crop Pct: '0.934' Momentum: 0.9 Batch Size: 2048 Image Size: '456' Weight Decay: 1.0e-05 Interpolation: bicubic RMSProp Decay: 0.9 Label Smoothing: 0.1 BatchNorm Momentum: 0.99 Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1384 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b5_ap-9e82fae8.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 84.25% Top 5 Accuracy: 96.97% - Name: tf_efficientnet_b6_ap In Collection: AdvProp Metadata: FLOPs: 24180518488 Parameters: 43040000 File Size: 173237466 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Techniques: - AdvProp - AutoAugment - Label Smoothing - RMSProp - Stochastic Depth - Weight Decay 
Training Data: - ImageNet ID: tf_efficientnet_b6_ap LR: 0.256 Epochs: 350 Crop Pct: '0.942' Momentum: 0.9 Batch Size: 2048 Image Size: '528' Weight Decay: 1.0e-05 Interpolation: bicubic RMSProp Decay: 0.9 Label Smoothing: 0.1 BatchNorm Momentum: 0.99 Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1394 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b6_ap-4ffb161f.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 84.79% Top 5 Accuracy: 97.14% - Name: tf_efficientnet_b7_ap In Collection: AdvProp Metadata: FLOPs: 48205304880 Parameters: 66349999 File Size: 266850607 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Techniques: - AdvProp - AutoAugment - Label Smoothing - RMSProp - Stochastic Depth - Weight Decay Training Data: - ImageNet ID: tf_efficientnet_b7_ap LR: 0.256 Epochs: 350 Crop Pct: '0.949' Momentum: 0.9 Batch Size: 2048 Image Size: '600' Weight Decay: 1.0e-05 Interpolation: bicubic RMSProp Decay: 0.9 Label Smoothing: 0.1 BatchNorm Momentum: 0.99 Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1405 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b7_ap-ddb28fec.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 85.12% Top 5 Accuracy: 97.25% - Name: tf_efficientnet_b8_ap In Collection: AdvProp Metadata: FLOPs: 80962956270 Parameters: 87410000 File Size: 351412563 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Techniques: - AdvProp - AutoAugment - Label Smoothing - RMSProp - Stochastic Depth - Weight Decay Training Data: - ImageNet ID: tf_efficientnet_b8_ap LR: 0.128 Epochs: 350 Crop Pct: '0.954' Momentum: 0.9 Batch Size: 2048 Image Size: '672' Weight Decay: 1.0e-05 Interpolation: bicubic RMSProp Decay: 0.9 Label Smoothing: 0.1 BatchNorm Momentum: 0.99 Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1416 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b8_ap-00e169fa.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 85.37% Top 5 Accuracy: 97.3% -->
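The auxiliary-batch-norm idea described at the top of this page can be sketched in a few lines. The snippet below is an illustrative simplification, not the timm or TensorFlow/TPU implementation; the class name and the `adversarial` flag are made up for the example.

```py
import torch.nn as nn

class DualBatchNorm2d(nn.Module):
    """Route clean and adversarial examples through separate BN statistics."""

    def __init__(self, num_features):
        super().__init__()
        self.bn_clean = nn.BatchNorm2d(num_features)  # main BN, used at inference
        self.bn_adv = nn.BatchNorm2d(num_features)    # auxiliary BN for adversarial batches

    def forward(self, x, adversarial: bool = False):
        # adversarial mini-batches update only the auxiliary statistics,
        # so the clean-data statistics are never polluted
        return self.bn_adv(x) if adversarial else self.bn_clean(x)
```

During AdvProp-style training, clean mini-batches would be passed with `adversarial=False` and their adversarially perturbed counterparts with `adversarial=True`; at evaluation time only the clean batch-norm statistics are used.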
pytorch-image-models/hfdocs/source/models/advprop.mdx/0
{ "file_path": "pytorch-image-models/hfdocs/source/models/advprop.mdx", "repo_id": "pytorch-image-models", "token_count": 6035 }
254
# NASNet **NASNet** is a type of convolutional neural network discovered through neural architecture search. The building blocks consist of normal and reduction cells. ## How do I use this model on an image? To load a pretrained model: ```py >>> import timm >>> model = timm.create_model('nasnetalarge', pretrained=True) >>> model.eval() ``` To load and preprocess the image: ```py >>> import urllib >>> from PIL import Image >>> from timm.data import resolve_data_config >>> from timm.data.transforms_factory import create_transform >>> config = resolve_data_config({}, model=model) >>> transform = create_transform(**config) >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") >>> urllib.request.urlretrieve(url, filename) >>> img = Image.open(filename).convert('RGB') >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension ``` To get the model predictions: ```py >>> import torch >>> with torch.inference_mode(): ... out = model(tensor) >>> probabilities = torch.nn.functional.softmax(out[0], dim=0) >>> print(probabilities.shape) >>> # prints: torch.Size([1000]) ``` To get the top-5 predictions class names: ```py >>> # Get imagenet class mappings >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") >>> urllib.request.urlretrieve(url, filename) >>> with open("imagenet_classes.txt", "r") as f: ... categories = [s.strip() for s in f.readlines()] >>> # Print top categories per image >>> top5_prob, top5_catid = torch.topk(probabilities, 5) >>> for i in range(top5_prob.size(0)): ... print(categories[top5_catid[i]], top5_prob[i].item()) >>> # prints class names and probabilities like: >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] ``` Replace the model name with the variant you want to use, e.g. `nasnetalarge`. You can find the IDs in the model summaries at the top of this page. To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use. ## How do I finetune this model? You can finetune any of the pre-trained models just by changing the classifier (the last layer). ```py >>> model = timm.create_model('nasnetalarge', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) ``` To finetune on your own dataset, you have to write a training loop or adapt [timm's training script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. ## How do I train this model? You can follow the [timm recipe scripts](../training_script) for training a new model afresh. ## Citation ```BibTeX @misc{zoph2018learning, title={Learning Transferable Architectures for Scalable Image Recognition}, author={Barret Zoph and Vijay Vasudevan and Jonathon Shlens and Quoc V. 
Le}, year={2018}, eprint={1707.07012}, archivePrefix={arXiv}, primaryClass={cs.CV} } ``` <!-- Type: model-index Collections: - Name: NASNet Paper: Title: Learning Transferable Architectures for Scalable Image Recognition URL: https://paperswithcode.com/paper/learning-transferable-architectures-for Models: - Name: nasnetalarge In Collection: NASNet Metadata: FLOPs: 30242402862 Parameters: 88750000 File Size: 356056626 Architecture: - Average Pooling - Batch Normalization - Convolution - Depthwise Separable Convolution - Dropout - ReLU Tasks: - Image Classification Training Techniques: - Label Smoothing - RMSProp - Weight Decay Training Data: - ImageNet Training Resources: 50x Tesla K40 GPUs ID: nasnetalarge Dropout: 0.5 Crop Pct: '0.911' Momentum: 0.9 Image Size: '331' Interpolation: bicubic Label Smoothing: 0.1 RMSProp \\( \epsilon \\): 1.0 Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/nasnet.py#L562 Weights: http://data.lip6.fr/cadene/pretrainedmodels/nasnetalarge-a1897284.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 82.63% Top 5 Accuracy: 96.05% -->
pytorch-image-models/hfdocs/source/models/nasnet.mdx/0
{ "file_path": "pytorch-image-models/hfdocs/source/models/nasnet.mdx", "repo_id": "pytorch-image-models", "token_count": 1539 }
255
# SK-ResNeXt **SK ResNeXt** is a variant of a [ResNeXt](https://www.paperswithcode.com/method/resnext) that employs a [Selective Kernel](https://paperswithcode.com/method/selective-kernel) unit. In general, all the large kernel convolutions in the original bottleneck blocks in ResNext are replaced by the proposed [SK convolutions](https://paperswithcode.com/method/selective-kernel-convolution), enabling the network to choose appropriate receptive field sizes in an adaptive manner. ## How do I use this model on an image? To load a pretrained model: ```py >>> import timm >>> model = timm.create_model('skresnext50_32x4d', pretrained=True) >>> model.eval() ``` To load and preprocess the image: ```py >>> import urllib >>> from PIL import Image >>> from timm.data import resolve_data_config >>> from timm.data.transforms_factory import create_transform >>> config = resolve_data_config({}, model=model) >>> transform = create_transform(**config) >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") >>> urllib.request.urlretrieve(url, filename) >>> img = Image.open(filename).convert('RGB') >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension ``` To get the model predictions: ```py >>> import torch >>> with torch.inference_mode(): ... out = model(tensor) >>> probabilities = torch.nn.functional.softmax(out[0], dim=0) >>> print(probabilities.shape) >>> # prints: torch.Size([1000]) ``` To get the top-5 predictions class names: ```py >>> # Get imagenet class mappings >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") >>> urllib.request.urlretrieve(url, filename) >>> with open("imagenet_classes.txt", "r") as f: ... categories = [s.strip() for s in f.readlines()] >>> # Print top categories per image >>> top5_prob, top5_catid = torch.topk(probabilities, 5) >>> for i in range(top5_prob.size(0)): ... print(categories[top5_catid[i]], top5_prob[i].item()) >>> # prints class names and probabilities like: >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] ``` Replace the model name with the variant you want to use, e.g. `skresnext50_32x4d`. You can find the IDs in the model summaries at the top of this page. To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use. ## How do I finetune this model? You can finetune any of the pre-trained models just by changing the classifier (the last layer). ```py >>> model = timm.create_model('skresnext50_32x4d', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) ``` To finetune on your own dataset, you have to write a training loop or adapt [timm's training script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. ## How do I train this model? You can follow the [timm recipe scripts](../training_script) for training a new model afresh. 
## Citation ```BibTeX @misc{li2019selective, title={Selective Kernel Networks}, author={Xiang Li and Wenhai Wang and Xiaolin Hu and Jian Yang}, year={2019}, eprint={1903.06586}, archivePrefix={arXiv}, primaryClass={cs.CV} } ``` <!-- Type: model-index Collections: - Name: SKResNeXt Paper: Title: Selective Kernel Networks URL: https://paperswithcode.com/paper/selective-kernel-networks Models: - Name: skresnext50_32x4d In Collection: SKResNeXt Metadata: FLOPs: 5739845824 Parameters: 27480000 File Size: 110340975 Architecture: - Convolution - Dense Connections - Global Average Pooling - Grouped Convolution - Max Pooling - Residual Connection - Selective Kernel - Softmax Tasks: - Image Classification Training Data: - ImageNet Training Resources: 8x GPUs ID: skresnext50_32x4d LR: 0.1 Epochs: 100 Layers: 50 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 256 Image Size: '224' Weight Decay: 0.0001 Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/sknet.py#L210 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/skresnext50_ra-f40e40bf.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 80.15% Top 5 Accuracy: 94.64% -->
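As a rough illustration of the selective-kernel mechanism described at the top of this page, the snippet below fuses two convolution branches with different receptive fields via a softmax over branch-wise attention weights. It is a simplified sketch, not timm's `SelectiveKernel` module; the branch layout and reduction ratio are illustrative.

```py
import torch
import torch.nn as nn

class SelectiveKernel(nn.Module):
    """Two conv branches with different receptive fields, fused by learned attention."""

    def __init__(self, channels, reduction=16):
        super().__init__()
        # 3x3 branch and a dilated 3x3 branch (effective 5x5 receptive field)
        self.branch3 = nn.Sequential(
            nn.Conv2d(channels, channels, 3, padding=1, bias=False),
            nn.BatchNorm2d(channels), nn.ReLU(inplace=True))
        self.branch5 = nn.Sequential(
            nn.Conv2d(channels, channels, 3, padding=2, dilation=2, bias=False),
            nn.BatchNorm2d(channels), nn.ReLU(inplace=True))
        mid = max(channels // reduction, 32)
        self.fc = nn.Sequential(nn.Linear(channels, mid), nn.ReLU(inplace=True))
        self.select = nn.Linear(mid, channels * 2)

    def forward(self, x):
        feats = torch.stack([self.branch3(x), self.branch5(x)], dim=1)  # (B, 2, C, H, W)
        pooled = feats.sum(dim=1).mean(dim=(2, 3))                      # (B, C) global context
        attn = self.select(self.fc(pooled)).view(x.shape[0], 2, -1)     # (B, 2, C)
        attn = torch.softmax(attn, dim=1)                               # softmax across branches
        return (feats * attn.unsqueeze(-1).unsqueeze(-1)).sum(dim=1)    # weighted fusion
```

`SelectiveKernel(64)(torch.randn(2, 64, 56, 56))` returns a tensor of the same shape, with each channel a learned blend of the 3x3 and dilated-3x3 branch responses.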
pytorch-image-models/hfdocs/source/models/skresnext.mdx/0
{ "file_path": "pytorch-image-models/hfdocs/source/models/skresnext.mdx", "repo_id": "pytorch-image-models", "token_count": 1646 }
256