# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import re
from typing import List

import torch

from ..utils import is_peft_version, logging, state_dict_all_zero


logger = logging.get_logger(__name__)


def swap_scale_shift(weight):
    shift, scale = weight.chunk(2, dim=0)
    new_weight = torch.cat([scale, shift], dim=0)
    return new_weight


def _maybe_map_sgm_blocks_to_diffusers(state_dict, unet_config, delimiter="_", block_slice_pos=5):
    # 1. get all state_dict_keys
    all_keys = list(state_dict.keys())
    sgm_patterns = ["input_blocks", "middle_block", "output_blocks"]
    not_sgm_patterns = ["down_blocks", "mid_block", "up_blocks"]

    # check if state_dict contains both patterns
    contains_sgm_patterns = False
    contains_not_sgm_patterns = False
    for key in all_keys:
        if any(p in key for p in sgm_patterns):
            contains_sgm_patterns = True
        elif any(p in key for p in not_sgm_patterns):
            contains_not_sgm_patterns = True

    # if state_dict contains both patterns, remove sgm
    # we can then return state_dict immediately
    if contains_sgm_patterns and contains_not_sgm_patterns:
        for key in all_keys:
            if any(p in key for p in sgm_patterns):
                state_dict.pop(key)
        return state_dict

    # 2. check if needs remapping, if not return original dict
    is_in_sgm_format = False
    for key in all_keys:
        if any(p in key for p in sgm_patterns):
            is_in_sgm_format = True
            break

    if not is_in_sgm_format:
        return state_dict

    # 3. Else remap from SGM patterns
    new_state_dict = {}
    inner_block_map = ["resnets", "attentions", "upsamplers"]

    # Retrieves # of down, mid and up blocks
    input_block_ids, middle_block_ids, output_block_ids = set(), set(), set()

    for layer in all_keys:
        if "text" in layer:
            new_state_dict[layer] = state_dict.pop(layer)
        else:
            layer_id = int(layer.split(delimiter)[:block_slice_pos][-1])
            if sgm_patterns[0] in layer:
                input_block_ids.add(layer_id)
            elif sgm_patterns[1] in layer:
                middle_block_ids.add(layer_id)
            elif sgm_patterns[2] in layer:
                output_block_ids.add(layer_id)
            else:
                raise ValueError(f"Checkpoint not supported because layer {layer} not supported.")

    input_blocks = {
        layer_id: [key for key in state_dict if f"input_blocks{delimiter}{layer_id}" in key]
        for layer_id in input_block_ids
    }
    middle_blocks = {
        layer_id: [key for key in state_dict if f"middle_block{delimiter}{layer_id}" in key]
        for layer_id in middle_block_ids
    }
    output_blocks = {
        layer_id: [key for key in state_dict if f"output_blocks{delimiter}{layer_id}" in key]
        for layer_id in output_block_ids
    }

    # Rename keys accordingly
    for i in input_block_ids:
        block_id = (i - 1) // (unet_config.layers_per_block + 1)
        layer_in_block_id = (i - 1) % (unet_config.layers_per_block + 1)

        for key in input_blocks[i]:
            inner_block_id = int(key.split(delimiter)[block_slice_pos])
            inner_block_key = inner_block_map[inner_block_id] if "op" not in key else "downsamplers"
            inner_layers_in_block = str(layer_in_block_id) if "op" not in key else "0"
            new_key = delimiter.join(
                key.split(delimiter)[: block_slice_pos - 1]
                + [str(block_id), inner_block_key, inner_layers_in_block]
                + key.split(delimiter)[block_slice_pos + 1 :]
            )
            new_state_dict[new_key] = state_dict.pop(key)

    for i in middle_block_ids:
        key_part = None
        if i == 0:
            key_part = [inner_block_map[0], "0"]
        elif i == 1:
            key_part = [inner_block_map[1], "0"]
        elif i == 2:
            key_part = [inner_block_map[0], "1"]
        else:
            raise ValueError(f"Invalid middle block id {i}.")

        for key in middle_blocks[i]:
            new_key = delimiter.join(
                key.split(delimiter)[: block_slice_pos - 1] + key_part + key.split(delimiter)[block_slice_pos:]
            )
            new_state_dict[new_key] = state_dict.pop(key)

    for i in output_block_ids:
        block_id = i // (unet_config.layers_per_block + 1)
        layer_in_block_id = i % (unet_config.layers_per_block + 1)

        for key in output_blocks[i]:
            inner_block_id = int(key.split(delimiter)[block_slice_pos])
            inner_block_key = inner_block_map[inner_block_id]
            inner_layers_in_block = str(layer_in_block_id) if inner_block_id < 2 else "0"
            new_key = delimiter.join(
                key.split(delimiter)[: block_slice_pos - 1]
                + [str(block_id), inner_block_key, inner_layers_in_block]
                + key.split(delimiter)[block_slice_pos + 1 :]
            )
            new_state_dict[new_key] = state_dict.pop(key)

    if state_dict:
        raise ValueError("At this point all state dict entries have to be converted.")

    return new_state_dict


def _convert_non_diffusers_lora_to_diffusers(state_dict, unet_name="unet", text_encoder_name="text_encoder"):
    """
    Converts a non-Diffusers LoRA state dict to a Diffusers compatible state dict.

    Args:
        state_dict (`dict`): The state dict to convert.
        unet_name (`str`, optional): The name of the U-Net module in the Diffusers model. Defaults to "unet".
        text_encoder_name (`str`, optional): The name of the text encoder module in the Diffusers model. Defaults to
            "text_encoder".

    Returns:
        `tuple`: A tuple containing the converted state dict and a dictionary of alphas.
    """
    unet_state_dict = {}
    te_state_dict = {}
    te2_state_dict = {}
    network_alphas = {}

    # Check for DoRA-enabled LoRAs.
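    # DoRA checkpoints ship extra `dora_scale` tensors alongside the usual LoRA up/down
    # weights; these are remapped below to `lora_magnitude_vector` entries, which requires
    # a sufficiently recent `peft` version.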
    dora_present_in_unet = any("dora_scale" in k and "lora_unet_" in k for k in state_dict)
    dora_present_in_te = any("dora_scale" in k and ("lora_te_" in k or "lora_te1_" in k) for k in state_dict)
    dora_present_in_te2 = any("dora_scale" in k and "lora_te2_" in k for k in state_dict)
    if dora_present_in_unet or dora_present_in_te or dora_present_in_te2:
        if is_peft_version("<", "0.9.0"):
            raise ValueError(
                "You need `peft` 0.9.0 at least to use DoRA-enabled LoRAs. Please upgrade your installation of `peft`."
            )

    # Iterate over all LoRA weights.
    all_lora_keys = list(state_dict.keys())
    for key in all_lora_keys:
        if not key.endswith("lora_down.weight"):
            continue

        # Extract LoRA name.
        lora_name = key.split(".")[0]

        # Find corresponding up weight and alpha.
        lora_name_up = lora_name + ".lora_up.weight"
        lora_name_alpha = lora_name + ".alpha"

        # Handle U-Net LoRAs.
        if lora_name.startswith("lora_unet_"):
            diffusers_name = _convert_unet_lora_key(key)

            # Store down and up weights.
            unet_state_dict[diffusers_name] = state_dict.pop(key)
            unet_state_dict[diffusers_name.replace(".down.", ".up.")] = state_dict.pop(lora_name_up)

            # Store DoRA scale if present.
            if dora_present_in_unet:
                dora_scale_key_to_replace = "_lora.down." if "_lora.down." in diffusers_name else ".lora.down."
                unet_state_dict[diffusers_name.replace(dora_scale_key_to_replace, ".lora_magnitude_vector.")] = (
                    state_dict.pop(key.replace("lora_down.weight", "dora_scale"))
                )

        # Handle text encoder LoRAs.
        elif lora_name.startswith(("lora_te_", "lora_te1_", "lora_te2_")):
            diffusers_name = _convert_text_encoder_lora_key(key, lora_name)

            # Store down and up weights for te or te2.
            if lora_name.startswith(("lora_te_", "lora_te1_")):
                te_state_dict[diffusers_name] = state_dict.pop(key)
                te_state_dict[diffusers_name.replace(".down.", ".up.")] = state_dict.pop(lora_name_up)
            else:
                te2_state_dict[diffusers_name] = state_dict.pop(key)
                te2_state_dict[diffusers_name.replace(".down.", ".up.")] = state_dict.pop(lora_name_up)

            # Store DoRA scale if present.
            if dora_present_in_te or dora_present_in_te2:
                dora_scale_key_to_replace_te = (
                    "_lora.down." if "_lora.down." in diffusers_name else ".lora_linear_layer."
                )
                if lora_name.startswith(("lora_te_", "lora_te1_")):
                    te_state_dict[diffusers_name.replace(dora_scale_key_to_replace_te, ".lora_magnitude_vector.")] = (
                        state_dict.pop(key.replace("lora_down.weight", "dora_scale"))
                    )
                elif lora_name.startswith("lora_te2_"):
                    te2_state_dict[diffusers_name.replace(dora_scale_key_to_replace_te, ".lora_magnitude_vector.")] = (
                        state_dict.pop(key.replace("lora_down.weight", "dora_scale"))
                    )

        # Store alpha if present.
        if lora_name_alpha in state_dict:
            alpha = state_dict.pop(lora_name_alpha).item()
            network_alphas.update(_get_alpha_name(lora_name_alpha, diffusers_name, alpha))

    # Check if any keys remain.
    if len(state_dict) > 0:
        raise ValueError(f"The following keys have not been correctly renamed: \n\n {', '.join(state_dict.keys())}")

    logger.info("Non-diffusers checkpoint detected.")

    # Construct final state dict.
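    # Prefix the converted entries with their Diffusers module names; the second text
    # encoder (te2), when present (e.g. for SDXL), is stored under `text_encoder_2`.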
unet_state_dict = {f"{unet_name}.{module_name}": params for module_name, params in unet_state_dict.items()} te_state_dict = {f"{text_encoder_name}.{module_name}": params for module_name, params in te_state_dict.items()} te2_state_dict = ( {f"text_encoder_2.{module_name}": params for module_name, params in te2_state_dict.items()} if len(te2_state_dict) > 0 else None ) if te2_state_dict is not None: te_state_dict.update(te2_state_dict) new_state_dict = {**unet_state_dict, **te_state_dict} return new_state_dict, network_alphas def _convert_unet_lora_key(key): """ Converts a U-Net LoRA key to a Diffusers compatible key. """ diffusers_name = key.replace("lora_unet_", "").replace("_", ".") # Replace common U-Net naming patterns. diffusers_name = diffusers_name.replace("input.blocks", "down_blocks") diffusers_name = diffusers_name.replace("down.blocks", "down_blocks") diffusers_name = diffusers_name.replace("middle.block", "mid_block") diffusers_name = diffusers_name.replace("mid.block", "mid_block") diffusers_name = diffusers_name.replace("output.blocks", "up_blocks") diffusers_name = diffusers_name.replace("up.blocks", "up_blocks") diffusers_name = diffusers_name.replace("transformer.blocks", "transformer_blocks") diffusers_name = diffusers_name.replace("to.q.lora", "to_q_lora") diffusers_name = diffusers_name.replace("to.k.lora", "to_k_lora") diffusers_name = diffusers_name.replace("to.v.lora", "to_v_lora") diffusers_name = diffusers_name.replace("to.out.0.lora", "to_out_lora") diffusers_name = diffusers_name.replace("proj.in", "proj_in") diffusers_name = diffusers_name.replace("proj.out", "proj_out") diffusers_name = diffusers_name.replace("emb.layers", "time_emb_proj") # SDXL specific conversions. if "emb" in diffusers_name and "time.emb.proj" not in diffusers_name: pattern = r"\.\d+(?=\D*$)" diffusers_name = re.sub(pattern, "", diffusers_name, count=1) if ".in." in diffusers_name: diffusers_name = diffusers_name.replace("in.layers.2", "conv1") if ".out." in diffusers_name: diffusers_name = diffusers_name.replace("out.layers.3", "conv2") if "downsamplers" in diffusers_name or "upsamplers" in diffusers_name: diffusers_name = diffusers_name.replace("op", "conv") if "skip" in diffusers_name: diffusers_name = diffusers_name.replace("skip.connection", "conv_shortcut") # LyCORIS specific conversions. if "time.emb.proj" in diffusers_name: diffusers_name = diffusers_name.replace("time.emb.proj", "time_emb_proj") if "conv.shortcut" in diffusers_name: diffusers_name = diffusers_name.replace("conv.shortcut", "conv_shortcut") # General conversions. if "transformer_blocks" in diffusers_name: if "attn1" in diffusers_name or "attn2" in diffusers_name: diffusers_name = diffusers_name.replace("attn1", "attn1.processor") diffusers_name = diffusers_name.replace("attn2", "attn2.processor") elif "ff" in diffusers_name: pass elif any(key in diffusers_name for key in ("proj_in", "proj_out")): pass else: pass return diffusers_name def _convert_text_encoder_lora_key(key, lora_name): """ Converts a text encoder LoRA key to a Diffusers compatible key. 
""" if lora_name.startswith(("lora_te_", "lora_te1_")): key_to_replace = "lora_te_" if lora_name.startswith("lora_te_") else "lora_te1_" else: key_to_replace = "lora_te2_" diffusers_name = key.replace(key_to_replace, "").replace("_", ".") diffusers_name = diffusers_name.replace("text.model", "text_model") diffusers_name = diffusers_name.replace("self.attn", "self_attn") diffusers_name = diffusers_name.replace("q.proj.lora", "to_q_lora") diffusers_name = diffusers_name.replace("k.proj.lora", "to_k_lora") diffusers_name = diffusers_name.replace("v.proj.lora", "to_v_lora") diffusers_name = diffusers_name.replace("out.proj.lora", "to_out_lora") diffusers_name = diffusers_name.replace("text.projection", "text_projection") if "self_attn" in diffusers_name or "text_projection" in diffusers_name: pass elif "mlp" in diffusers_name: # Be aware that this is the new diffusers convention and the rest of the code might # not utilize it yet. diffusers_name = diffusers_name.replace(".lora.", ".lora_linear_layer.") return diffusers_name def _get_alpha_name(lora_name_alpha, diffusers_name, alpha): """ Gets the correct alpha name for the Diffusers model. """ if lora_name_alpha.startswith("lora_unet_"): prefix = "unet." elif lora_name_alpha.startswith(("lora_te_", "lora_te1_")): prefix = "text_encoder." else: prefix = "text_encoder_2." new_name = prefix + diffusers_name.split(".lora.")[0] + ".alpha" return {new_name: alpha} # The utilities under `_convert_kohya_flux_lora_to_diffusers()` # are adapted from https://github.com/kohya-ss/sd-scripts/blob/a61cf73a5cb5209c3f4d1a3688dd276a4dfd1ecb/networks/convert_flux_lora.py def _convert_kohya_flux_lora_to_diffusers(state_dict): def _convert_to_ai_toolkit(sds_sd, ait_sd, sds_key, ait_key): if sds_key + ".lora_down.weight" not in sds_sd: return down_weight = sds_sd.pop(sds_key + ".lora_down.weight") # scale weight by alpha and dim rank = down_weight.shape[0] default_alpha = torch.tensor(rank, dtype=down_weight.dtype, device=down_weight.device, requires_grad=False) alpha = sds_sd.pop(sds_key + ".alpha", default_alpha).item() # alpha is scalar scale = alpha / rank # LoRA is scaled by 'alpha / rank' in forward pass, so we need to scale it back here # calculate scale_down and scale_up to keep the same value. 
        # if scale is 4, scale_down is 2 and scale_up is 2
        scale_down = scale
        scale_up = 1.0
        while scale_down * 2 < scale_up:
            scale_down *= 2
            scale_up /= 2

        ait_sd[ait_key + ".lora_A.weight"] = down_weight * scale_down
        ait_sd[ait_key + ".lora_B.weight"] = sds_sd.pop(sds_key + ".lora_up.weight") * scale_up

    def _convert_to_ai_toolkit_cat(sds_sd, ait_sd, sds_key, ait_keys, dims=None):
        if sds_key + ".lora_down.weight" not in sds_sd:
            return
        down_weight = sds_sd.pop(sds_key + ".lora_down.weight")
        up_weight = sds_sd.pop(sds_key + ".lora_up.weight")
        sd_lora_rank = down_weight.shape[0]

        # scale weight by alpha and dim
        default_alpha = torch.tensor(
            sd_lora_rank, dtype=down_weight.dtype, device=down_weight.device, requires_grad=False
        )
        alpha = sds_sd.pop(sds_key + ".alpha", default_alpha)
        scale = alpha / sd_lora_rank

        # calculate scale_down and scale_up
        scale_down = scale
        scale_up = 1.0
        while scale_down * 2 < scale_up:
            scale_down *= 2
            scale_up /= 2

        down_weight = down_weight * scale_down
        up_weight = up_weight * scale_up

        # calculate dims if not provided
        num_splits = len(ait_keys)
        if dims is None:
            dims = [up_weight.shape[0] // num_splits] * num_splits
        else:
            assert sum(dims) == up_weight.shape[0]

        # check upweight is sparse or not
        is_sparse = False
        if sd_lora_rank % num_splits == 0:
            ait_rank = sd_lora_rank // num_splits
            is_sparse = True
            i = 0
            for j in range(len(dims)):
                for k in range(len(dims)):
                    if j == k:
                        continue
                    is_sparse = is_sparse and torch.all(
                        up_weight[i : i + dims[j], k * ait_rank : (k + 1) * ait_rank] == 0
                    )
                i += dims[j]
            if is_sparse:
                logger.info(f"weight is sparse: {sds_key}")

        # make ai-toolkit weight
        ait_down_keys = [k + ".lora_A.weight" for k in ait_keys]
        ait_up_keys = [k + ".lora_B.weight" for k in ait_keys]
        if not is_sparse:
            # down_weight is copied to each split
            ait_sd.update(dict.fromkeys(ait_down_keys, down_weight))

            # up_weight is split to each split
            ait_sd.update({k: v for k, v in zip(ait_up_keys, torch.split(up_weight, dims, dim=0))})  # noqa: C416
        else:
            # down_weight is chunked to each split
            ait_sd.update({k: v for k, v in zip(ait_down_keys, torch.chunk(down_weight, num_splits, dim=0))})  # noqa: C416

            # up_weight is sparse: only non-zero values are copied to each split
            i = 0
            for j in range(len(dims)):
                ait_sd[ait_up_keys[j]] = up_weight[i : i + dims[j], j * ait_rank : (j + 1) * ait_rank].contiguous()
                i += dims[j]

    def _convert_sd_scripts_to_ai_toolkit(sds_sd):
        ait_sd = {}
        for i in range(19):
            _convert_to_ai_toolkit(
                sds_sd,
                ait_sd,
                f"lora_unet_double_blocks_{i}_img_attn_proj",
                f"transformer.transformer_blocks.{i}.attn.to_out.0",
            )
            _convert_to_ai_toolkit_cat(
                sds_sd,
                ait_sd,
                f"lora_unet_double_blocks_{i}_img_attn_qkv",
                [
                    f"transformer.transformer_blocks.{i}.attn.to_q",
                    f"transformer.transformer_blocks.{i}.attn.to_k",
                    f"transformer.transformer_blocks.{i}.attn.to_v",
                ],
            )
            _convert_to_ai_toolkit(
                sds_sd,
                ait_sd,
                f"lora_unet_double_blocks_{i}_img_mlp_0",
                f"transformer.transformer_blocks.{i}.ff.net.0.proj",
            )
            _convert_to_ai_toolkit(
                sds_sd,
                ait_sd,
                f"lora_unet_double_blocks_{i}_img_mlp_2",
                f"transformer.transformer_blocks.{i}.ff.net.2",
            )
            _convert_to_ai_toolkit(
                sds_sd,
                ait_sd,
                f"lora_unet_double_blocks_{i}_img_mod_lin",
                f"transformer.transformer_blocks.{i}.norm1.linear",
            )
            _convert_to_ai_toolkit(
                sds_sd,
                ait_sd,
                f"lora_unet_double_blocks_{i}_txt_attn_proj",
                f"transformer.transformer_blocks.{i}.attn.to_add_out",
            )
            _convert_to_ai_toolkit_cat(
                sds_sd,
                ait_sd,
                f"lora_unet_double_blocks_{i}_txt_attn_qkv",
                [
                    f"transformer.transformer_blocks.{i}.attn.add_q_proj",
f"transformer.transformer_blocks.{i}.attn.add_k_proj", f"transformer.transformer_blocks.{i}.attn.add_v_proj", ], ) _convert_to_ai_toolkit( sds_sd, ait_sd, f"lora_unet_double_blocks_{i}_txt_mlp_0", f"transformer.transformer_blocks.{i}.ff_context.net.0.proj", ) _convert_to_ai_toolkit( sds_sd, ait_sd, f"lora_unet_double_blocks_{i}_txt_mlp_2", f"transformer.transformer_blocks.{i}.ff_context.net.2", ) _convert_to_ai_toolkit( sds_sd, ait_sd, f"lora_unet_double_blocks_{i}_txt_mod_lin", f"transformer.transformer_blocks.{i}.norm1_context.linear", ) for i in range(38): _convert_to_ai_toolkit_cat( sds_sd, ait_sd, f"lora_unet_single_blocks_{i}_linear1", [ f"transformer.single_transformer_blocks.{i}.attn.to_q", f"transformer.single_transformer_blocks.{i}.attn.to_k", f"transformer.single_transformer_blocks.{i}.attn.to_v", f"transformer.single_transformer_blocks.{i}.proj_mlp", ], dims=[3072, 3072, 3072, 12288], ) _convert_to_ai_toolkit( sds_sd, ait_sd, f"lora_unet_single_blocks_{i}_linear2", f"transformer.single_transformer_blocks.{i}.proj_out", ) _convert_to_ai_toolkit( sds_sd, ait_sd, f"lora_unet_single_blocks_{i}_modulation_lin", f"transformer.single_transformer_blocks.{i}.norm.linear", ) # TODO: alphas. def assign_remaining_weights(assignments, source): for lora_key in ["lora_A", "lora_B"]: orig_lora_key = "lora_down" if lora_key == "lora_A" else "lora_up" for target_fmt, source_fmt, transform in assignments: target_key = target_fmt.format(lora_key=lora_key) source_key = source_fmt.format(orig_lora_key=orig_lora_key) value = source.pop(source_key) if transform: value = transform(value) ait_sd[target_key] = value if any("guidance_in" in k for k in sds_sd): assign_remaining_weights( [ ( "time_text_embed.guidance_embedder.linear_1.{lora_key}.weight", "lora_unet_guidance_in_in_layer.{orig_lora_key}.weight", None, ), ( "time_text_embed.guidance_embedder.linear_2.{lora_key}.weight", "lora_unet_guidance_in_out_layer.{orig_lora_key}.weight", None, ), ], sds_sd, ) if any("img_in" in k for k in sds_sd): assign_remaining_weights( [ ("x_embedder.{lora_key}.weight", "lora_unet_img_in.{orig_lora_key}.weight", None), ], sds_sd, ) if any("txt_in" in k for k in sds_sd): assign_remaining_weights( [ ("context_embedder.{lora_key}.weight", "lora_unet_txt_in.{orig_lora_key}.weight", None), ], sds_sd, ) if any("time_in" in k for k in sds_sd): assign_remaining_weights( [ ( "time_text_embed.timestep_embedder.linear_1.{lora_key}.weight", "lora_unet_time_in_in_layer.{orig_lora_key}.weight", None, ), ( "time_text_embed.timestep_embedder.linear_2.{lora_key}.weight", "lora_unet_time_in_out_layer.{orig_lora_key}.weight", None, ), ], sds_sd, ) if any("vector_in" in k for k in sds_sd): assign_remaining_weights( [ ( "time_text_embed.text_embedder.linear_1.{lora_key}.weight", "lora_unet_vector_in_in_layer.{orig_lora_key}.weight", None, ), ( "time_text_embed.text_embedder.linear_2.{lora_key}.weight", "lora_unet_vector_in_out_layer.{orig_lora_key}.weight", None, ), ], sds_sd, ) if any("final_layer" in k for k in sds_sd): # Notice the swap in processing for "final_layer". 
            assign_remaining_weights(
                [
                    (
                        "norm_out.linear.{lora_key}.weight",
                        "lora_unet_final_layer_adaLN_modulation_1.{orig_lora_key}.weight",
                        swap_scale_shift,
                    ),
                    ("proj_out.{lora_key}.weight", "lora_unet_final_layer_linear.{orig_lora_key}.weight", None),
                ],
                sds_sd,
            )

        remaining_keys = list(sds_sd.keys())
        te_state_dict = {}
        if remaining_keys:
            if not all(k.startswith(("lora_te", "lora_te1")) for k in remaining_keys):
                raise ValueError(f"Incompatible keys detected: \n\n {', '.join(remaining_keys)}")
            for key in remaining_keys:
                if not key.endswith("lora_down.weight"):
                    continue

                lora_name = key.split(".")[0]
                lora_name_up = f"{lora_name}.lora_up.weight"
                lora_name_alpha = f"{lora_name}.alpha"
                diffusers_name = _convert_text_encoder_lora_key(key, lora_name)

                if lora_name.startswith(("lora_te_", "lora_te1_")):
                    down_weight = sds_sd.pop(key)
                    sd_lora_rank = down_weight.shape[0]
                    te_state_dict[diffusers_name] = down_weight
                    te_state_dict[diffusers_name.replace(".down.", ".up.")] = sds_sd.pop(lora_name_up)

                if lora_name_alpha in sds_sd:
                    alpha = sds_sd.pop(lora_name_alpha).item()
                    scale = alpha / sd_lora_rank

                    scale_down = scale
                    scale_up = 1.0
                    while scale_down * 2 < scale_up:
                        scale_down *= 2
                        scale_up /= 2

                    te_state_dict[diffusers_name] *= scale_down
                    te_state_dict[diffusers_name.replace(".down.", ".up.")] *= scale_up

        if len(sds_sd) > 0:
            logger.warning(f"Unsupported keys for ai-toolkit: {sds_sd.keys()}")

        if te_state_dict:
            te_state_dict = {f"text_encoder.{module_name}": params for module_name, params in te_state_dict.items()}

        new_state_dict = {**ait_sd, **te_state_dict}
        return new_state_dict

    def _convert_mixture_state_dict_to_diffusers(state_dict):
        new_state_dict = {}

        def _convert(original_key, diffusers_key, state_dict, new_state_dict):
            down_key = f"{original_key}.lora_down.weight"
            down_weight = state_dict.pop(down_key)
            lora_rank = down_weight.shape[0]

            up_weight_key = f"{original_key}.lora_up.weight"
            up_weight = state_dict.pop(up_weight_key)

            alpha_key = f"{original_key}.alpha"
            alpha = state_dict.pop(alpha_key)

            # scale weight by alpha and dim
            scale = alpha / lora_rank
            # calculate scale_down and scale_up
            scale_down = scale
            scale_up = 1.0
            while scale_down * 2 < scale_up:
                scale_down *= 2
                scale_up /= 2
            down_weight = down_weight * scale_down
            up_weight = up_weight * scale_up

            diffusers_down_key = f"{diffusers_key}.lora_A.weight"
            new_state_dict[diffusers_down_key] = down_weight
            new_state_dict[diffusers_down_key.replace(".lora_A.", ".lora_B.")] = up_weight

        all_unique_keys = {
            k.replace(".lora_down.weight", "").replace(".lora_up.weight", "").replace(".alpha", "")
            for k in state_dict
            if not k.startswith(("lora_unet_"))
        }
        assert all(k.startswith(("lora_transformer_", "lora_te1_")) for k in all_unique_keys), f"{all_unique_keys=}"

        has_te_keys = False
        for k in all_unique_keys:
            if k.startswith("lora_transformer_single_transformer_blocks_"):
                i = int(k.split("lora_transformer_single_transformer_blocks_")[-1].split("_")[0])
                diffusers_key = f"single_transformer_blocks.{i}"
            elif k.startswith("lora_transformer_transformer_blocks_"):
                i = int(k.split("lora_transformer_transformer_blocks_")[-1].split("_")[0])
                diffusers_key = f"transformer_blocks.{i}"
            elif k.startswith("lora_te1_"):
                has_te_keys = True
                continue
            elif k.startswith("lora_transformer_context_embedder"):
                diffusers_key = "context_embedder"
            elif k.startswith("lora_transformer_norm_out_linear"):
                diffusers_key = "norm_out.linear"
            elif k.startswith("lora_transformer_proj_out"):
                diffusers_key = "proj_out"
            elif k.startswith("lora_transformer_x_embedder"):
                diffusers_key = "x_embedder"
k.startswith("lora_transformer_time_text_embed_guidance_embedder_linear_"): i = int(k.split("lora_transformer_time_text_embed_guidance_embedder_linear_")[-1]) diffusers_key = f"time_text_embed.guidance_embedder.linear_{i}" elif k.startswith("lora_transformer_time_text_embed_text_embedder_linear_"): i = int(k.split("lora_transformer_time_text_embed_text_embedder_linear_")[-1]) diffusers_key = f"time_text_embed.text_embedder.linear_{i}" elif k.startswith("lora_transformer_time_text_embed_timestep_embedder_linear_"): i = int(k.split("lora_transformer_time_text_embed_timestep_embedder_linear_")[-1]) diffusers_key = f"time_text_embed.timestep_embedder.linear_{i}" else: raise NotImplementedError(f"Handling for key ({k}) is not implemented.") if "attn_" in k: if "_to_out_0" in k: diffusers_key += ".attn.to_out.0" elif "_to_add_out" in k: diffusers_key += ".attn.to_add_out" elif any(qkv in k for qkv in ["to_q", "to_k", "to_v"]): remaining = k.split("attn_")[-1] diffusers_key += f".attn.{remaining}" elif any(add_qkv in k for add_qkv in ["add_q_proj", "add_k_proj", "add_v_proj"]): remaining = k.split("attn_")[-1] diffusers_key += f".attn.{remaining}" _convert(k, diffusers_key, state_dict, new_state_dict) if has_te_keys: layer_pattern = re.compile(r"lora_te1_text_model_encoder_layers_(\d+)") attn_mapping = { "q_proj": ".self_attn.q_proj", "k_proj": ".self_attn.k_proj", "v_proj": ".self_attn.v_proj", "out_proj": ".self_attn.out_proj", } mlp_mapping = {"fc1": ".mlp.fc1", "fc2": ".mlp.fc2"} for k in all_unique_keys: if not k.startswith("lora_te1_"): continue match = layer_pattern.search(k) if not match: continue i = int(match.group(1)) diffusers_key = f"text_model.encoder.layers.{i}" if "attn" in k: for key_fragment, suffix in attn_mapping.items(): if key_fragment in k: diffusers_key += suffix break elif "mlp" in k: for key_fragment, suffix in mlp_mapping.items(): if key_fragment in k: diffusers_key += suffix break _convert(k, diffusers_key, state_dict, new_state_dict) remaining_all_unet = False if state_dict: remaining_all_unet = all(k.startswith("lora_unet_") for k in state_dict) if remaining_all_unet: keys = list(state_dict.keys()) for k in keys: state_dict.pop(k) if len(state_dict) > 0: raise ValueError( f"Expected an empty state dict at this point but its has these keys which couldn't be parsed: {list(state_dict.keys())}." ) transformer_state_dict = { f"transformer.{k}": v for k, v in new_state_dict.items() if not k.startswith("text_model.") } te_state_dict = {f"text_encoder.{k}": v for k, v in new_state_dict.items() if k.startswith("text_model.")} return {**transformer_state_dict, **te_state_dict} # This is weird. # https://huggingface.co/sayakpaul/different-lora-from-civitai/tree/main?show_file_info=sharp_detailed_foot.safetensors # has both `peft` and non-peft state dict. has_peft_state_dict = any(k.startswith("transformer.") for k in state_dict) if has_peft_state_dict: state_dict = { k.replace("lora_down.weight", "lora_A.weight").replace("lora_up.weight", "lora_B.weight"): v for k, v in state_dict.items() if k.startswith("transformer.") } return state_dict # Another weird one. has_mixture = any( k.startswith("lora_transformer_") and ("lora_down" in k or "lora_up" in k or "alpha" in k) for k in state_dict ) # ComfyUI. 
    if not has_mixture:
        state_dict = {k.replace("diffusion_model.", "lora_unet_"): v for k, v in state_dict.items()}
        state_dict = {k.replace("text_encoders.clip_l.transformer.", "lora_te_"): v for k, v in state_dict.items()}

        has_position_embedding = any("position_embedding" in k for k in state_dict)
        if has_position_embedding:
            zero_status_pe = state_dict_all_zero(state_dict, "position_embedding")
            if zero_status_pe:
                logger.info(
                    "The `position_embedding` LoRA params are all zeros which make them ineffective. "
                    "So, we will purge them out of the current state dict to make loading possible."
                )
            else:
                logger.info(
                    "The state_dict has position_embedding LoRA params and we currently do not support them. "
                    "Open an issue if you need this supported - https://github.com/huggingface/diffusers/issues/new."
                )
            state_dict = {k: v for k, v in state_dict.items() if "position_embedding" not in k}

        has_t5xxl = any(k.startswith("text_encoders.t5xxl.transformer.") for k in state_dict)
        if has_t5xxl:
            zero_status_t5 = state_dict_all_zero(state_dict, "text_encoders.t5xxl")
            if zero_status_t5:
                logger.info(
                    "The `t5xxl` LoRA params are all zeros which make them ineffective. "
                    "So, we will purge them out of the current state dict to make loading possible."
                )
            else:
                logger.info(
                    "T5-xxl keys found in the state dict, which are currently unsupported. We will filter them out. "
                    "Open an issue if this is a problem - https://github.com/huggingface/diffusers/issues/new."
                )
            state_dict = {k: v for k, v in state_dict.items() if not k.startswith("text_encoders.t5xxl.transformer.")}

        has_diffb = any("diff_b" in k and k.startswith(("lora_unet_", "lora_te_")) for k in state_dict)
        if has_diffb:
            zero_status_diff_b = state_dict_all_zero(state_dict, ".diff_b")
            if zero_status_diff_b:
                logger.info(
                    "The `diff_b` LoRA params are all zeros which make them ineffective. "
                    "So, we will purge them out of the current state dict to make loading possible."
                )
            else:
                logger.info(
                    "`diff_b` keys found in the state dict which are currently unsupported. "
                    "So, we will filter out those keys. Open an issue if this is a problem - "
                    "https://github.com/huggingface/diffusers/issues/new."
                )
            state_dict = {k: v for k, v in state_dict.items() if ".diff_b" not in k}

        has_norm_diff = any(".norm" in k and ".diff" in k for k in state_dict)
        if has_norm_diff:
            zero_status_diff = state_dict_all_zero(state_dict, ".diff")
            if zero_status_diff:
                logger.info(
                    "The `diff` LoRA params are all zeros which make them ineffective. "
                    "So, we will purge them out of the current state dict to make loading possible."
                )
            else:
                logger.info(
                    "Normalization diff keys found in the state dict which are currently unsupported. "
                    "So, we will filter out those keys. Open an issue if this is a problem - "
                    "https://github.com/huggingface/diffusers/issues/new."
                )
            state_dict = {k: v for k, v in state_dict.items() if ".norm" not in k and ".diff" not in k}

        limit_substrings = ["lora_down", "lora_up"]
        if any("alpha" in k for k in state_dict):
            limit_substrings.append("alpha")

        state_dict = {
            _custom_replace(k, limit_substrings): v
            for k, v in state_dict.items()
            if k.startswith(("lora_unet_", "lora_te_"))
        }

        if any("text_projection" in k for k in state_dict):
            logger.info(
                "`text_projection` keys found in the `state_dict` which are unexpected. "
                "So, we will filter out those keys. Open an issue if this is a problem - "
                "https://github.com/huggingface/diffusers/issues/new."
            )
            state_dict = {k: v for k, v in state_dict.items() if "text_projection" not in k}

    if has_mixture:
        return _convert_mixture_state_dict_to_diffusers(state_dict)

    return _convert_sd_scripts_to_ai_toolkit(state_dict)


# Adapted from https://gist.github.com/Leommm-byte/6b331a1e9bd53271210b26543a7065d6
# Some utilities were reused from
# https://github.com/kohya-ss/sd-scripts/blob/a61cf73a5cb5209c3f4d1a3688dd276a4dfd1ecb/networks/convert_flux_lora.py
def _convert_xlabs_flux_lora_to_diffusers(old_state_dict):
    new_state_dict = {}
    orig_keys = list(old_state_dict.keys())

    def handle_qkv(sds_sd, ait_sd, sds_key, ait_keys, dims=None):
        down_weight = sds_sd.pop(sds_key)
        up_weight = sds_sd.pop(sds_key.replace(".down.weight", ".up.weight"))

        # calculate dims if not provided
        num_splits = len(ait_keys)
        if dims is None:
            dims = [up_weight.shape[0] // num_splits] * num_splits
        else:
            assert sum(dims) == up_weight.shape[0]

        # make ai-toolkit weight
        ait_down_keys = [k + ".lora_A.weight" for k in ait_keys]
        ait_up_keys = [k + ".lora_B.weight" for k in ait_keys]

        # down_weight is copied to each split
        ait_sd.update(dict.fromkeys(ait_down_keys, down_weight))

        # up_weight is split to each split
        ait_sd.update({k: v for k, v in zip(ait_up_keys, torch.split(up_weight, dims, dim=0))})  # noqa: C416

    for old_key in orig_keys:
        # Handle double_blocks
        if old_key.startswith(("diffusion_model.double_blocks", "double_blocks")):
            block_num = re.search(r"double_blocks\.(\d+)", old_key).group(1)
            new_key = f"transformer.transformer_blocks.{block_num}"

            if "processor.proj_lora1" in old_key:
                new_key += ".attn.to_out.0"
            elif "processor.proj_lora2" in old_key:
                new_key += ".attn.to_add_out"
            # Handle text latents.
            elif "processor.qkv_lora2" in old_key and "up" not in old_key:
                handle_qkv(
                    old_state_dict,
                    new_state_dict,
                    old_key,
                    [
                        f"transformer.transformer_blocks.{block_num}.attn.add_q_proj",
                        f"transformer.transformer_blocks.{block_num}.attn.add_k_proj",
                        f"transformer.transformer_blocks.{block_num}.attn.add_v_proj",
                    ],
                )
                # continue
            # Handle image latents.
            elif "processor.qkv_lora1" in old_key and "up" not in old_key:
                handle_qkv(
                    old_state_dict,
                    new_state_dict,
                    old_key,
                    [
                        f"transformer.transformer_blocks.{block_num}.attn.to_q",
                        f"transformer.transformer_blocks.{block_num}.attn.to_k",
                        f"transformer.transformer_blocks.{block_num}.attn.to_v",
                    ],
                )
                # continue

            if "down" in old_key:
                new_key += ".lora_A.weight"
            elif "up" in old_key:
                new_key += ".lora_B.weight"

        # Handle single_blocks
        elif old_key.startswith(("diffusion_model.single_blocks", "single_blocks")):
            block_num = re.search(r"single_blocks\.(\d+)", old_key).group(1)
            new_key = f"transformer.single_transformer_blocks.{block_num}"

            if "proj_lora" in old_key:
                new_key += ".proj_out"
            elif "qkv_lora" in old_key and "up" not in old_key:
                handle_qkv(
                    old_state_dict,
                    new_state_dict,
                    old_key,
                    [
                        f"transformer.single_transformer_blocks.{block_num}.attn.to_q",
                        f"transformer.single_transformer_blocks.{block_num}.attn.to_k",
                        f"transformer.single_transformer_blocks.{block_num}.attn.to_v",
                    ],
                )

            if "down" in old_key:
                new_key += ".lora_A.weight"
            elif "up" in old_key:
                new_key += ".lora_B.weight"

        else:
            # Handle other potential key patterns here
            new_key = old_key

        # Since we already handle qkv above.
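        # `handle_qkv` has already populated the fused q/k/v projections, so qkv keys are
        # skipped here to avoid writing them a second time under an unconverted name.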
if "qkv" not in old_key: new_state_dict[new_key] = old_state_dict.pop(old_key) if len(old_state_dict) > 0: raise ValueError(f"`old_state_dict` should be at this point but has: {list(old_state_dict.keys())}.") return new_state_dict def _custom_replace(key: str, substrings: List[str]) -> str: # Replaces the "."s with "_"s upto the `substrings`. # Example: # lora_unet.foo.bar.lora_A.weight -> lora_unet_foo_bar.lora_A.weight pattern = "(" + "|".join(re.escape(sub) for sub in substrings) + ")" match = re.search(pattern, key) if match: start_sub = match.start() if start_sub > 0 and key[start_sub - 1] == ".": boundary = start_sub - 1 else: boundary = start_sub left = key[:boundary].replace(".", "_") right = key[boundary:] return left + right else: return key.replace(".", "_") def _convert_bfl_flux_control_lora_to_diffusers(original_state_dict): converted_state_dict = {} original_state_dict_keys = list(original_state_dict.keys()) num_layers = 19 num_single_layers = 38 inner_dim = 3072 mlp_ratio = 4.0 for lora_key in ["lora_A", "lora_B"]: ## time_text_embed.timestep_embedder <- time_in converted_state_dict[f"time_text_embed.timestep_embedder.linear_1.{lora_key}.weight"] = ( original_state_dict.pop(f"time_in.in_layer.{lora_key}.weight") ) if f"time_in.in_layer.{lora_key}.bias" in original_state_dict_keys: converted_state_dict[f"time_text_embed.timestep_embedder.linear_1.{lora_key}.bias"] = ( original_state_dict.pop(f"time_in.in_layer.{lora_key}.bias") ) converted_state_dict[f"time_text_embed.timestep_embedder.linear_2.{lora_key}.weight"] = ( original_state_dict.pop(f"time_in.out_layer.{lora_key}.weight") ) if f"time_in.out_layer.{lora_key}.bias" in original_state_dict_keys: converted_state_dict[f"time_text_embed.timestep_embedder.linear_2.{lora_key}.bias"] = ( original_state_dict.pop(f"time_in.out_layer.{lora_key}.bias") ) ## time_text_embed.text_embedder <- vector_in converted_state_dict[f"time_text_embed.text_embedder.linear_1.{lora_key}.weight"] = original_state_dict.pop( f"vector_in.in_layer.{lora_key}.weight" ) if f"vector_in.in_layer.{lora_key}.bias" in original_state_dict_keys: converted_state_dict[f"time_text_embed.text_embedder.linear_1.{lora_key}.bias"] = original_state_dict.pop( f"vector_in.in_layer.{lora_key}.bias" ) converted_state_dict[f"time_text_embed.text_embedder.linear_2.{lora_key}.weight"] = original_state_dict.pop( f"vector_in.out_layer.{lora_key}.weight" ) if f"vector_in.out_layer.{lora_key}.bias" in original_state_dict_keys: converted_state_dict[f"time_text_embed.text_embedder.linear_2.{lora_key}.bias"] = original_state_dict.pop( f"vector_in.out_layer.{lora_key}.bias" ) # guidance has_guidance = any("guidance" in k for k in original_state_dict) if has_guidance: converted_state_dict[f"time_text_embed.guidance_embedder.linear_1.{lora_key}.weight"] = ( original_state_dict.pop(f"guidance_in.in_layer.{lora_key}.weight") ) if f"guidance_in.in_layer.{lora_key}.bias" in original_state_dict_keys: converted_state_dict[f"time_text_embed.guidance_embedder.linear_1.{lora_key}.bias"] = ( original_state_dict.pop(f"guidance_in.in_layer.{lora_key}.bias") ) converted_state_dict[f"time_text_embed.guidance_embedder.linear_2.{lora_key}.weight"] = ( original_state_dict.pop(f"guidance_in.out_layer.{lora_key}.weight") ) if f"guidance_in.out_layer.{lora_key}.bias" in original_state_dict_keys: converted_state_dict[f"time_text_embed.guidance_embedder.linear_2.{lora_key}.bias"] = ( original_state_dict.pop(f"guidance_in.out_layer.{lora_key}.bias") ) # context_embedder 
converted_state_dict[f"context_embedder.{lora_key}.weight"] = original_state_dict.pop( f"txt_in.{lora_key}.weight" ) if f"txt_in.{lora_key}.bias" in original_state_dict_keys: converted_state_dict[f"context_embedder.{lora_key}.bias"] = original_state_dict.pop( f"txt_in.{lora_key}.bias" ) # x_embedder converted_state_dict[f"x_embedder.{lora_key}.weight"] = original_state_dict.pop(f"img_in.{lora_key}.weight") if f"img_in.{lora_key}.bias" in original_state_dict_keys: converted_state_dict[f"x_embedder.{lora_key}.bias"] = original_state_dict.pop(f"img_in.{lora_key}.bias") # double transformer blocks for i in range(num_layers): block_prefix = f"transformer_blocks.{i}." for lora_key in ["lora_A", "lora_B"]: # norms converted_state_dict[f"{block_prefix}norm1.linear.{lora_key}.weight"] = original_state_dict.pop( f"double_blocks.{i}.img_mod.lin.{lora_key}.weight" ) if f"double_blocks.{i}.img_mod.lin.{lora_key}.bias" in original_state_dict_keys: converted_state_dict[f"{block_prefix}norm1.linear.{lora_key}.bias"] = original_state_dict.pop( f"double_blocks.{i}.img_mod.lin.{lora_key}.bias" ) converted_state_dict[f"{block_prefix}norm1_context.linear.{lora_key}.weight"] = original_state_dict.pop( f"double_blocks.{i}.txt_mod.lin.{lora_key}.weight" ) if f"double_blocks.{i}.txt_mod.lin.{lora_key}.bias" in original_state_dict_keys: converted_state_dict[f"{block_prefix}norm1_context.linear.{lora_key}.bias"] = original_state_dict.pop( f"double_blocks.{i}.txt_mod.lin.{lora_key}.bias" ) # Q, K, V if lora_key == "lora_A": sample_lora_weight = original_state_dict.pop(f"double_blocks.{i}.img_attn.qkv.{lora_key}.weight") converted_state_dict[f"{block_prefix}attn.to_v.{lora_key}.weight"] = torch.cat([sample_lora_weight]) converted_state_dict[f"{block_prefix}attn.to_q.{lora_key}.weight"] = torch.cat([sample_lora_weight]) converted_state_dict[f"{block_prefix}attn.to_k.{lora_key}.weight"] = torch.cat([sample_lora_weight]) context_lora_weight = original_state_dict.pop(f"double_blocks.{i}.txt_attn.qkv.{lora_key}.weight") converted_state_dict[f"{block_prefix}attn.add_q_proj.{lora_key}.weight"] = torch.cat( [context_lora_weight] ) converted_state_dict[f"{block_prefix}attn.add_k_proj.{lora_key}.weight"] = torch.cat( [context_lora_weight] ) converted_state_dict[f"{block_prefix}attn.add_v_proj.{lora_key}.weight"] = torch.cat( [context_lora_weight] ) else: sample_q, sample_k, sample_v = torch.chunk( original_state_dict.pop(f"double_blocks.{i}.img_attn.qkv.{lora_key}.weight"), 3, dim=0 ) converted_state_dict[f"{block_prefix}attn.to_q.{lora_key}.weight"] = torch.cat([sample_q]) converted_state_dict[f"{block_prefix}attn.to_k.{lora_key}.weight"] = torch.cat([sample_k]) converted_state_dict[f"{block_prefix}attn.to_v.{lora_key}.weight"] = torch.cat([sample_v]) context_q, context_k, context_v = torch.chunk( original_state_dict.pop(f"double_blocks.{i}.txt_attn.qkv.{lora_key}.weight"), 3, dim=0 ) converted_state_dict[f"{block_prefix}attn.add_q_proj.{lora_key}.weight"] = torch.cat([context_q]) converted_state_dict[f"{block_prefix}attn.add_k_proj.{lora_key}.weight"] = torch.cat([context_k]) converted_state_dict[f"{block_prefix}attn.add_v_proj.{lora_key}.weight"] = torch.cat([context_v]) if f"double_blocks.{i}.img_attn.qkv.{lora_key}.bias" in original_state_dict_keys: sample_q_bias, sample_k_bias, sample_v_bias = torch.chunk( original_state_dict.pop(f"double_blocks.{i}.img_attn.qkv.{lora_key}.bias"), 3, dim=0 ) converted_state_dict[f"{block_prefix}attn.to_q.{lora_key}.bias"] = torch.cat([sample_q_bias]) 
converted_state_dict[f"{block_prefix}attn.to_k.{lora_key}.bias"] = torch.cat([sample_k_bias]) converted_state_dict[f"{block_prefix}attn.to_v.{lora_key}.bias"] = torch.cat([sample_v_bias]) if f"double_blocks.{i}.txt_attn.qkv.{lora_key}.bias" in original_state_dict_keys: context_q_bias, context_k_bias, context_v_bias = torch.chunk( original_state_dict.pop(f"double_blocks.{i}.txt_attn.qkv.{lora_key}.bias"), 3, dim=0 ) converted_state_dict[f"{block_prefix}attn.add_q_proj.{lora_key}.bias"] = torch.cat([context_q_bias]) converted_state_dict[f"{block_prefix}attn.add_k_proj.{lora_key}.bias"] = torch.cat([context_k_bias]) converted_state_dict[f"{block_prefix}attn.add_v_proj.{lora_key}.bias"] = torch.cat([context_v_bias]) # ff img_mlp converted_state_dict[f"{block_prefix}ff.net.0.proj.{lora_key}.weight"] = original_state_dict.pop( f"double_blocks.{i}.img_mlp.0.{lora_key}.weight" ) if f"double_blocks.{i}.img_mlp.0.{lora_key}.bias" in original_state_dict_keys: converted_state_dict[f"{block_prefix}ff.net.0.proj.{lora_key}.bias"] = original_state_dict.pop( f"double_blocks.{i}.img_mlp.0.{lora_key}.bias" ) converted_state_dict[f"{block_prefix}ff.net.2.{lora_key}.weight"] = original_state_dict.pop( f"double_blocks.{i}.img_mlp.2.{lora_key}.weight" ) if f"double_blocks.{i}.img_mlp.2.{lora_key}.bias" in original_state_dict_keys: converted_state_dict[f"{block_prefix}ff.net.2.{lora_key}.bias"] = original_state_dict.pop( f"double_blocks.{i}.img_mlp.2.{lora_key}.bias" ) converted_state_dict[f"{block_prefix}ff_context.net.0.proj.{lora_key}.weight"] = original_state_dict.pop( f"double_blocks.{i}.txt_mlp.0.{lora_key}.weight" ) if f"double_blocks.{i}.txt_mlp.0.{lora_key}.bias" in original_state_dict_keys: converted_state_dict[f"{block_prefix}ff_context.net.0.proj.{lora_key}.bias"] = original_state_dict.pop( f"double_blocks.{i}.txt_mlp.0.{lora_key}.bias" ) converted_state_dict[f"{block_prefix}ff_context.net.2.{lora_key}.weight"] = original_state_dict.pop( f"double_blocks.{i}.txt_mlp.2.{lora_key}.weight" ) if f"double_blocks.{i}.txt_mlp.2.{lora_key}.bias" in original_state_dict_keys: converted_state_dict[f"{block_prefix}ff_context.net.2.{lora_key}.bias"] = original_state_dict.pop( f"double_blocks.{i}.txt_mlp.2.{lora_key}.bias" ) # output projections. 
converted_state_dict[f"{block_prefix}attn.to_out.0.{lora_key}.weight"] = original_state_dict.pop( f"double_blocks.{i}.img_attn.proj.{lora_key}.weight" ) if f"double_blocks.{i}.img_attn.proj.{lora_key}.bias" in original_state_dict_keys: converted_state_dict[f"{block_prefix}attn.to_out.0.{lora_key}.bias"] = original_state_dict.pop( f"double_blocks.{i}.img_attn.proj.{lora_key}.bias" ) converted_state_dict[f"{block_prefix}attn.to_add_out.{lora_key}.weight"] = original_state_dict.pop( f"double_blocks.{i}.txt_attn.proj.{lora_key}.weight" ) if f"double_blocks.{i}.txt_attn.proj.{lora_key}.bias" in original_state_dict_keys: converted_state_dict[f"{block_prefix}attn.to_add_out.{lora_key}.bias"] = original_state_dict.pop( f"double_blocks.{i}.txt_attn.proj.{lora_key}.bias" ) # qk_norm converted_state_dict[f"{block_prefix}attn.norm_q.weight"] = original_state_dict.pop( f"double_blocks.{i}.img_attn.norm.query_norm.scale" ) converted_state_dict[f"{block_prefix}attn.norm_k.weight"] = original_state_dict.pop( f"double_blocks.{i}.img_attn.norm.key_norm.scale" ) converted_state_dict[f"{block_prefix}attn.norm_added_q.weight"] = original_state_dict.pop( f"double_blocks.{i}.txt_attn.norm.query_norm.scale" ) converted_state_dict[f"{block_prefix}attn.norm_added_k.weight"] = original_state_dict.pop( f"double_blocks.{i}.txt_attn.norm.key_norm.scale" ) # single transformer blocks for i in range(num_single_layers): block_prefix = f"single_transformer_blocks.{i}." for lora_key in ["lora_A", "lora_B"]: # norm.linear <- single_blocks.0.modulation.lin converted_state_dict[f"{block_prefix}norm.linear.{lora_key}.weight"] = original_state_dict.pop( f"single_blocks.{i}.modulation.lin.{lora_key}.weight" ) if f"single_blocks.{i}.modulation.lin.{lora_key}.bias" in original_state_dict_keys: converted_state_dict[f"{block_prefix}norm.linear.{lora_key}.bias"] = original_state_dict.pop( f"single_blocks.{i}.modulation.lin.{lora_key}.bias" ) # Q, K, V, mlp mlp_hidden_dim = int(inner_dim * mlp_ratio) split_size = (inner_dim, inner_dim, inner_dim, mlp_hidden_dim) if lora_key == "lora_A": lora_weight = original_state_dict.pop(f"single_blocks.{i}.linear1.{lora_key}.weight") converted_state_dict[f"{block_prefix}attn.to_q.{lora_key}.weight"] = torch.cat([lora_weight]) converted_state_dict[f"{block_prefix}attn.to_k.{lora_key}.weight"] = torch.cat([lora_weight]) converted_state_dict[f"{block_prefix}attn.to_v.{lora_key}.weight"] = torch.cat([lora_weight]) converted_state_dict[f"{block_prefix}proj_mlp.{lora_key}.weight"] = torch.cat([lora_weight]) if f"single_blocks.{i}.linear1.{lora_key}.bias" in original_state_dict_keys: lora_bias = original_state_dict.pop(f"single_blocks.{i}.linear1.{lora_key}.bias") converted_state_dict[f"{block_prefix}attn.to_q.{lora_key}.bias"] = torch.cat([lora_bias]) converted_state_dict[f"{block_prefix}attn.to_k.{lora_key}.bias"] = torch.cat([lora_bias]) converted_state_dict[f"{block_prefix}attn.to_v.{lora_key}.bias"] = torch.cat([lora_bias]) converted_state_dict[f"{block_prefix}proj_mlp.{lora_key}.bias"] = torch.cat([lora_bias]) else: q, k, v, mlp = torch.split( original_state_dict.pop(f"single_blocks.{i}.linear1.{lora_key}.weight"), split_size, dim=0 ) converted_state_dict[f"{block_prefix}attn.to_q.{lora_key}.weight"] = torch.cat([q]) converted_state_dict[f"{block_prefix}attn.to_k.{lora_key}.weight"] = torch.cat([k]) converted_state_dict[f"{block_prefix}attn.to_v.{lora_key}.weight"] = torch.cat([v]) converted_state_dict[f"{block_prefix}proj_mlp.{lora_key}.weight"] = torch.cat([mlp]) if 
f"single_blocks.{i}.linear1.{lora_key}.bias" in original_state_dict_keys: q_bias, k_bias, v_bias, mlp_bias = torch.split( original_state_dict.pop(f"single_blocks.{i}.linear1.{lora_key}.bias"), split_size, dim=0 ) converted_state_dict[f"{block_prefix}attn.to_q.{lora_key}.bias"] = torch.cat([q_bias]) converted_state_dict[f"{block_prefix}attn.to_k.{lora_key}.bias"] = torch.cat([k_bias]) converted_state_dict[f"{block_prefix}attn.to_v.{lora_key}.bias"] = torch.cat([v_bias]) converted_state_dict[f"{block_prefix}proj_mlp.{lora_key}.bias"] = torch.cat([mlp_bias]) # output projections. converted_state_dict[f"{block_prefix}proj_out.{lora_key}.weight"] = original_state_dict.pop( f"single_blocks.{i}.linear2.{lora_key}.weight" ) if f"single_blocks.{i}.linear2.{lora_key}.bias" in original_state_dict_keys: converted_state_dict[f"{block_prefix}proj_out.{lora_key}.bias"] = original_state_dict.pop( f"single_blocks.{i}.linear2.{lora_key}.bias" ) # qk norm converted_state_dict[f"{block_prefix}attn.norm_q.weight"] = original_state_dict.pop( f"single_blocks.{i}.norm.query_norm.scale" ) converted_state_dict[f"{block_prefix}attn.norm_k.weight"] = original_state_dict.pop( f"single_blocks.{i}.norm.key_norm.scale" ) for lora_key in ["lora_A", "lora_B"]: converted_state_dict[f"proj_out.{lora_key}.weight"] = original_state_dict.pop( f"final_layer.linear.{lora_key}.weight" ) if f"final_layer.linear.{lora_key}.bias" in original_state_dict_keys: converted_state_dict[f"proj_out.{lora_key}.bias"] = original_state_dict.pop( f"final_layer.linear.{lora_key}.bias" ) converted_state_dict[f"norm_out.linear.{lora_key}.weight"] = swap_scale_shift( original_state_dict.pop(f"final_layer.adaLN_modulation.1.{lora_key}.weight") ) if f"final_layer.adaLN_modulation.1.{lora_key}.bias" in original_state_dict_keys: converted_state_dict[f"norm_out.linear.{lora_key}.bias"] = swap_scale_shift( original_state_dict.pop(f"final_layer.adaLN_modulation.1.{lora_key}.bias") ) if len(original_state_dict) > 0: raise ValueError(f"`original_state_dict` should be empty at this point but has {original_state_dict.keys()=}.") for key in list(converted_state_dict.keys()): converted_state_dict[f"transformer.{key}"] = converted_state_dict.pop(key) return converted_state_dict def _convert_fal_kontext_lora_to_diffusers(original_state_dict): converted_state_dict = {} original_state_dict_keys = list(original_state_dict.keys()) num_layers = 19 num_single_layers = 38 inner_dim = 3072 mlp_ratio = 4.0 # double transformer blocks for i in range(num_layers): block_prefix = f"transformer_blocks.{i}." original_block_prefix = "base_model.model." 
for lora_key in ["lora_A", "lora_B"]: # norms converted_state_dict[f"{block_prefix}norm1.linear.{lora_key}.weight"] = original_state_dict.pop( f"{original_block_prefix}double_blocks.{i}.img_mod.lin.{lora_key}.weight" ) if f"double_blocks.{i}.img_mod.lin.{lora_key}.bias" in original_state_dict_keys: converted_state_dict[f"{block_prefix}norm1.linear.{lora_key}.bias"] = original_state_dict.pop( f"{original_block_prefix}double_blocks.{i}.img_mod.lin.{lora_key}.bias" ) converted_state_dict[f"{block_prefix}norm1_context.linear.{lora_key}.weight"] = original_state_dict.pop( f"{original_block_prefix}double_blocks.{i}.txt_mod.lin.{lora_key}.weight" ) # Q, K, V if lora_key == "lora_A": sample_lora_weight = original_state_dict.pop( f"{original_block_prefix}double_blocks.{i}.img_attn.qkv.{lora_key}.weight" ) converted_state_dict[f"{block_prefix}attn.to_v.{lora_key}.weight"] = torch.cat([sample_lora_weight]) converted_state_dict[f"{block_prefix}attn.to_q.{lora_key}.weight"] = torch.cat([sample_lora_weight]) converted_state_dict[f"{block_prefix}attn.to_k.{lora_key}.weight"] = torch.cat([sample_lora_weight]) context_lora_weight = original_state_dict.pop( f"{original_block_prefix}double_blocks.{i}.txt_attn.qkv.{lora_key}.weight" ) converted_state_dict[f"{block_prefix}attn.add_q_proj.{lora_key}.weight"] = torch.cat( [context_lora_weight] ) converted_state_dict[f"{block_prefix}attn.add_k_proj.{lora_key}.weight"] = torch.cat( [context_lora_weight] ) converted_state_dict[f"{block_prefix}attn.add_v_proj.{lora_key}.weight"] = torch.cat( [context_lora_weight] ) else: sample_q, sample_k, sample_v = torch.chunk( original_state_dict.pop( f"{original_block_prefix}double_blocks.{i}.img_attn.qkv.{lora_key}.weight" ), 3, dim=0, ) converted_state_dict[f"{block_prefix}attn.to_q.{lora_key}.weight"] = torch.cat([sample_q]) converted_state_dict[f"{block_prefix}attn.to_k.{lora_key}.weight"] = torch.cat([sample_k]) converted_state_dict[f"{block_prefix}attn.to_v.{lora_key}.weight"] = torch.cat([sample_v]) context_q, context_k, context_v = torch.chunk( original_state_dict.pop( f"{original_block_prefix}double_blocks.{i}.txt_attn.qkv.{lora_key}.weight" ), 3, dim=0, ) converted_state_dict[f"{block_prefix}attn.add_q_proj.{lora_key}.weight"] = torch.cat([context_q]) converted_state_dict[f"{block_prefix}attn.add_k_proj.{lora_key}.weight"] = torch.cat([context_k]) converted_state_dict[f"{block_prefix}attn.add_v_proj.{lora_key}.weight"] = torch.cat([context_v]) if f"double_blocks.{i}.img_attn.qkv.{lora_key}.bias" in original_state_dict_keys: sample_q_bias, sample_k_bias, sample_v_bias = torch.chunk( original_state_dict.pop(f"{original_block_prefix}double_blocks.{i}.img_attn.qkv.{lora_key}.bias"), 3, dim=0, ) converted_state_dict[f"{block_prefix}attn.to_q.{lora_key}.bias"] = torch.cat([sample_q_bias]) converted_state_dict[f"{block_prefix}attn.to_k.{lora_key}.bias"] = torch.cat([sample_k_bias]) converted_state_dict[f"{block_prefix}attn.to_v.{lora_key}.bias"] = torch.cat([sample_v_bias]) if f"double_blocks.{i}.txt_attn.qkv.{lora_key}.bias" in original_state_dict_keys: context_q_bias, context_k_bias, context_v_bias = torch.chunk( original_state_dict.pop(f"{original_block_prefix}double_blocks.{i}.txt_attn.qkv.{lora_key}.bias"), 3, dim=0, ) converted_state_dict[f"{block_prefix}attn.add_q_proj.{lora_key}.bias"] = torch.cat([context_q_bias]) converted_state_dict[f"{block_prefix}attn.add_k_proj.{lora_key}.bias"] = torch.cat([context_k_bias]) converted_state_dict[f"{block_prefix}attn.add_v_proj.{lora_key}.bias"] = torch.cat([context_v_bias]) # 
            # ff img_mlp
            converted_state_dict[f"{block_prefix}ff.net.0.proj.{lora_key}.weight"] = original_state_dict.pop(
                f"{original_block_prefix}double_blocks.{i}.img_mlp.0.{lora_key}.weight"
            )
            if f"{original_block_prefix}double_blocks.{i}.img_mlp.0.{lora_key}.bias" in original_state_dict_keys:
                converted_state_dict[f"{block_prefix}ff.net.0.proj.{lora_key}.bias"] = original_state_dict.pop(
                    f"{original_block_prefix}double_blocks.{i}.img_mlp.0.{lora_key}.bias"
                )

            converted_state_dict[f"{block_prefix}ff.net.2.{lora_key}.weight"] = original_state_dict.pop(
                f"{original_block_prefix}double_blocks.{i}.img_mlp.2.{lora_key}.weight"
            )
            if f"{original_block_prefix}double_blocks.{i}.img_mlp.2.{lora_key}.bias" in original_state_dict_keys:
                converted_state_dict[f"{block_prefix}ff.net.2.{lora_key}.bias"] = original_state_dict.pop(
                    f"{original_block_prefix}double_blocks.{i}.img_mlp.2.{lora_key}.bias"
                )

            converted_state_dict[f"{block_prefix}ff_context.net.0.proj.{lora_key}.weight"] = original_state_dict.pop(
                f"{original_block_prefix}double_blocks.{i}.txt_mlp.0.{lora_key}.weight"
            )
            if f"{original_block_prefix}double_blocks.{i}.txt_mlp.0.{lora_key}.bias" in original_state_dict_keys:
                converted_state_dict[f"{block_prefix}ff_context.net.0.proj.{lora_key}.bias"] = original_state_dict.pop(
                    f"{original_block_prefix}double_blocks.{i}.txt_mlp.0.{lora_key}.bias"
                )

            converted_state_dict[f"{block_prefix}ff_context.net.2.{lora_key}.weight"] = original_state_dict.pop(
                f"{original_block_prefix}double_blocks.{i}.txt_mlp.2.{lora_key}.weight"
            )
            if f"{original_block_prefix}double_blocks.{i}.txt_mlp.2.{lora_key}.bias" in original_state_dict_keys:
                converted_state_dict[f"{block_prefix}ff_context.net.2.{lora_key}.bias"] = original_state_dict.pop(
                    f"{original_block_prefix}double_blocks.{i}.txt_mlp.2.{lora_key}.bias"
                )

            # output projections.
            converted_state_dict[f"{block_prefix}attn.to_out.0.{lora_key}.weight"] = original_state_dict.pop(
                f"{original_block_prefix}double_blocks.{i}.img_attn.proj.{lora_key}.weight"
            )
            if f"{original_block_prefix}double_blocks.{i}.img_attn.proj.{lora_key}.bias" in original_state_dict_keys:
                converted_state_dict[f"{block_prefix}attn.to_out.0.{lora_key}.bias"] = original_state_dict.pop(
                    f"{original_block_prefix}double_blocks.{i}.img_attn.proj.{lora_key}.bias"
                )
            converted_state_dict[f"{block_prefix}attn.to_add_out.{lora_key}.weight"] = original_state_dict.pop(
                f"{original_block_prefix}double_blocks.{i}.txt_attn.proj.{lora_key}.weight"
            )
            if f"{original_block_prefix}double_blocks.{i}.txt_attn.proj.{lora_key}.bias" in original_state_dict_keys:
                converted_state_dict[f"{block_prefix}attn.to_add_out.{lora_key}.bias"] = original_state_dict.pop(
                    f"{original_block_prefix}double_blocks.{i}.txt_attn.proj.{lora_key}.bias"
                )

    # single transformer blocks
    for i in range(num_single_layers):
        block_prefix = f"single_transformer_blocks.{i}."
for lora_key in ["lora_A", "lora_B"]: # norm.linear <- single_blocks.0.modulation.lin converted_state_dict[f"{block_prefix}norm.linear.{lora_key}.weight"] = original_state_dict.pop( f"{original_block_prefix}single_blocks.{i}.modulation.lin.{lora_key}.weight" ) if f"{original_block_prefix}single_blocks.{i}.modulation.lin.{lora_key}.bias" in original_state_dict_keys: converted_state_dict[f"{block_prefix}norm.linear.{lora_key}.bias"] = original_state_dict.pop( f"{original_block_prefix}single_blocks.{i}.modulation.lin.{lora_key}.bias" ) # Q, K, V, mlp mlp_hidden_dim = int(inner_dim * mlp_ratio) split_size = (inner_dim, inner_dim, inner_dim, mlp_hidden_dim) if lora_key == "lora_A": lora_weight = original_state_dict.pop( f"{original_block_prefix}single_blocks.{i}.linear1.{lora_key}.weight" ) converted_state_dict[f"{block_prefix}attn.to_q.{lora_key}.weight"] = torch.cat([lora_weight]) converted_state_dict[f"{block_prefix}attn.to_k.{lora_key}.weight"] = torch.cat([lora_weight]) converted_state_dict[f"{block_prefix}attn.to_v.{lora_key}.weight"] = torch.cat([lora_weight]) converted_state_dict[f"{block_prefix}proj_mlp.{lora_key}.weight"] = torch.cat([lora_weight]) if f"{original_block_prefix}single_blocks.{i}.linear1.{lora_key}.bias" in original_state_dict_keys: lora_bias = original_state_dict.pop(f"single_blocks.{i}.linear1.{lora_key}.bias") converted_state_dict[f"{block_prefix}attn.to_q.{lora_key}.bias"] = torch.cat([lora_bias]) converted_state_dict[f"{block_prefix}attn.to_k.{lora_key}.bias"] = torch.cat([lora_bias]) converted_state_dict[f"{block_prefix}attn.to_v.{lora_key}.bias"] = torch.cat([lora_bias]) converted_state_dict[f"{block_prefix}proj_mlp.{lora_key}.bias"] = torch.cat([lora_bias]) else: q, k, v, mlp = torch.split( original_state_dict.pop(f"{original_block_prefix}single_blocks.{i}.linear1.{lora_key}.weight"), split_size, dim=0, ) converted_state_dict[f"{block_prefix}attn.to_q.{lora_key}.weight"] = torch.cat([q]) converted_state_dict[f"{block_prefix}attn.to_k.{lora_key}.weight"] = torch.cat([k]) converted_state_dict[f"{block_prefix}attn.to_v.{lora_key}.weight"] = torch.cat([v]) converted_state_dict[f"{block_prefix}proj_mlp.{lora_key}.weight"] = torch.cat([mlp]) if f"{original_block_prefix}single_blocks.{i}.linear1.{lora_key}.bias" in original_state_dict_keys: q_bias, k_bias, v_bias, mlp_bias = torch.split( original_state_dict.pop(f"{original_block_prefix}single_blocks.{i}.linear1.{lora_key}.bias"), split_size, dim=0, ) converted_state_dict[f"{block_prefix}attn.to_q.{lora_key}.bias"] = torch.cat([q_bias]) converted_state_dict[f"{block_prefix}attn.to_k.{lora_key}.bias"] = torch.cat([k_bias]) converted_state_dict[f"{block_prefix}attn.to_v.{lora_key}.bias"] = torch.cat([v_bias]) converted_state_dict[f"{block_prefix}proj_mlp.{lora_key}.bias"] = torch.cat([mlp_bias]) # output projections. 
converted_state_dict[f"{block_prefix}proj_out.{lora_key}.weight"] = original_state_dict.pop( f"{original_block_prefix}single_blocks.{i}.linear2.{lora_key}.weight" ) if f"{original_block_prefix}single_blocks.{i}.linear2.{lora_key}.bias" in original_state_dict_keys: converted_state_dict[f"{block_prefix}proj_out.{lora_key}.bias"] = original_state_dict.pop( f"{original_block_prefix}single_blocks.{i}.linear2.{lora_key}.bias" ) for lora_key in ["lora_A", "lora_B"]: converted_state_dict[f"proj_out.{lora_key}.weight"] = original_state_dict.pop( f"{original_block_prefix}final_layer.linear.{lora_key}.weight" ) if f"{original_block_prefix}final_layer.linear.{lora_key}.bias" in original_state_dict_keys: converted_state_dict[f"proj_out.{lora_key}.bias"] = original_state_dict.pop( f"{original_block_prefix}final_layer.linear.{lora_key}.bias" ) if len(original_state_dict) > 0: raise ValueError(f"`original_state_dict` should be empty at this point but has {original_state_dict.keys()=}.") for key in list(converted_state_dict.keys()): converted_state_dict[f"transformer.{key}"] = converted_state_dict.pop(key) return converted_state_dict def _convert_hunyuan_video_lora_to_diffusers(original_state_dict): converted_state_dict = {k: original_state_dict.pop(k) for k in list(original_state_dict.keys())} def remap_norm_scale_shift_(key, state_dict): weight = state_dict.pop(key) shift, scale = weight.chunk(2, dim=0) new_weight = torch.cat([scale, shift], dim=0) state_dict[key.replace("final_layer.adaLN_modulation.1", "norm_out.linear")] = new_weight def remap_txt_in_(key, state_dict): def rename_key(key): new_key = key.replace("individual_token_refiner.blocks", "token_refiner.refiner_blocks") new_key = new_key.replace("adaLN_modulation.1", "norm_out.linear") new_key = new_key.replace("txt_in", "context_embedder") new_key = new_key.replace("t_embedder.mlp.0", "time_text_embed.timestep_embedder.linear_1") new_key = new_key.replace("t_embedder.mlp.2", "time_text_embed.timestep_embedder.linear_2") new_key = new_key.replace("c_embedder", "time_text_embed.text_embedder") new_key = new_key.replace("mlp", "ff") return new_key if "self_attn_qkv" in key: weight = state_dict.pop(key) to_q, to_k, to_v = weight.chunk(3, dim=0) state_dict[rename_key(key.replace("self_attn_qkv", "attn.to_q"))] = to_q state_dict[rename_key(key.replace("self_attn_qkv", "attn.to_k"))] = to_k state_dict[rename_key(key.replace("self_attn_qkv", "attn.to_v"))] = to_v else: state_dict[rename_key(key)] = state_dict.pop(key) def remap_img_attn_qkv_(key, state_dict): weight = state_dict.pop(key) if "lora_A" in key: state_dict[key.replace("img_attn_qkv", "attn.to_q")] = weight state_dict[key.replace("img_attn_qkv", "attn.to_k")] = weight state_dict[key.replace("img_attn_qkv", "attn.to_v")] = weight else: to_q, to_k, to_v = weight.chunk(3, dim=0) state_dict[key.replace("img_attn_qkv", "attn.to_q")] = to_q state_dict[key.replace("img_attn_qkv", "attn.to_k")] = to_k state_dict[key.replace("img_attn_qkv", "attn.to_v")] = to_v def remap_txt_attn_qkv_(key, state_dict): weight = state_dict.pop(key) if "lora_A" in key: state_dict[key.replace("txt_attn_qkv", "attn.add_q_proj")] = weight state_dict[key.replace("txt_attn_qkv", "attn.add_k_proj")] = weight state_dict[key.replace("txt_attn_qkv", "attn.add_v_proj")] = weight else: to_q, to_k, to_v = weight.chunk(3, dim=0) state_dict[key.replace("txt_attn_qkv", "attn.add_q_proj")] = to_q state_dict[key.replace("txt_attn_qkv", "attn.add_k_proj")] = to_k state_dict[key.replace("txt_attn_qkv", "attn.add_v_proj")] = to_v def 
remap_single_transformer_blocks_(key, state_dict): hidden_size = 3072 if "linear1.lora_A.weight" in key or "linear1.lora_B.weight" in key: linear1_weight = state_dict.pop(key) if "lora_A" in key: new_key = key.replace("single_blocks", "single_transformer_blocks").removesuffix( ".linear1.lora_A.weight" ) state_dict[f"{new_key}.attn.to_q.lora_A.weight"] = linear1_weight state_dict[f"{new_key}.attn.to_k.lora_A.weight"] = linear1_weight state_dict[f"{new_key}.attn.to_v.lora_A.weight"] = linear1_weight state_dict[f"{new_key}.proj_mlp.lora_A.weight"] = linear1_weight else: split_size = (hidden_size, hidden_size, hidden_size, linear1_weight.size(0) - 3 * hidden_size) q, k, v, mlp = torch.split(linear1_weight, split_size, dim=0) new_key = key.replace("single_blocks", "single_transformer_blocks").removesuffix( ".linear1.lora_B.weight" ) state_dict[f"{new_key}.attn.to_q.lora_B.weight"] = q state_dict[f"{new_key}.attn.to_k.lora_B.weight"] = k state_dict[f"{new_key}.attn.to_v.lora_B.weight"] = v state_dict[f"{new_key}.proj_mlp.lora_B.weight"] = mlp elif "linear1.lora_A.bias" in key or "linear1.lora_B.bias" in key: linear1_bias = state_dict.pop(key) if "lora_A" in key: new_key = key.replace("single_blocks", "single_transformer_blocks").removesuffix( ".linear1.lora_A.bias" ) state_dict[f"{new_key}.attn.to_q.lora_A.bias"] = linear1_bias state_dict[f"{new_key}.attn.to_k.lora_A.bias"] = linear1_bias state_dict[f"{new_key}.attn.to_v.lora_A.bias"] = linear1_bias state_dict[f"{new_key}.proj_mlp.lora_A.bias"] = linear1_bias else: split_size = (hidden_size, hidden_size, hidden_size, linear1_bias.size(0) - 3 * hidden_size) q_bias, k_bias, v_bias, mlp_bias = torch.split(linear1_bias, split_size, dim=0) new_key = key.replace("single_blocks", "single_transformer_blocks").removesuffix( ".linear1.lora_B.bias" ) state_dict[f"{new_key}.attn.to_q.lora_B.bias"] = q_bias state_dict[f"{new_key}.attn.to_k.lora_B.bias"] = k_bias state_dict[f"{new_key}.attn.to_v.lora_B.bias"] = v_bias state_dict[f"{new_key}.proj_mlp.lora_B.bias"] = mlp_bias else: new_key = key.replace("single_blocks", "single_transformer_blocks") new_key = new_key.replace("linear2", "proj_out") new_key = new_key.replace("q_norm", "attn.norm_q") new_key = new_key.replace("k_norm", "attn.norm_k") state_dict[new_key] = state_dict.pop(key) TRANSFORMER_KEYS_RENAME_DICT = { "img_in": "x_embedder", "time_in.mlp.0": "time_text_embed.timestep_embedder.linear_1", "time_in.mlp.2": "time_text_embed.timestep_embedder.linear_2", "guidance_in.mlp.0": "time_text_embed.guidance_embedder.linear_1", "guidance_in.mlp.2": "time_text_embed.guidance_embedder.linear_2", "vector_in.in_layer": "time_text_embed.text_embedder.linear_1", "vector_in.out_layer": "time_text_embed.text_embedder.linear_2", "double_blocks": "transformer_blocks", "img_attn_q_norm": "attn.norm_q", "img_attn_k_norm": "attn.norm_k", "img_attn_proj": "attn.to_out.0", "txt_attn_q_norm": "attn.norm_added_q", "txt_attn_k_norm": "attn.norm_added_k", "txt_attn_proj": "attn.to_add_out", "img_mod.linear": "norm1.linear", "img_norm1": "norm1.norm", "img_norm2": "norm2", "img_mlp": "ff", "txt_mod.linear": "norm1_context.linear", "txt_norm1": "norm1.norm", "txt_norm2": "norm2_context", "txt_mlp": "ff_context", "self_attn_proj": "attn.to_out.0", "modulation.linear": "norm.linear", "pre_norm": "norm.norm", "final_layer.norm_final": "norm_out.norm", "final_layer.linear": "proj_out", "fc1": "net.0.proj", "fc2": "net.2", "input_embedder": "proj_in", } TRANSFORMER_SPECIAL_KEYS_REMAP = { "txt_in": remap_txt_in_, "img_attn_qkv": 
remap_img_attn_qkv_, "txt_attn_qkv": remap_txt_attn_qkv_, "single_blocks": remap_single_transformer_blocks_, "final_layer.adaLN_modulation.1": remap_norm_scale_shift_, } # Some folks attempt to make their state dict compatible with diffusers by adding "transformer." prefix to all keys # and use their custom code. To make sure both "original" and "attempted diffusers" loras work as expected, we make # sure that both follow the same initial format by stripping off the "transformer." prefix. for key in list(converted_state_dict.keys()): if key.startswith("transformer."): converted_state_dict[key[len("transformer.") :]] = converted_state_dict.pop(key) if key.startswith("diffusion_model."): converted_state_dict[key[len("diffusion_model.") :]] = converted_state_dict.pop(key) # Rename and remap the state dict keys for key in list(converted_state_dict.keys()): new_key = key[:] for replace_key, rename_key in TRANSFORMER_KEYS_RENAME_DICT.items(): new_key = new_key.replace(replace_key, rename_key) converted_state_dict[new_key] = converted_state_dict.pop(key) for key in list(converted_state_dict.keys()): for special_key, handler_fn_inplace in TRANSFORMER_SPECIAL_KEYS_REMAP.items(): if special_key not in key: continue handler_fn_inplace(key, converted_state_dict) # Add back the "transformer." prefix for key in list(converted_state_dict.keys()): converted_state_dict[f"transformer.{key}"] = converted_state_dict.pop(key) return converted_state_dict def _convert_non_diffusers_lumina2_lora_to_diffusers(state_dict): # Remove "diffusion_model." prefix from keys. state_dict = {k[len("diffusion_model.") :]: v for k, v in state_dict.items()} converted_state_dict = {} def get_num_layers(keys, pattern): layers = set() for key in keys: match = re.search(pattern, key) if match: layers.add(int(match.group(1))) return len(layers) def process_block(prefix, index, convert_norm): # Process attention qkv: pop lora_A and lora_B weights. lora_down = state_dict.pop(f"{prefix}.{index}.attention.qkv.lora_A.weight") lora_up = state_dict.pop(f"{prefix}.{index}.attention.qkv.lora_B.weight") for attn_key in ["to_q", "to_k", "to_v"]: converted_state_dict[f"{prefix}.{index}.attn.{attn_key}.lora_A.weight"] = lora_down for attn_key, weight in zip(["to_q", "to_k", "to_v"], torch.split(lora_up, [2304, 768, 768], dim=0)): converted_state_dict[f"{prefix}.{index}.attn.{attn_key}.lora_B.weight"] = weight # Process attention out weights. converted_state_dict[f"{prefix}.{index}.attn.to_out.0.lora_A.weight"] = state_dict.pop( f"{prefix}.{index}.attention.out.lora_A.weight" ) converted_state_dict[f"{prefix}.{index}.attn.to_out.0.lora_B.weight"] = state_dict.pop( f"{prefix}.{index}.attention.out.lora_B.weight" ) # Process feed-forward weights for layers 1, 2, and 3. for layer in range(1, 4): converted_state_dict[f"{prefix}.{index}.feed_forward.linear_{layer}.lora_A.weight"] = state_dict.pop( f"{prefix}.{index}.feed_forward.w{layer}.lora_A.weight" ) converted_state_dict[f"{prefix}.{index}.feed_forward.linear_{layer}.lora_B.weight"] = state_dict.pop( f"{prefix}.{index}.feed_forward.w{layer}.lora_B.weight" ) if convert_norm: converted_state_dict[f"{prefix}.{index}.norm1.linear.lora_A.weight"] = state_dict.pop( f"{prefix}.{index}.adaLN_modulation.1.lora_A.weight" ) converted_state_dict[f"{prefix}.{index}.norm1.linear.lora_B.weight"] = state_dict.pop( f"{prefix}.{index}.adaLN_modulation.1.lora_B.weight" ) noise_refiner_pattern = r"noise_refiner\.(\d+)\." 
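    # Count how many blocks of each family the checkpoint contains, then convert them one by one.
    # Only the noise refiner and the core transformer layers carry adaLN modulation (norm1.linear)
    # LoRA weights, so `convert_norm` is enabled for those two families only.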
num_noise_refiner_layers = get_num_layers(state_dict.keys(), noise_refiner_pattern) for i in range(num_noise_refiner_layers): process_block("noise_refiner", i, convert_norm=True) context_refiner_pattern = r"context_refiner\.(\d+)\." num_context_refiner_layers = get_num_layers(state_dict.keys(), context_refiner_pattern) for i in range(num_context_refiner_layers): process_block("context_refiner", i, convert_norm=False) core_transformer_pattern = r"layers\.(\d+)\." num_core_transformer_layers = get_num_layers(state_dict.keys(), core_transformer_pattern) for i in range(num_core_transformer_layers): process_block("layers", i, convert_norm=True) if len(state_dict) > 0: raise ValueError(f"`state_dict` should be empty at this point but has {state_dict.keys()=}") for key in list(converted_state_dict.keys()): converted_state_dict[f"transformer.{key}"] = converted_state_dict.pop(key) return converted_state_dict def _convert_non_diffusers_wan_lora_to_diffusers(state_dict): converted_state_dict = {} original_state_dict = {k[len("diffusion_model.") :]: v for k, v in state_dict.items()} block_numbers = {int(k.split(".")[1]) for k in original_state_dict if k.startswith("blocks.")} min_block = min(block_numbers) max_block = max(block_numbers) is_i2v_lora = any("k_img" in k for k in original_state_dict) and any("v_img" in k for k in original_state_dict) lora_down_key = "lora_A" if any("lora_A" in k for k in original_state_dict) else "lora_down" lora_up_key = "lora_B" if any("lora_B" in k for k in original_state_dict) else "lora_up" has_time_projection_weight = any( k.startswith("time_projection") and k.endswith(".weight") for k in original_state_dict ) def get_alpha_scales(down_weight, alpha_key): rank = down_weight.shape[0] alpha = original_state_dict.pop(alpha_key).item() scale = alpha / rank # LoRA is scaled by 'alpha / rank' in forward pass, so we need to scale it back here scale_down = scale scale_up = 1.0 while scale_down * 2 < scale_up: scale_down *= 2 scale_up /= 2 return scale_down, scale_up for key in list(original_state_dict.keys()): if key.endswith((".diff", ".diff_b")) and "norm" in key: # NOTE: we don't support this because norm layer diff keys are just zeroed values. We can support it # in future if needed and they are not zeroed. original_state_dict.pop(key) logger.debug(f"Removing {key} key from the state dict as it is a norm diff key. This is unsupported.") if "time_projection" in key and not has_time_projection_weight: # AccVideo lora has diff bias keys but not the weight keys. This causes a weird problem where # our lora config adds the time proj lora layers, but we don't have the weights for them. # CausVid lora has the weight keys and the bias keys. original_state_dict.pop(key) # For the `diff_b` keys, we treat them as lora_bias. 
# https://huggingface.co/docs/peft/main/en/package_reference/lora#peft.LoraConfig.lora_bias for i in range(min_block, max_block + 1): # Self-attention for o, c in zip(["q", "k", "v", "o"], ["to_q", "to_k", "to_v", "to_out.0"]): alpha_key = f"blocks.{i}.self_attn.{o}.alpha" has_alpha = alpha_key in original_state_dict original_key_A = f"blocks.{i}.self_attn.{o}.{lora_down_key}.weight" converted_key_A = f"blocks.{i}.attn1.{c}.lora_A.weight" original_key_B = f"blocks.{i}.self_attn.{o}.{lora_up_key}.weight" converted_key_B = f"blocks.{i}.attn1.{c}.lora_B.weight" if has_alpha: down_weight = original_state_dict.pop(original_key_A) up_weight = original_state_dict.pop(original_key_B) scale_down, scale_up = get_alpha_scales(down_weight, alpha_key) converted_state_dict[converted_key_A] = down_weight * scale_down converted_state_dict[converted_key_B] = up_weight * scale_up else: if original_key_A in original_state_dict: converted_state_dict[converted_key_A] = original_state_dict.pop(original_key_A) if original_key_B in original_state_dict: converted_state_dict[converted_key_B] = original_state_dict.pop(original_key_B) original_key = f"blocks.{i}.self_attn.{o}.diff_b" converted_key = f"blocks.{i}.attn1.{c}.lora_B.bias" if original_key in original_state_dict: converted_state_dict[converted_key] = original_state_dict.pop(original_key) # Cross-attention for o, c in zip(["q", "k", "v", "o"], ["to_q", "to_k", "to_v", "to_out.0"]): alpha_key = f"blocks.{i}.cross_attn.{o}.alpha" has_alpha = alpha_key in original_state_dict original_key_A = f"blocks.{i}.cross_attn.{o}.{lora_down_key}.weight" converted_key_A = f"blocks.{i}.attn2.{c}.lora_A.weight" original_key_B = f"blocks.{i}.cross_attn.{o}.{lora_up_key}.weight" converted_key_B = f"blocks.{i}.attn2.{c}.lora_B.weight" if original_key_A in original_state_dict: down_weight = original_state_dict.pop(original_key_A) converted_state_dict[converted_key_A] = down_weight if original_key_B in original_state_dict: up_weight = original_state_dict.pop(original_key_B) converted_state_dict[converted_key_B] = up_weight if has_alpha: scale_down, scale_up = get_alpha_scales(down_weight, alpha_key) converted_state_dict[converted_key_A] *= scale_down converted_state_dict[converted_key_B] *= scale_up original_key = f"blocks.{i}.cross_attn.{o}.diff_b" converted_key = f"blocks.{i}.attn2.{c}.lora_B.bias" if original_key in original_state_dict: converted_state_dict[converted_key] = original_state_dict.pop(original_key) if is_i2v_lora: for o, c in zip(["k_img", "v_img"], ["add_k_proj", "add_v_proj"]): alpha_key = f"blocks.{i}.cross_attn.{o}.alpha" has_alpha = alpha_key in original_state_dict original_key_A = f"blocks.{i}.cross_attn.{o}.{lora_down_key}.weight" converted_key_A = f"blocks.{i}.attn2.{c}.lora_A.weight" original_key_B = f"blocks.{i}.cross_attn.{o}.{lora_up_key}.weight" converted_key_B = f"blocks.{i}.attn2.{c}.lora_B.weight" if original_key_A in original_state_dict: down_weight = original_state_dict.pop(original_key_A) converted_state_dict[converted_key_A] = down_weight if original_key_B in original_state_dict: up_weight = original_state_dict.pop(original_key_B) converted_state_dict[converted_key_B] = up_weight if has_alpha: scale_down, scale_up = get_alpha_scales(down_weight, alpha_key) converted_state_dict[converted_key_A] *= scale_down converted_state_dict[converted_key_B] *= scale_up original_key = f"blocks.{i}.cross_attn.{o}.diff_b" converted_key = f"blocks.{i}.attn2.{c}.lora_B.bias" if original_key in original_state_dict: converted_state_dict[converted_key] = 
original_state_dict.pop(original_key) # FFN for o, c in zip(["ffn.0", "ffn.2"], ["net.0.proj", "net.2"]): alpha_key = f"blocks.{i}.{o}.alpha" has_alpha = alpha_key in original_state_dict original_key_A = f"blocks.{i}.{o}.{lora_down_key}.weight" converted_key_A = f"blocks.{i}.ffn.{c}.lora_A.weight" original_key_B = f"blocks.{i}.{o}.{lora_up_key}.weight" converted_key_B = f"blocks.{i}.ffn.{c}.lora_B.weight" if original_key_A in original_state_dict: down_weight = original_state_dict.pop(original_key_A) converted_state_dict[converted_key_A] = down_weight if original_key_B in original_state_dict: up_weight = original_state_dict.pop(original_key_B) converted_state_dict[converted_key_B] = up_weight if has_alpha: scale_down, scale_up = get_alpha_scales(down_weight, alpha_key) converted_state_dict[converted_key_A] *= scale_down converted_state_dict[converted_key_B] *= scale_up original_key = f"blocks.{i}.{o}.diff_b" converted_key = f"blocks.{i}.ffn.{c}.lora_B.bias" if original_key in original_state_dict: converted_state_dict[converted_key] = original_state_dict.pop(original_key) # Remaining. if original_state_dict: if any("time_projection" in k for k in original_state_dict): original_key = f"time_projection.1.{lora_down_key}.weight" converted_key = "condition_embedder.time_proj.lora_A.weight" if original_key in original_state_dict: converted_state_dict[converted_key] = original_state_dict.pop(original_key) original_key = f"time_projection.1.{lora_up_key}.weight" converted_key = "condition_embedder.time_proj.lora_B.weight" if original_key in original_state_dict: converted_state_dict[converted_key] = original_state_dict.pop(original_key) if "time_projection.1.diff_b" in original_state_dict: converted_state_dict["condition_embedder.time_proj.lora_B.bias"] = original_state_dict.pop( "time_projection.1.diff_b" ) if any("head.head" in k for k in state_dict): converted_state_dict["proj_out.lora_A.weight"] = original_state_dict.pop( f"head.head.{lora_down_key}.weight" ) converted_state_dict["proj_out.lora_B.weight"] = original_state_dict.pop(f"head.head.{lora_up_key}.weight") if "head.head.diff_b" in original_state_dict: converted_state_dict["proj_out.lora_B.bias"] = original_state_dict.pop("head.head.diff_b") for text_time in ["text_embedding", "time_embedding"]: if any(text_time in k for k in original_state_dict): for b_n in [0, 2]: diffusers_b_n = 1 if b_n == 0 else 2 diffusers_name = ( "condition_embedder.text_embedder" if text_time == "text_embedding" else "condition_embedder.time_embedder" ) if any(f"{text_time}.{b_n}" in k for k in original_state_dict): converted_state_dict[f"{diffusers_name}.linear_{diffusers_b_n}.lora_A.weight"] = ( original_state_dict.pop(f"{text_time}.{b_n}.{lora_down_key}.weight") ) converted_state_dict[f"{diffusers_name}.linear_{diffusers_b_n}.lora_B.weight"] = ( original_state_dict.pop(f"{text_time}.{b_n}.{lora_up_key}.weight") ) if f"{text_time}.{b_n}.diff_b" in original_state_dict: converted_state_dict[f"{diffusers_name}.linear_{diffusers_b_n}.lora_B.bias"] = ( original_state_dict.pop(f"{text_time}.{b_n}.diff_b") ) for img_ours, img_theirs in [ ("ff.net.0.proj", "img_emb.proj.1"), ("ff.net.2", "img_emb.proj.3"), ]: original_key = f"{img_theirs}.{lora_down_key}.weight" converted_key = f"condition_embedder.image_embedder.{img_ours}.lora_A.weight" if original_key in original_state_dict: converted_state_dict[converted_key] = original_state_dict.pop(original_key) original_key = f"{img_theirs}.{lora_up_key}.weight" converted_key = 
f"condition_embedder.image_embedder.{img_ours}.lora_B.weight" if original_key in original_state_dict: converted_state_dict[converted_key] = original_state_dict.pop(original_key) bias_key_theirs = original_key.removesuffix(f".{lora_up_key}.weight") + ".diff_b" if bias_key_theirs in original_state_dict: bias_key = converted_key.removesuffix(".weight") + ".bias" converted_state_dict[bias_key] = original_state_dict.pop(bias_key_theirs) if len(original_state_dict) > 0: diff = all(".diff" in k for k in original_state_dict) if diff: diff_keys = {k for k in original_state_dict if k.endswith(".diff")} if not all("lora" not in k for k in diff_keys): raise ValueError logger.info( "The remaining `state_dict` contains `diff` keys which we do not handle yet. If you see performance issues, please file an issue: " "https://github.com/huggingface/diffusers//issues/new" ) else: raise ValueError(f"`state_dict` should be empty at this point but has {original_state_dict.keys()=}") for key in list(converted_state_dict.keys()): converted_state_dict[f"transformer.{key}"] = converted_state_dict.pop(key) return converted_state_dict def _convert_musubi_wan_lora_to_diffusers(state_dict): # https://github.com/kohya-ss/musubi-tuner converted_state_dict = {} original_state_dict = {k[len("lora_unet_") :]: v for k, v in state_dict.items()} num_blocks = len({k.split("blocks_")[1].split("_")[0] for k in original_state_dict}) is_i2v_lora = any("k_img" in k for k in original_state_dict) and any("v_img" in k for k in original_state_dict) def get_alpha_scales(down_weight, key): rank = down_weight.shape[0] alpha = original_state_dict.pop(key + ".alpha").item() scale = alpha / rank # LoRA is scaled by 'alpha / rank' in forward pass, so we need to scale it back here scale_down = scale scale_up = 1.0 while scale_down * 2 < scale_up: scale_down *= 2 scale_up /= 2 return scale_down, scale_up for i in range(num_blocks): # Self-attention for o, c in zip(["q", "k", "v", "o"], ["to_q", "to_k", "to_v", "to_out.0"]): down_weight = original_state_dict.pop(f"blocks_{i}_self_attn_{o}.lora_down.weight") up_weight = original_state_dict.pop(f"blocks_{i}_self_attn_{o}.lora_up.weight") scale_down, scale_up = get_alpha_scales(down_weight, f"blocks_{i}_self_attn_{o}") converted_state_dict[f"blocks.{i}.attn1.{c}.lora_A.weight"] = down_weight * scale_down converted_state_dict[f"blocks.{i}.attn1.{c}.lora_B.weight"] = up_weight * scale_up # Cross-attention for o, c in zip(["q", "k", "v", "o"], ["to_q", "to_k", "to_v", "to_out.0"]): down_weight = original_state_dict.pop(f"blocks_{i}_cross_attn_{o}.lora_down.weight") up_weight = original_state_dict.pop(f"blocks_{i}_cross_attn_{o}.lora_up.weight") scale_down, scale_up = get_alpha_scales(down_weight, f"blocks_{i}_cross_attn_{o}") converted_state_dict[f"blocks.{i}.attn2.{c}.lora_A.weight"] = down_weight * scale_down converted_state_dict[f"blocks.{i}.attn2.{c}.lora_B.weight"] = up_weight * scale_up if is_i2v_lora: for o, c in zip(["k_img", "v_img"], ["add_k_proj", "add_v_proj"]): down_weight = original_state_dict.pop(f"blocks_{i}_cross_attn_{o}.lora_down.weight") up_weight = original_state_dict.pop(f"blocks_{i}_cross_attn_{o}.lora_up.weight") scale_down, scale_up = get_alpha_scales(down_weight, f"blocks_{i}_cross_attn_{o}") converted_state_dict[f"blocks.{i}.attn2.{c}.lora_A.weight"] = down_weight * scale_down converted_state_dict[f"blocks.{i}.attn2.{c}.lora_B.weight"] = up_weight * scale_up # FFN for o, c in zip(["ffn_0", "ffn_2"], ["net.0.proj", "net.2"]): down_weight = 
original_state_dict.pop(f"blocks_{i}_{o}.lora_down.weight") up_weight = original_state_dict.pop(f"blocks_{i}_{o}.lora_up.weight") scale_down, scale_up = get_alpha_scales(down_weight, f"blocks_{i}_{o}") converted_state_dict[f"blocks.{i}.ffn.{c}.lora_A.weight"] = down_weight * scale_down converted_state_dict[f"blocks.{i}.ffn.{c}.lora_B.weight"] = up_weight * scale_up if len(original_state_dict) > 0: raise ValueError(f"`state_dict` should be empty at this point but has {original_state_dict.keys()=}") for key in list(converted_state_dict.keys()): converted_state_dict[f"transformer.{key}"] = converted_state_dict.pop(key) return converted_state_dict def _convert_non_diffusers_hidream_lora_to_diffusers(state_dict, non_diffusers_prefix="diffusion_model"): if not all(k.startswith(non_diffusers_prefix) for k in state_dict): raise ValueError("Invalid LoRA state dict for HiDream.") converted_state_dict = {k.removeprefix(f"{non_diffusers_prefix}."): v for k, v in state_dict.items()} converted_state_dict = {f"transformer.{k}": v for k, v in converted_state_dict.items()} return converted_state_dict def _convert_non_diffusers_ltxv_lora_to_diffusers(state_dict, non_diffusers_prefix="diffusion_model"): if not all(k.startswith(f"{non_diffusers_prefix}.") for k in state_dict): raise ValueError("Invalid LoRA state dict for LTX-Video.") converted_state_dict = {k.removeprefix(f"{non_diffusers_prefix}."): v for k, v in state_dict.items()} converted_state_dict = {f"transformer.{k}": v for k, v in converted_state_dict.items()} return converted_state_dict def _convert_non_diffusers_qwen_lora_to_diffusers(state_dict): has_lora_unet = any(k.startswith("lora_unet_") for k in state_dict) if has_lora_unet: state_dict = {k.removeprefix("lora_unet_"): v for k, v in state_dict.items()} def convert_key(key: str) -> str: prefix = "transformer_blocks" if "." in key: base, suffix = key.rsplit(".", 1) else: base, suffix = key, "" start = f"{prefix}_" rest = base[len(start) :] if "." in rest: head, tail = rest.split(".", 1) tail = "." + tail else: head, tail = rest, "" # Protected n-grams that must keep their internal underscores protected = { # pairs ("to", "q"), ("to", "k"), ("to", "v"), ("to", "out"), ("add", "q"), ("add", "k"), ("add", "v"), ("txt", "mlp"), ("img", "mlp"), ("txt", "mod"), ("img", "mod"), # triplets ("add", "q", "proj"), ("add", "k", "proj"), ("add", "v", "proj"), ("to", "add", "out"), } prot_by_len = {} for ng in protected: prot_by_len.setdefault(len(ng), set()).add(ng) parts = head.split("_") merged = [] i = 0 lengths_desc = sorted(prot_by_len.keys(), reverse=True) while i < len(parts): matched = False for L in lengths_desc: if i + L <= len(parts) and tuple(parts[i : i + L]) in prot_by_len[L]: merged.append("_".join(parts[i : i + L])) i += L matched = True break if not matched: merged.append(parts[i]) i += 1 head_converted = ".".join(merged) converted_base = f"{prefix}.{head_converted}{tail}" return converted_base + (("." 
+ suffix) if suffix else "") state_dict = {convert_key(k): v for k, v in state_dict.items()} converted_state_dict = {} all_keys = list(state_dict.keys()) down_key = ".lora_down.weight" up_key = ".lora_up.weight" def get_alpha_scales(down_weight, alpha_key): rank = down_weight.shape[0] alpha = state_dict.pop(alpha_key).item() scale = alpha / rank # LoRA is scaled by 'alpha / rank' in forward pass, so we need to scale it back here scale_down = scale scale_up = 1.0 while scale_down * 2 < scale_up: scale_down *= 2 scale_up /= 2 return scale_down, scale_up for k in all_keys: if k.endswith(down_key): diffusers_down_key = k.replace(down_key, ".lora_A.weight") diffusers_up_key = k.replace(down_key, up_key).replace(up_key, ".lora_B.weight") alpha_key = k.replace(down_key, ".alpha") down_weight = state_dict.pop(k) up_weight = state_dict.pop(k.replace(down_key, up_key)) scale_down, scale_up = get_alpha_scales(down_weight, alpha_key) converted_state_dict[diffusers_down_key] = down_weight * scale_down converted_state_dict[diffusers_up_key] = up_weight * scale_up if len(state_dict) > 0: raise ValueError(f"`state_dict` should be empty at this point but has {state_dict.keys()=}") converted_state_dict = {f"transformer.{k}": v for k, v in converted_state_dict.items()} return converted_state_dict
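# The non-diffusers converters above fold the LoRA `alpha` into the weights as `alpha / rank`,
# splitting the factor between the down (lora_A) and up (lora_B) matrices so that neither one is
# scaled by an extreme value. The function below is a minimal illustrative sketch of that
# factorization and is not part of the original module; its name and default shapes are
# assumptions chosen only for demonstration.
def _example_alpha_rescale_check(rank: int = 16, in_features: int = 32, out_features: int = 32, alpha: float = 1.0):
    down = torch.randn(rank, in_features)  # stands in for a `lora_down` / `lora_A` weight
    up = torch.randn(out_features, rank)  # stands in for a `lora_up` / `lora_B` weight
    scale = alpha / rank
    # Same rebalancing loop as `get_alpha_scales` above: move powers of two from `scale_up`
    # to `scale_down` while preserving their product.
    scale_down, scale_up = scale, 1.0
    while scale_down * 2 < scale_up:
        scale_down *= 2
        scale_up /= 2
    # The effective LoRA delta is unchanged: (up * scale_up) @ (down * scale_down) equals
    # (alpha / rank) * (up @ down).
    torch.testing.assert_close((up * scale_up) @ (down * scale_down), scale * (up @ down))
    return scale_down, scale_up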
diffusers/src/diffusers/loaders/lora_conversion_utils.py/0
{ "file_path": "diffusers/src/diffusers/loaders/lora_conversion_utils.py", "repo_id": "diffusers", "token_count": 56583 }
160
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch import torch.nn as nn import torch.nn.functional as F from ..utils import deprecate, logging from ..utils.import_utils import is_torch_npu_available, is_torch_xla_available, is_xformers_available from ..utils.torch_utils import maybe_allow_in_graph from .activations import GEGLU, GELU, ApproximateGELU, FP32SiLU, LinearActivation, SwiGLU from .attention_processor import Attention, AttentionProcessor, JointAttnProcessor2_0 from .embeddings import SinusoidalPositionalEmbedding from .normalization import AdaLayerNorm, AdaLayerNormContinuous, AdaLayerNormZero, RMSNorm, SD35AdaLayerNormZeroX if is_xformers_available(): import xformers as xops else: xops = None logger = logging.get_logger(__name__) class AttentionMixin: @property def attn_processors(self) -> Dict[str, AttentionProcessor]: r""" Returns: `dict` of attention processors: A dictionary containing all attention processors used in the model with indexed by its weight name. """ # set recursively processors = {} def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): if hasattr(module, "get_processor"): processors[f"{name}.processor"] = module.get_processor() for sub_name, child in module.named_children(): fn_recursive_add_processors(f"{name}.{sub_name}", child, processors) return processors for name, module in self.named_children(): fn_recursive_add_processors(name, module, processors) return processors def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): r""" Sets the attention processor to use to compute attention. Parameters: processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`): The instantiated processor class or a dictionary of processor classes that will be set as the processor for **all** `Attention` layers. If `processor` is a dict, the key needs to define the path to the corresponding cross attention processor. This is strongly recommended when setting trainable attention processors. """ count = len(self.attn_processors.keys()) if isinstance(processor, dict) and len(processor) != count: raise ValueError( f"A dict of processors was passed, but the number of processors {len(processor)} does not match the" f" number of attention layers: {count}. Please make sure to pass {count} processor classes." ) def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): if hasattr(module, "set_processor"): if not isinstance(processor, dict): module.set_processor(processor) else: module.set_processor(processor.pop(f"{name}.processor")) for sub_name, child in module.named_children(): fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor) for name, module in self.named_children(): fn_recursive_attn_processor(name, module, processor) def fuse_qkv_projections(self): """ Enables fused QKV projections. 
For self-attention modules, all projection matrices (i.e., query, key, value) are fused. For cross-attention modules, key and value projection matrices are fused. """ for _, attn_processor in self.attn_processors.items(): if "Added" in str(attn_processor.__class__.__name__): raise ValueError("`fuse_qkv_projections()` is not supported for models having added KV projections.") for module in self.modules(): if isinstance(module, AttentionModuleMixin): module.fuse_projections() def unfuse_qkv_projections(self): """Disables the fused QKV projection if enabled. <Tip warning={true}> This API is 🧪 experimental. </Tip> """ for module in self.modules(): if isinstance(module, AttentionModuleMixin): module.unfuse_projections() class AttentionModuleMixin: _default_processor_cls = None _available_processors = [] fused_projections = False def set_processor(self, processor: AttentionProcessor) -> None: """ Set the attention processor to use. Args: processor (`AttnProcessor`): The attention processor to use. """ # if current processor is in `self._modules` and if passed `processor` is not, we need to # pop `processor` from `self._modules` if ( hasattr(self, "processor") and isinstance(self.processor, torch.nn.Module) and not isinstance(processor, torch.nn.Module) ): logger.info(f"You are removing possibly trained weights of {self.processor} with {processor}") self._modules.pop("processor") self.processor = processor def get_processor(self, return_deprecated_lora: bool = False) -> "AttentionProcessor": """ Get the attention processor in use. Args: return_deprecated_lora (`bool`, *optional*, defaults to `False`): Set to `True` to return the deprecated LoRA attention processor. Returns: "AttentionProcessor": The attention processor in use. """ if not return_deprecated_lora: return self.processor def set_attention_backend(self, backend: str): from .attention_dispatch import AttentionBackendName available_backends = {x.value for x in AttentionBackendName.__members__.values()} if backend not in available_backends: raise ValueError(f"`{backend=}` must be one of the following: " + ", ".join(available_backends)) backend = AttentionBackendName(backend.lower()) self.processor._attention_backend = backend def set_use_npu_flash_attention(self, use_npu_flash_attention: bool) -> None: """ Set whether to use NPU flash attention from `torch_npu` or not. Args: use_npu_flash_attention (`bool`): Whether to use NPU flash attention or not. """ if use_npu_flash_attention: if not is_torch_npu_available(): raise ImportError("torch_npu is not available") self.set_attention_backend("_native_npu") def set_use_xla_flash_attention( self, use_xla_flash_attention: bool, partition_spec: Optional[Tuple[Optional[str], ...]] = None, is_flux=False, ) -> None: """ Set whether to use XLA flash attention from `torch_xla` or not. Args: use_xla_flash_attention (`bool`): Whether to use pallas flash attention kernel from `torch_xla` or not. partition_spec (`Tuple[]`, *optional*): Specify the partition specification if using SPMD. Otherwise None. is_flux (`bool`, *optional*, defaults to `False`): Whether the model is a Flux model. """ if use_xla_flash_attention: if not is_torch_xla_available(): raise ImportError("torch_xla is not available") self.set_attention_backend("_native_xla") def set_use_memory_efficient_attention_xformers( self, use_memory_efficient_attention_xformers: bool, attention_op: Optional[Callable] = None ) -> None: """ Set whether to use memory efficient attention from `xformers` or not. 
Args: use_memory_efficient_attention_xformers (`bool`): Whether to use memory efficient attention from `xformers` or not. attention_op (`Callable`, *optional*): The attention operation to use. Defaults to `None` which uses the default attention operation from `xformers`. """ if use_memory_efficient_attention_xformers: if not is_xformers_available(): raise ModuleNotFoundError( "Refer to https://github.com/facebookresearch/xformers for more information on how to install xformers", name="xformers", ) elif not torch.cuda.is_available(): raise ValueError( "torch.cuda.is_available() should be True but is False. xformers' memory efficient attention is" " only available for GPU " ) else: try: # Make sure we can run the memory efficient attention if is_xformers_available(): dtype = None if attention_op is not None: op_fw, op_bw = attention_op dtype, *_ = op_fw.SUPPORTED_DTYPES q = torch.randn((1, 2, 40), device="cuda", dtype=dtype) _ = xops.memory_efficient_attention(q, q, q) except Exception as e: raise e self.set_attention_backend("xformers") @torch.no_grad() def fuse_projections(self): """ Fuse the query, key, and value projections into a single projection for efficiency. """ # Skip if already fused if getattr(self, "fused_projections", False): return device = self.to_q.weight.data.device dtype = self.to_q.weight.data.dtype if hasattr(self, "is_cross_attention") and self.is_cross_attention: # Fuse cross-attention key-value projections concatenated_weights = torch.cat([self.to_k.weight.data, self.to_v.weight.data]) in_features = concatenated_weights.shape[1] out_features = concatenated_weights.shape[0] self.to_kv = nn.Linear(in_features, out_features, bias=self.use_bias, device=device, dtype=dtype) self.to_kv.weight.copy_(concatenated_weights) if hasattr(self, "use_bias") and self.use_bias: concatenated_bias = torch.cat([self.to_k.bias.data, self.to_v.bias.data]) self.to_kv.bias.copy_(concatenated_bias) else: # Fuse self-attention projections concatenated_weights = torch.cat([self.to_q.weight.data, self.to_k.weight.data, self.to_v.weight.data]) in_features = concatenated_weights.shape[1] out_features = concatenated_weights.shape[0] self.to_qkv = nn.Linear(in_features, out_features, bias=self.use_bias, device=device, dtype=dtype) self.to_qkv.weight.copy_(concatenated_weights) if hasattr(self, "use_bias") and self.use_bias: concatenated_bias = torch.cat([self.to_q.bias.data, self.to_k.bias.data, self.to_v.bias.data]) self.to_qkv.bias.copy_(concatenated_bias) # Handle added projections for models like SD3, Flux, etc. if ( getattr(self, "add_q_proj", None) is not None and getattr(self, "add_k_proj", None) is not None and getattr(self, "add_v_proj", None) is not None ): concatenated_weights = torch.cat( [self.add_q_proj.weight.data, self.add_k_proj.weight.data, self.add_v_proj.weight.data] ) in_features = concatenated_weights.shape[1] out_features = concatenated_weights.shape[0] self.to_added_qkv = nn.Linear( in_features, out_features, bias=self.added_proj_bias, device=device, dtype=dtype ) self.to_added_qkv.weight.copy_(concatenated_weights) if self.added_proj_bias: concatenated_bias = torch.cat( [self.add_q_proj.bias.data, self.add_k_proj.bias.data, self.add_v_proj.bias.data] ) self.to_added_qkv.bias.copy_(concatenated_bias) self.fused_projections = True @torch.no_grad() def unfuse_projections(self): """ Unfuse the query, key, and value projections back to separate projections. 
""" # Skip if not fused if not getattr(self, "fused_projections", False): return # Remove fused projection layers if hasattr(self, "to_qkv"): delattr(self, "to_qkv") if hasattr(self, "to_kv"): delattr(self, "to_kv") if hasattr(self, "to_added_qkv"): delattr(self, "to_added_qkv") self.fused_projections = False def set_attention_slice(self, slice_size: int) -> None: """ Set the slice size for attention computation. Args: slice_size (`int`): The slice size for attention computation. """ if hasattr(self, "sliceable_head_dim") and slice_size is not None and slice_size > self.sliceable_head_dim: raise ValueError(f"slice_size {slice_size} has to be smaller or equal to {self.sliceable_head_dim}.") processor = None # Try to get a compatible processor for sliced attention if slice_size is not None: processor = self._get_compatible_processor("sliced") # If no processor was found or slice_size is None, use default processor if processor is None: processor = self.default_processor_cls() self.set_processor(processor) def batch_to_head_dim(self, tensor: torch.Tensor) -> torch.Tensor: """ Reshape the tensor from `[batch_size, seq_len, dim]` to `[batch_size // heads, seq_len, dim * heads]`. Args: tensor (`torch.Tensor`): The tensor to reshape. Returns: `torch.Tensor`: The reshaped tensor. """ head_size = self.heads batch_size, seq_len, dim = tensor.shape tensor = tensor.reshape(batch_size // head_size, head_size, seq_len, dim) tensor = tensor.permute(0, 2, 1, 3).reshape(batch_size // head_size, seq_len, dim * head_size) return tensor def head_to_batch_dim(self, tensor: torch.Tensor, out_dim: int = 3) -> torch.Tensor: """ Reshape the tensor for multi-head attention processing. Args: tensor (`torch.Tensor`): The tensor to reshape. out_dim (`int`, *optional*, defaults to `3`): The output dimension of the tensor. Returns: `torch.Tensor`: The reshaped tensor. """ head_size = self.heads if tensor.ndim == 3: batch_size, seq_len, dim = tensor.shape extra_dim = 1 else: batch_size, extra_dim, seq_len, dim = tensor.shape tensor = tensor.reshape(batch_size, seq_len * extra_dim, head_size, dim // head_size) tensor = tensor.permute(0, 2, 1, 3) if out_dim == 3: tensor = tensor.reshape(batch_size * head_size, seq_len * extra_dim, dim // head_size) return tensor def get_attention_scores( self, query: torch.Tensor, key: torch.Tensor, attention_mask: Optional[torch.Tensor] = None ) -> torch.Tensor: """ Compute the attention scores. Args: query (`torch.Tensor`): The query tensor. key (`torch.Tensor`): The key tensor. attention_mask (`torch.Tensor`, *optional*): The attention mask to use. Returns: `torch.Tensor`: The attention probabilities/scores. """ dtype = query.dtype if self.upcast_attention: query = query.float() key = key.float() if attention_mask is None: baddbmm_input = torch.empty( query.shape[0], query.shape[1], key.shape[1], dtype=query.dtype, device=query.device ) beta = 0 else: baddbmm_input = attention_mask beta = 1 attention_scores = torch.baddbmm( baddbmm_input, query, key.transpose(-1, -2), beta=beta, alpha=self.scale, ) del baddbmm_input if self.upcast_softmax: attention_scores = attention_scores.float() attention_probs = attention_scores.softmax(dim=-1) del attention_scores attention_probs = attention_probs.to(dtype) return attention_probs def prepare_attention_mask( self, attention_mask: torch.Tensor, target_length: int, batch_size: int, out_dim: int = 3 ) -> torch.Tensor: """ Prepare the attention mask for the attention computation. Args: attention_mask (`torch.Tensor`): The attention mask to prepare. 
target_length (`int`): The target length of the attention mask. batch_size (`int`): The batch size for repeating the attention mask. out_dim (`int`, *optional*, defaults to `3`): Output dimension. Returns: `torch.Tensor`: The prepared attention mask. """ head_size = self.heads if attention_mask is None: return attention_mask current_length: int = attention_mask.shape[-1] if current_length != target_length: if attention_mask.device.type == "mps": # HACK: MPS: Does not support padding by greater than dimension of input tensor. # Instead, we can manually construct the padding tensor. padding_shape = (attention_mask.shape[0], attention_mask.shape[1], target_length) padding = torch.zeros(padding_shape, dtype=attention_mask.dtype, device=attention_mask.device) attention_mask = torch.cat([attention_mask, padding], dim=2) else: # TODO: for pipelines such as stable-diffusion, padding cross-attn mask: # we want to instead pad by (0, remaining_length), where remaining_length is: # remaining_length: int = target_length - current_length # TODO: re-enable tests/models/test_models_unet_2d_condition.py#test_model_xattn_padding attention_mask = F.pad(attention_mask, (0, target_length), value=0.0) if out_dim == 3: if attention_mask.shape[0] < batch_size * head_size: attention_mask = attention_mask.repeat_interleave(head_size, dim=0) elif out_dim == 4: attention_mask = attention_mask.unsqueeze(1) attention_mask = attention_mask.repeat_interleave(head_size, dim=1) return attention_mask def norm_encoder_hidden_states(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor: """ Normalize the encoder hidden states. Args: encoder_hidden_states (`torch.Tensor`): Hidden states of the encoder. Returns: `torch.Tensor`: The normalized encoder hidden states. """ assert self.norm_cross is not None, "self.norm_cross must be defined to call self.norm_encoder_hidden_states" if isinstance(self.norm_cross, nn.LayerNorm): encoder_hidden_states = self.norm_cross(encoder_hidden_states) elif isinstance(self.norm_cross, nn.GroupNorm): # Group norm norms along the channels dimension and expects # input to be in the shape of (N, C, *). In this case, we want # to norm along the hidden dimension, so we need to move # (batch_size, sequence_length, hidden_size) -> # (batch_size, hidden_size, sequence_length) encoder_hidden_states = encoder_hidden_states.transpose(1, 2) encoder_hidden_states = self.norm_cross(encoder_hidden_states) encoder_hidden_states = encoder_hidden_states.transpose(1, 2) else: assert False return encoder_hidden_states def _chunked_feed_forward(ff: nn.Module, hidden_states: torch.Tensor, chunk_dim: int, chunk_size: int): # "feed_forward_chunk_size" can be used to save memory if hidden_states.shape[chunk_dim] % chunk_size != 0: raise ValueError( f"`hidden_states` dimension to be chunked: {hidden_states.shape[chunk_dim]} has to be divisible by chunk size: {chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`." ) num_chunks = hidden_states.shape[chunk_dim] // chunk_size ff_output = torch.cat( [ff(hid_slice) for hid_slice in hidden_states.chunk(num_chunks, dim=chunk_dim)], dim=chunk_dim, ) return ff_output @maybe_allow_in_graph class GatedSelfAttentionDense(nn.Module): r""" A gated self-attention dense layer that combines visual features and object features. Parameters: query_dim (`int`): The number of channels in the query. context_dim (`int`): The number of channels in the context. n_heads (`int`): The number of heads to use for attention. 
d_head (`int`): The number of channels in each head. """ def __init__(self, query_dim: int, context_dim: int, n_heads: int, d_head: int): super().__init__() # we need a linear projection since we need cat visual feature and obj feature self.linear = nn.Linear(context_dim, query_dim) self.attn = Attention(query_dim=query_dim, heads=n_heads, dim_head=d_head) self.ff = FeedForward(query_dim, activation_fn="geglu") self.norm1 = nn.LayerNorm(query_dim) self.norm2 = nn.LayerNorm(query_dim) self.register_parameter("alpha_attn", nn.Parameter(torch.tensor(0.0))) self.register_parameter("alpha_dense", nn.Parameter(torch.tensor(0.0))) self.enabled = True def forward(self, x: torch.Tensor, objs: torch.Tensor) -> torch.Tensor: if not self.enabled: return x n_visual = x.shape[1] objs = self.linear(objs) x = x + self.alpha_attn.tanh() * self.attn(self.norm1(torch.cat([x, objs], dim=1)))[:, :n_visual, :] x = x + self.alpha_dense.tanh() * self.ff(self.norm2(x)) return x @maybe_allow_in_graph class JointTransformerBlock(nn.Module): r""" A Transformer block following the MMDiT architecture, introduced in Stable Diffusion 3. Reference: https://huggingface.co/papers/2403.03206 Parameters: dim (`int`): The number of channels in the input and output. num_attention_heads (`int`): The number of heads to use for multi-head attention. attention_head_dim (`int`): The number of channels in each head. context_pre_only (`bool`): Boolean to determine if we should add some blocks associated with the processing of `context` conditions. """ def __init__( self, dim: int, num_attention_heads: int, attention_head_dim: int, context_pre_only: bool = False, qk_norm: Optional[str] = None, use_dual_attention: bool = False, ): super().__init__() self.use_dual_attention = use_dual_attention self.context_pre_only = context_pre_only context_norm_type = "ada_norm_continous" if context_pre_only else "ada_norm_zero" if use_dual_attention: self.norm1 = SD35AdaLayerNormZeroX(dim) else: self.norm1 = AdaLayerNormZero(dim) if context_norm_type == "ada_norm_continous": self.norm1_context = AdaLayerNormContinuous( dim, dim, elementwise_affine=False, eps=1e-6, bias=True, norm_type="layer_norm" ) elif context_norm_type == "ada_norm_zero": self.norm1_context = AdaLayerNormZero(dim) else: raise ValueError( f"Unknown context_norm_type: {context_norm_type}, currently only support `ada_norm_continous`, `ada_norm_zero`" ) if hasattr(F, "scaled_dot_product_attention"): processor = JointAttnProcessor2_0() else: raise ValueError( "The current PyTorch version does not support the `scaled_dot_product_attention` function." 
) self.attn = Attention( query_dim=dim, cross_attention_dim=None, added_kv_proj_dim=dim, dim_head=attention_head_dim, heads=num_attention_heads, out_dim=dim, context_pre_only=context_pre_only, bias=True, processor=processor, qk_norm=qk_norm, eps=1e-6, ) if use_dual_attention: self.attn2 = Attention( query_dim=dim, cross_attention_dim=None, dim_head=attention_head_dim, heads=num_attention_heads, out_dim=dim, bias=True, processor=processor, qk_norm=qk_norm, eps=1e-6, ) else: self.attn2 = None self.norm2 = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-6) self.ff = FeedForward(dim=dim, dim_out=dim, activation_fn="gelu-approximate") if not context_pre_only: self.norm2_context = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-6) self.ff_context = FeedForward(dim=dim, dim_out=dim, activation_fn="gelu-approximate") else: self.norm2_context = None self.ff_context = None # let chunk size default to None self._chunk_size = None self._chunk_dim = 0 # Copied from diffusers.models.attention.BasicTransformerBlock.set_chunk_feed_forward def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int = 0): # Sets chunk feed-forward self._chunk_size = chunk_size self._chunk_dim = dim def forward( self, hidden_states: torch.FloatTensor, encoder_hidden_states: torch.FloatTensor, temb: torch.FloatTensor, joint_attention_kwargs: Optional[Dict[str, Any]] = None, ): joint_attention_kwargs = joint_attention_kwargs or {} if self.use_dual_attention: norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp, norm_hidden_states2, gate_msa2 = self.norm1( hidden_states, emb=temb ) else: norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) if self.context_pre_only: norm_encoder_hidden_states = self.norm1_context(encoder_hidden_states, temb) else: norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( encoder_hidden_states, emb=temb ) # Attention. attn_output, context_attn_output = self.attn( hidden_states=norm_hidden_states, encoder_hidden_states=norm_encoder_hidden_states, **joint_attention_kwargs, ) # Process attention outputs for the `hidden_states`. attn_output = gate_msa.unsqueeze(1) * attn_output hidden_states = hidden_states + attn_output if self.use_dual_attention: attn_output2 = self.attn2(hidden_states=norm_hidden_states2, **joint_attention_kwargs) attn_output2 = gate_msa2.unsqueeze(1) * attn_output2 hidden_states = hidden_states + attn_output2 norm_hidden_states = self.norm2(hidden_states) norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None] if self._chunk_size is not None: # "feed_forward_chunk_size" can be used to save memory ff_output = _chunked_feed_forward(self.ff, norm_hidden_states, self._chunk_dim, self._chunk_size) else: ff_output = self.ff(norm_hidden_states) ff_output = gate_mlp.unsqueeze(1) * ff_output hidden_states = hidden_states + ff_output # Process attention outputs for the `encoder_hidden_states`. 
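        # When `context_pre_only` is True the context stream is not updated by this block and
        # `encoder_hidden_states` is returned as None; otherwise the gated attention and
        # feed-forward outputs are added to it, mirroring the image-stream path above.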
if self.context_pre_only: encoder_hidden_states = None else: context_attn_output = c_gate_msa.unsqueeze(1) * context_attn_output encoder_hidden_states = encoder_hidden_states + context_attn_output norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) norm_encoder_hidden_states = norm_encoder_hidden_states * (1 + c_scale_mlp[:, None]) + c_shift_mlp[:, None] if self._chunk_size is not None: # "feed_forward_chunk_size" can be used to save memory context_ff_output = _chunked_feed_forward( self.ff_context, norm_encoder_hidden_states, self._chunk_dim, self._chunk_size ) else: context_ff_output = self.ff_context(norm_encoder_hidden_states) encoder_hidden_states = encoder_hidden_states + c_gate_mlp.unsqueeze(1) * context_ff_output return encoder_hidden_states, hidden_states @maybe_allow_in_graph class BasicTransformerBlock(nn.Module): r""" A basic Transformer block. Parameters: dim (`int`): The number of channels in the input and output. num_attention_heads (`int`): The number of heads to use for multi-head attention. attention_head_dim (`int`): The number of channels in each head. dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use. cross_attention_dim (`int`, *optional*): The size of the encoder_hidden_states vector for cross attention. activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward. num_embeds_ada_norm (: obj: `int`, *optional*): The number of diffusion steps used during training. See `Transformer2DModel`. attention_bias (: obj: `bool`, *optional*, defaults to `False`): Configure if the attentions should contain a bias parameter. only_cross_attention (`bool`, *optional*): Whether to use only cross-attention layers. In this case two cross attention layers are used. double_self_attention (`bool`, *optional*): Whether to use two self-attention layers. In this case no cross attention layers are used. upcast_attention (`bool`, *optional*): Whether to upcast the attention computation to float32. This is useful for mixed precision training. norm_elementwise_affine (`bool`, *optional*, defaults to `True`): Whether to use learnable elementwise affine parameters for normalization. norm_type (`str`, *optional*, defaults to `"layer_norm"`): The normalization layer to use. Can be `"layer_norm"`, `"ada_norm"` or `"ada_norm_zero"`. final_dropout (`bool` *optional*, defaults to False): Whether to apply a final dropout after the last feed-forward layer. attention_type (`str`, *optional*, defaults to `"default"`): The type of attention to use. Can be `"default"` or `"gated"` or `"gated-text-image"`. positional_embeddings (`str`, *optional*, defaults to `None`): The type of positional embeddings to apply to. num_positional_embeddings (`int`, *optional*, defaults to `None`): The maximum number of positional embeddings to apply. 
""" def __init__( self, dim: int, num_attention_heads: int, attention_head_dim: int, dropout=0.0, cross_attention_dim: Optional[int] = None, activation_fn: str = "geglu", num_embeds_ada_norm: Optional[int] = None, attention_bias: bool = False, only_cross_attention: bool = False, double_self_attention: bool = False, upcast_attention: bool = False, norm_elementwise_affine: bool = True, norm_type: str = "layer_norm", # 'layer_norm', 'ada_norm', 'ada_norm_zero', 'ada_norm_single', 'ada_norm_continuous', 'layer_norm_i2vgen' norm_eps: float = 1e-5, final_dropout: bool = False, attention_type: str = "default", positional_embeddings: Optional[str] = None, num_positional_embeddings: Optional[int] = None, ada_norm_continous_conditioning_embedding_dim: Optional[int] = None, ada_norm_bias: Optional[int] = None, ff_inner_dim: Optional[int] = None, ff_bias: bool = True, attention_out_bias: bool = True, ): super().__init__() self.dim = dim self.num_attention_heads = num_attention_heads self.attention_head_dim = attention_head_dim self.dropout = dropout self.cross_attention_dim = cross_attention_dim self.activation_fn = activation_fn self.attention_bias = attention_bias self.double_self_attention = double_self_attention self.norm_elementwise_affine = norm_elementwise_affine self.positional_embeddings = positional_embeddings self.num_positional_embeddings = num_positional_embeddings self.only_cross_attention = only_cross_attention # We keep these boolean flags for backward-compatibility. self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero" self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm" self.use_ada_layer_norm_single = norm_type == "ada_norm_single" self.use_layer_norm = norm_type == "layer_norm" self.use_ada_layer_norm_continuous = norm_type == "ada_norm_continuous" if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None: raise ValueError( f"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to" f" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}." ) self.norm_type = norm_type self.num_embeds_ada_norm = num_embeds_ada_norm if positional_embeddings and (num_positional_embeddings is None): raise ValueError( "If `positional_embedding` type is defined, `num_positition_embeddings` must also be defined." ) if positional_embeddings == "sinusoidal": self.pos_embed = SinusoidalPositionalEmbedding(dim, max_seq_length=num_positional_embeddings) else: self.pos_embed = None # Define 3 blocks. Each block has its own normalization layer. # 1. Self-Attn if norm_type == "ada_norm": self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm) elif norm_type == "ada_norm_zero": self.norm1 = AdaLayerNormZero(dim, num_embeds_ada_norm) elif norm_type == "ada_norm_continuous": self.norm1 = AdaLayerNormContinuous( dim, ada_norm_continous_conditioning_embedding_dim, norm_elementwise_affine, norm_eps, ada_norm_bias, "rms_norm", ) else: self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine, eps=norm_eps) self.attn1 = Attention( query_dim=dim, heads=num_attention_heads, dim_head=attention_head_dim, dropout=dropout, bias=attention_bias, cross_attention_dim=cross_attention_dim if only_cross_attention else None, upcast_attention=upcast_attention, out_bias=attention_out_bias, ) # 2. Cross-Attn if cross_attention_dim is not None or double_self_attention: # We currently only use AdaLayerNormZero for self attention where there will only be one attention block. 
# I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during # the second cross attention block. if norm_type == "ada_norm": self.norm2 = AdaLayerNorm(dim, num_embeds_ada_norm) elif norm_type == "ada_norm_continuous": self.norm2 = AdaLayerNormContinuous( dim, ada_norm_continous_conditioning_embedding_dim, norm_elementwise_affine, norm_eps, ada_norm_bias, "rms_norm", ) else: self.norm2 = nn.LayerNorm(dim, norm_eps, norm_elementwise_affine) self.attn2 = Attention( query_dim=dim, cross_attention_dim=cross_attention_dim if not double_self_attention else None, heads=num_attention_heads, dim_head=attention_head_dim, dropout=dropout, bias=attention_bias, upcast_attention=upcast_attention, out_bias=attention_out_bias, ) # is self-attn if encoder_hidden_states is none else: if norm_type == "ada_norm_single": # For Latte self.norm2 = nn.LayerNorm(dim, norm_eps, norm_elementwise_affine) else: self.norm2 = None self.attn2 = None # 3. Feed-forward if norm_type == "ada_norm_continuous": self.norm3 = AdaLayerNormContinuous( dim, ada_norm_continous_conditioning_embedding_dim, norm_elementwise_affine, norm_eps, ada_norm_bias, "layer_norm", ) elif norm_type in ["ada_norm_zero", "ada_norm", "layer_norm"]: self.norm3 = nn.LayerNorm(dim, norm_eps, norm_elementwise_affine) elif norm_type == "layer_norm_i2vgen": self.norm3 = None self.ff = FeedForward( dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout, inner_dim=ff_inner_dim, bias=ff_bias, ) # 4. Fuser if attention_type == "gated" or attention_type == "gated-text-image": self.fuser = GatedSelfAttentionDense(dim, cross_attention_dim, num_attention_heads, attention_head_dim) # 5. Scale-shift for PixArt-Alpha. if norm_type == "ada_norm_single": self.scale_shift_table = nn.Parameter(torch.randn(6, dim) / dim**0.5) # let chunk size default to None self._chunk_size = None self._chunk_dim = 0 def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int = 0): # Sets chunk feed-forward self._chunk_size = chunk_size self._chunk_dim = dim def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, timestep: Optional[torch.LongTensor] = None, cross_attention_kwargs: Dict[str, Any] = None, class_labels: Optional[torch.LongTensor] = None, added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None, ) -> torch.Tensor: if cross_attention_kwargs is not None: if cross_attention_kwargs.get("scale", None) is not None: logger.warning("Passing `scale` to `cross_attention_kwargs` is deprecated. `scale` will be ignored.") # Notice that normalization is always applied before the real computation in the following blocks. # 0. 
Self-Attention batch_size = hidden_states.shape[0] if self.norm_type == "ada_norm": norm_hidden_states = self.norm1(hidden_states, timestep) elif self.norm_type == "ada_norm_zero": norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1( hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype ) elif self.norm_type in ["layer_norm", "layer_norm_i2vgen"]: norm_hidden_states = self.norm1(hidden_states) elif self.norm_type == "ada_norm_continuous": norm_hidden_states = self.norm1(hidden_states, added_cond_kwargs["pooled_text_emb"]) elif self.norm_type == "ada_norm_single": shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = ( self.scale_shift_table[None] + timestep.reshape(batch_size, 6, -1) ).chunk(6, dim=1) norm_hidden_states = self.norm1(hidden_states) norm_hidden_states = norm_hidden_states * (1 + scale_msa) + shift_msa else: raise ValueError("Incorrect norm used") if self.pos_embed is not None: norm_hidden_states = self.pos_embed(norm_hidden_states) # 1. Prepare GLIGEN inputs cross_attention_kwargs = cross_attention_kwargs.copy() if cross_attention_kwargs is not None else {} gligen_kwargs = cross_attention_kwargs.pop("gligen", None) attn_output = self.attn1( norm_hidden_states, encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None, attention_mask=attention_mask, **cross_attention_kwargs, ) if self.norm_type == "ada_norm_zero": attn_output = gate_msa.unsqueeze(1) * attn_output elif self.norm_type == "ada_norm_single": attn_output = gate_msa * attn_output hidden_states = attn_output + hidden_states if hidden_states.ndim == 4: hidden_states = hidden_states.squeeze(1) # 1.2 GLIGEN Control if gligen_kwargs is not None: hidden_states = self.fuser(hidden_states, gligen_kwargs["objs"]) # 3. Cross-Attention if self.attn2 is not None: if self.norm_type == "ada_norm": norm_hidden_states = self.norm2(hidden_states, timestep) elif self.norm_type in ["ada_norm_zero", "layer_norm", "layer_norm_i2vgen"]: norm_hidden_states = self.norm2(hidden_states) elif self.norm_type == "ada_norm_single": # For PixArt norm2 isn't applied here: # https://github.com/PixArt-alpha/PixArt-alpha/blob/0f55e922376d8b797edd44d25d0e7464b260dcab/diffusion/model/nets/PixArtMS.py#L70C1-L76C103 norm_hidden_states = hidden_states elif self.norm_type == "ada_norm_continuous": norm_hidden_states = self.norm2(hidden_states, added_cond_kwargs["pooled_text_emb"]) else: raise ValueError("Incorrect norm") if self.pos_embed is not None and self.norm_type != "ada_norm_single": norm_hidden_states = self.pos_embed(norm_hidden_states) attn_output = self.attn2( norm_hidden_states, encoder_hidden_states=encoder_hidden_states, attention_mask=encoder_attention_mask, **cross_attention_kwargs, ) hidden_states = attn_output + hidden_states # 4. 
Feed-forward # i2vgen doesn't have this norm 🤷‍♂️ if self.norm_type == "ada_norm_continuous": norm_hidden_states = self.norm3(hidden_states, added_cond_kwargs["pooled_text_emb"]) elif not self.norm_type == "ada_norm_single": norm_hidden_states = self.norm3(hidden_states) if self.norm_type == "ada_norm_zero": norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None] if self.norm_type == "ada_norm_single": norm_hidden_states = self.norm2(hidden_states) norm_hidden_states = norm_hidden_states * (1 + scale_mlp) + shift_mlp if self._chunk_size is not None: # "feed_forward_chunk_size" can be used to save memory ff_output = _chunked_feed_forward(self.ff, norm_hidden_states, self._chunk_dim, self._chunk_size) else: ff_output = self.ff(norm_hidden_states) if self.norm_type == "ada_norm_zero": ff_output = gate_mlp.unsqueeze(1) * ff_output elif self.norm_type == "ada_norm_single": ff_output = gate_mlp * ff_output hidden_states = ff_output + hidden_states if hidden_states.ndim == 4: hidden_states = hidden_states.squeeze(1) return hidden_states class LuminaFeedForward(nn.Module): r""" A feed-forward layer. Parameters: hidden_size (`int`): The dimensionality of the hidden layers in the model. This parameter determines the width of the model's hidden representations. intermediate_size (`int`): The intermediate dimension of the feedforward layer. multiple_of (`int`, *optional*): Value to ensure hidden dimension is a multiple of this value. ffn_dim_multiplier (float, *optional*): Custom multiplier for hidden dimension. Defaults to None. """ def __init__( self, dim: int, inner_dim: int, multiple_of: Optional[int] = 256, ffn_dim_multiplier: Optional[float] = None, ): super().__init__() # custom hidden_size factor multiplier if ffn_dim_multiplier is not None: inner_dim = int(ffn_dim_multiplier * inner_dim) inner_dim = multiple_of * ((inner_dim + multiple_of - 1) // multiple_of) self.linear_1 = nn.Linear( dim, inner_dim, bias=False, ) self.linear_2 = nn.Linear( inner_dim, dim, bias=False, ) self.linear_3 = nn.Linear( dim, inner_dim, bias=False, ) self.silu = FP32SiLU() def forward(self, x): return self.linear_2(self.silu(self.linear_1(x)) * self.linear_3(x)) @maybe_allow_in_graph class TemporalBasicTransformerBlock(nn.Module): r""" A basic Transformer block for video like data. Parameters: dim (`int`): The number of channels in the input and output. time_mix_inner_dim (`int`): The number of channels for temporal attention. num_attention_heads (`int`): The number of heads to use for multi-head attention. attention_head_dim (`int`): The number of channels in each head. cross_attention_dim (`int`, *optional*): The size of the encoder_hidden_states vector for cross attention. """ def __init__( self, dim: int, time_mix_inner_dim: int, num_attention_heads: int, attention_head_dim: int, cross_attention_dim: Optional[int] = None, ): super().__init__() self.is_res = dim == time_mix_inner_dim self.norm_in = nn.LayerNorm(dim) # Define 3 blocks. Each block has its own normalization layer. # 1. Self-Attn self.ff_in = FeedForward( dim, dim_out=time_mix_inner_dim, activation_fn="geglu", ) self.norm1 = nn.LayerNorm(time_mix_inner_dim) self.attn1 = Attention( query_dim=time_mix_inner_dim, heads=num_attention_heads, dim_head=attention_head_dim, cross_attention_dim=None, ) # 2. Cross-Attn if cross_attention_dim is not None: # We currently only use AdaLayerNormZero for self attention where there will only be one attention block. # I.e. 
the number of returned modulation chunks from AdaLayerZero would not make sense if returned during # the second cross attention block. self.norm2 = nn.LayerNorm(time_mix_inner_dim) self.attn2 = Attention( query_dim=time_mix_inner_dim, cross_attention_dim=cross_attention_dim, heads=num_attention_heads, dim_head=attention_head_dim, ) # is self-attn if encoder_hidden_states is none else: self.norm2 = None self.attn2 = None # 3. Feed-forward self.norm3 = nn.LayerNorm(time_mix_inner_dim) self.ff = FeedForward(time_mix_inner_dim, activation_fn="geglu") # let chunk size default to None self._chunk_size = None self._chunk_dim = None def set_chunk_feed_forward(self, chunk_size: Optional[int], **kwargs): # Sets chunk feed-forward self._chunk_size = chunk_size # chunk dim should be hardcoded to 1 to have better speed vs. memory trade-off self._chunk_dim = 1 def forward( self, hidden_states: torch.Tensor, num_frames: int, encoder_hidden_states: Optional[torch.Tensor] = None, ) -> torch.Tensor: # Notice that normalization is always applied before the real computation in the following blocks. # 0. Self-Attention batch_size = hidden_states.shape[0] batch_frames, seq_length, channels = hidden_states.shape batch_size = batch_frames // num_frames hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, seq_length, channels) hidden_states = hidden_states.permute(0, 2, 1, 3) hidden_states = hidden_states.reshape(batch_size * seq_length, num_frames, channels) residual = hidden_states hidden_states = self.norm_in(hidden_states) if self._chunk_size is not None: hidden_states = _chunked_feed_forward(self.ff_in, hidden_states, self._chunk_dim, self._chunk_size) else: hidden_states = self.ff_in(hidden_states) if self.is_res: hidden_states = hidden_states + residual norm_hidden_states = self.norm1(hidden_states) attn_output = self.attn1(norm_hidden_states, encoder_hidden_states=None) hidden_states = attn_output + hidden_states # 3. Cross-Attention if self.attn2 is not None: norm_hidden_states = self.norm2(hidden_states) attn_output = self.attn2(norm_hidden_states, encoder_hidden_states=encoder_hidden_states) hidden_states = attn_output + hidden_states # 4. 
Feed-forward norm_hidden_states = self.norm3(hidden_states) if self._chunk_size is not None: ff_output = _chunked_feed_forward(self.ff, norm_hidden_states, self._chunk_dim, self._chunk_size) else: ff_output = self.ff(norm_hidden_states) if self.is_res: hidden_states = ff_output + hidden_states else: hidden_states = ff_output hidden_states = hidden_states[None, :].reshape(batch_size, seq_length, num_frames, channels) hidden_states = hidden_states.permute(0, 2, 1, 3) hidden_states = hidden_states.reshape(batch_size * num_frames, seq_length, channels) return hidden_states class SkipFFTransformerBlock(nn.Module): def __init__( self, dim: int, num_attention_heads: int, attention_head_dim: int, kv_input_dim: int, kv_input_dim_proj_use_bias: bool, dropout=0.0, cross_attention_dim: Optional[int] = None, attention_bias: bool = False, attention_out_bias: bool = True, ): super().__init__() if kv_input_dim != dim: self.kv_mapper = nn.Linear(kv_input_dim, dim, kv_input_dim_proj_use_bias) else: self.kv_mapper = None self.norm1 = RMSNorm(dim, 1e-06) self.attn1 = Attention( query_dim=dim, heads=num_attention_heads, dim_head=attention_head_dim, dropout=dropout, bias=attention_bias, cross_attention_dim=cross_attention_dim, out_bias=attention_out_bias, ) self.norm2 = RMSNorm(dim, 1e-06) self.attn2 = Attention( query_dim=dim, cross_attention_dim=cross_attention_dim, heads=num_attention_heads, dim_head=attention_head_dim, dropout=dropout, bias=attention_bias, out_bias=attention_out_bias, ) def forward(self, hidden_states, encoder_hidden_states, cross_attention_kwargs): cross_attention_kwargs = cross_attention_kwargs.copy() if cross_attention_kwargs is not None else {} if self.kv_mapper is not None: encoder_hidden_states = self.kv_mapper(F.silu(encoder_hidden_states)) norm_hidden_states = self.norm1(hidden_states) attn_output = self.attn1( norm_hidden_states, encoder_hidden_states=encoder_hidden_states, **cross_attention_kwargs, ) hidden_states = attn_output + hidden_states norm_hidden_states = self.norm2(hidden_states) attn_output = self.attn2( norm_hidden_states, encoder_hidden_states=encoder_hidden_states, **cross_attention_kwargs, ) hidden_states = attn_output + hidden_states return hidden_states @maybe_allow_in_graph class FreeNoiseTransformerBlock(nn.Module): r""" A FreeNoise Transformer block. Parameters: dim (`int`): The number of channels in the input and output. num_attention_heads (`int`): The number of heads to use for multi-head attention. attention_head_dim (`int`): The number of channels in each head. dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use. cross_attention_dim (`int`, *optional*): The size of the encoder_hidden_states vector for cross attention. activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward. num_embeds_ada_norm (`int`, *optional*): The number of diffusion steps used during training. See `Transformer2DModel`. attention_bias (`bool`, defaults to `False`): Configure if the attentions should contain a bias parameter. only_cross_attention (`bool`, defaults to `False`): Whether to use only cross-attention layers. In this case two cross attention layers are used. double_self_attention (`bool`, defaults to `False`): Whether to use two self-attention layers. In this case no cross attention layers are used. upcast_attention (`bool`, defaults to `False`): Whether to upcast the attention computation to float32. This is useful for mixed precision training. 
norm_elementwise_affine (`bool`, defaults to `True`): Whether to use learnable elementwise affine parameters for normalization. norm_type (`str`, defaults to `"layer_norm"`): The normalization layer to use. Can be `"layer_norm"`, `"ada_norm"` or `"ada_norm_zero"`. final_dropout (`bool` defaults to `False`): Whether to apply a final dropout after the last feed-forward layer. attention_type (`str`, defaults to `"default"`): The type of attention to use. Can be `"default"` or `"gated"` or `"gated-text-image"`. positional_embeddings (`str`, *optional*): The type of positional embeddings to apply to. num_positional_embeddings (`int`, *optional*, defaults to `None`): The maximum number of positional embeddings to apply. ff_inner_dim (`int`, *optional*): Hidden dimension of feed-forward MLP. ff_bias (`bool`, defaults to `True`): Whether or not to use bias in feed-forward MLP. attention_out_bias (`bool`, defaults to `True`): Whether or not to use bias in attention output project layer. context_length (`int`, defaults to `16`): The maximum number of frames that the FreeNoise block processes at once. context_stride (`int`, defaults to `4`): The number of frames to be skipped before starting to process a new batch of `context_length` frames. weighting_scheme (`str`, defaults to `"pyramid"`): The weighting scheme to use for weighting averaging of processed latent frames. As described in the Equation 9. of the [FreeNoise](https://huggingface.co/papers/2310.15169) paper, "pyramid" is the default setting used. """ def __init__( self, dim: int, num_attention_heads: int, attention_head_dim: int, dropout: float = 0.0, cross_attention_dim: Optional[int] = None, activation_fn: str = "geglu", num_embeds_ada_norm: Optional[int] = None, attention_bias: bool = False, only_cross_attention: bool = False, double_self_attention: bool = False, upcast_attention: bool = False, norm_elementwise_affine: bool = True, norm_type: str = "layer_norm", norm_eps: float = 1e-5, final_dropout: bool = False, positional_embeddings: Optional[str] = None, num_positional_embeddings: Optional[int] = None, ff_inner_dim: Optional[int] = None, ff_bias: bool = True, attention_out_bias: bool = True, context_length: int = 16, context_stride: int = 4, weighting_scheme: str = "pyramid", ): super().__init__() self.dim = dim self.num_attention_heads = num_attention_heads self.attention_head_dim = attention_head_dim self.dropout = dropout self.cross_attention_dim = cross_attention_dim self.activation_fn = activation_fn self.attention_bias = attention_bias self.double_self_attention = double_self_attention self.norm_elementwise_affine = norm_elementwise_affine self.positional_embeddings = positional_embeddings self.num_positional_embeddings = num_positional_embeddings self.only_cross_attention = only_cross_attention self.set_free_noise_properties(context_length, context_stride, weighting_scheme) # We keep these boolean flags for backward-compatibility. self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero" self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm" self.use_ada_layer_norm_single = norm_type == "ada_norm_single" self.use_layer_norm = norm_type == "layer_norm" self.use_ada_layer_norm_continuous = norm_type == "ada_norm_continuous" if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None: raise ValueError( f"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. 
Please make sure to" f" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}." ) self.norm_type = norm_type self.num_embeds_ada_norm = num_embeds_ada_norm if positional_embeddings and (num_positional_embeddings is None): raise ValueError( "If `positional_embedding` type is defined, `num_positition_embeddings` must also be defined." ) if positional_embeddings == "sinusoidal": self.pos_embed = SinusoidalPositionalEmbedding(dim, max_seq_length=num_positional_embeddings) else: self.pos_embed = None # Define 3 blocks. Each block has its own normalization layer. # 1. Self-Attn self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine, eps=norm_eps) self.attn1 = Attention( query_dim=dim, heads=num_attention_heads, dim_head=attention_head_dim, dropout=dropout, bias=attention_bias, cross_attention_dim=cross_attention_dim if only_cross_attention else None, upcast_attention=upcast_attention, out_bias=attention_out_bias, ) # 2. Cross-Attn if cross_attention_dim is not None or double_self_attention: self.norm2 = nn.LayerNorm(dim, norm_eps, norm_elementwise_affine) self.attn2 = Attention( query_dim=dim, cross_attention_dim=cross_attention_dim if not double_self_attention else None, heads=num_attention_heads, dim_head=attention_head_dim, dropout=dropout, bias=attention_bias, upcast_attention=upcast_attention, out_bias=attention_out_bias, ) # is self-attn if encoder_hidden_states is none # 3. Feed-forward self.ff = FeedForward( dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout, inner_dim=ff_inner_dim, bias=ff_bias, ) self.norm3 = nn.LayerNorm(dim, norm_eps, norm_elementwise_affine) # let chunk size default to None self._chunk_size = None self._chunk_dim = 0 def _get_frame_indices(self, num_frames: int) -> List[Tuple[int, int]]: frame_indices = [] for i in range(0, num_frames - self.context_length + 1, self.context_stride): window_start = i window_end = min(num_frames, i + self.context_length) frame_indices.append((window_start, window_end)) return frame_indices def _get_frame_weights(self, num_frames: int, weighting_scheme: str = "pyramid") -> List[float]: if weighting_scheme == "flat": weights = [1.0] * num_frames elif weighting_scheme == "pyramid": if num_frames % 2 == 0: # num_frames = 4 => [1, 2, 2, 1] mid = num_frames // 2 weights = list(range(1, mid + 1)) weights = weights + weights[::-1] else: # num_frames = 5 => [1, 2, 3, 2, 1] mid = (num_frames + 1) // 2 weights = list(range(1, mid)) weights = weights + [mid] + weights[::-1] elif weighting_scheme == "delayed_reverse_sawtooth": if num_frames % 2 == 0: # num_frames = 4 => [0.01, 2, 2, 1] mid = num_frames // 2 weights = [0.01] * (mid - 1) + [mid] weights = weights + list(range(mid, 0, -1)) else: # num_frames = 5 => [0.01, 0.01, 3, 2, 1] mid = (num_frames + 1) // 2 weights = [0.01] * mid weights = weights + list(range(mid, 0, -1)) else: raise ValueError(f"Unsupported value for weighting_scheme={weighting_scheme}") return weights def set_free_noise_properties( self, context_length: int, context_stride: int, weighting_scheme: str = "pyramid" ) -> None: self.context_length = context_length self.context_stride = context_stride self.weighting_scheme = weighting_scheme def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int = 0) -> None: # Sets chunk feed-forward self._chunk_size = chunk_size self._chunk_dim = dim def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: 
Optional[torch.Tensor] = None, cross_attention_kwargs: Dict[str, Any] = None, *args, **kwargs, ) -> torch.Tensor: if cross_attention_kwargs is not None: if cross_attention_kwargs.get("scale", None) is not None: logger.warning("Passing `scale` to `cross_attention_kwargs` is deprecated. `scale` will be ignored.") cross_attention_kwargs = cross_attention_kwargs.copy() if cross_attention_kwargs is not None else {} # hidden_states: [B x H x W, F, C] device = hidden_states.device dtype = hidden_states.dtype num_frames = hidden_states.size(1) frame_indices = self._get_frame_indices(num_frames) frame_weights = self._get_frame_weights(self.context_length, self.weighting_scheme) frame_weights = torch.tensor(frame_weights, device=device, dtype=dtype).unsqueeze(0).unsqueeze(-1) is_last_frame_batch_complete = frame_indices[-1][1] == num_frames # Handle out-of-bounds case if num_frames isn't perfectly divisible by context_length # For example, num_frames=25, context_length=16, context_stride=4, then we expect the ranges: # [(0, 16), (4, 20), (8, 24), (10, 26)] if not is_last_frame_batch_complete: if num_frames < self.context_length: raise ValueError(f"Expected {num_frames=} to be greater or equal than {self.context_length=}") last_frame_batch_length = num_frames - frame_indices[-1][1] frame_indices.append((num_frames - self.context_length, num_frames)) num_times_accumulated = torch.zeros((1, num_frames, 1), device=device) accumulated_values = torch.zeros_like(hidden_states) for i, (frame_start, frame_end) in enumerate(frame_indices): # The reason for slicing here is to ensure that if (frame_end - frame_start) is to handle # cases like frame_indices=[(0, 16), (16, 20)], if the user provided a video with 19 frames, or # essentially a non-multiple of `context_length`. weights = torch.ones_like(num_times_accumulated[:, frame_start:frame_end]) weights *= frame_weights hidden_states_chunk = hidden_states[:, frame_start:frame_end] # Notice that normalization is always applied before the real computation in the following blocks. # 1. Self-Attention norm_hidden_states = self.norm1(hidden_states_chunk) if self.pos_embed is not None: norm_hidden_states = self.pos_embed(norm_hidden_states) attn_output = self.attn1( norm_hidden_states, encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None, attention_mask=attention_mask, **cross_attention_kwargs, ) hidden_states_chunk = attn_output + hidden_states_chunk if hidden_states_chunk.ndim == 4: hidden_states_chunk = hidden_states_chunk.squeeze(1) # 2. Cross-Attention if self.attn2 is not None: norm_hidden_states = self.norm2(hidden_states_chunk) if self.pos_embed is not None and self.norm_type != "ada_norm_single": norm_hidden_states = self.pos_embed(norm_hidden_states) attn_output = self.attn2( norm_hidden_states, encoder_hidden_states=encoder_hidden_states, attention_mask=encoder_attention_mask, **cross_attention_kwargs, ) hidden_states_chunk = attn_output + hidden_states_chunk if i == len(frame_indices) - 1 and not is_last_frame_batch_complete: accumulated_values[:, -last_frame_batch_length:] += ( hidden_states_chunk[:, -last_frame_batch_length:] * weights[:, -last_frame_batch_length:] ) num_times_accumulated[:, -last_frame_batch_length:] += weights[:, -last_frame_batch_length] else: accumulated_values[:, frame_start:frame_end] += hidden_states_chunk * weights num_times_accumulated[:, frame_start:frame_end] += weights # TODO(aryan): Maybe this could be done in a better way. 
# # Previously, this was: # hidden_states = torch.where( # num_times_accumulated > 0, accumulated_values / num_times_accumulated, accumulated_values # ) # # The reasoning for the change here is `torch.where` became a bottleneck at some point when golfing memory # spikes. It is particularly noticeable when the number of frames is high. My understanding is that this comes # from tensors being copied - which is why we resort to spliting and concatenating here. I've not particularly # looked into this deeply because other memory optimizations led to more pronounced reductions. hidden_states = torch.cat( [ torch.where(num_times_split > 0, accumulated_split / num_times_split, accumulated_split) for accumulated_split, num_times_split in zip( accumulated_values.split(self.context_length, dim=1), num_times_accumulated.split(self.context_length, dim=1), ) ], dim=1, ).to(dtype) # 3. Feed-forward norm_hidden_states = self.norm3(hidden_states) if self._chunk_size is not None: ff_output = _chunked_feed_forward(self.ff, norm_hidden_states, self._chunk_dim, self._chunk_size) else: ff_output = self.ff(norm_hidden_states) hidden_states = ff_output + hidden_states if hidden_states.ndim == 4: hidden_states = hidden_states.squeeze(1) return hidden_states class FeedForward(nn.Module): r""" A feed-forward layer. Parameters: dim (`int`): The number of channels in the input. dim_out (`int`, *optional*): The number of channels in the output. If not given, defaults to `dim`. mult (`int`, *optional*, defaults to 4): The multiplier to use for the hidden dimension. dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use. activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward. final_dropout (`bool` *optional*, defaults to False): Apply a final dropout. bias (`bool`, defaults to True): Whether to use a bias in the linear layer. """ def __init__( self, dim: int, dim_out: Optional[int] = None, mult: int = 4, dropout: float = 0.0, activation_fn: str = "geglu", final_dropout: bool = False, inner_dim=None, bias: bool = True, ): super().__init__() if inner_dim is None: inner_dim = int(dim * mult) dim_out = dim_out if dim_out is not None else dim if activation_fn == "gelu": act_fn = GELU(dim, inner_dim, bias=bias) if activation_fn == "gelu-approximate": act_fn = GELU(dim, inner_dim, approximate="tanh", bias=bias) elif activation_fn == "geglu": act_fn = GEGLU(dim, inner_dim, bias=bias) elif activation_fn == "geglu-approximate": act_fn = ApproximateGELU(dim, inner_dim, bias=bias) elif activation_fn == "swiglu": act_fn = SwiGLU(dim, inner_dim, bias=bias) elif activation_fn == "linear-silu": act_fn = LinearActivation(dim, inner_dim, bias=bias, activation="silu") self.net = nn.ModuleList([]) # project in self.net.append(act_fn) # project dropout self.net.append(nn.Dropout(dropout)) # project out self.net.append(nn.Linear(inner_dim, dim_out, bias=bias)) # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout if final_dropout: self.net.append(nn.Dropout(dropout)) def forward(self, hidden_states: torch.Tensor, *args, **kwargs) -> torch.Tensor: if len(args) > 0 or kwargs.get("scale", None) is not None: deprecation_message = "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`." 
deprecate("scale", "1.0.0", deprecation_message) for module in self.net: hidden_states = module(hidden_states) return hidden_states
diffusers/src/diffusers/models/attention.py/0
{ "file_path": "diffusers/src/diffusers/models/attention.py", "repo_id": "diffusers", "token_count": 33109 }
161
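# ---------------------------------------------------------------------------
# Worked example (illustrative, standalone): the windowing and weighting logic of
# `FreeNoiseTransformerBlock` from the attention module above, re-implemented in
# plain Python so the expected window layout can be inspected without building
# the full block. The numbers (25 frames, context_length=16, context_stride=4)
# are assumptions chosen only for this demonstration.
def _free_noise_windows(num_frames: int, context_length: int, context_stride: int):
    # Mirrors FreeNoiseTransformerBlock._get_frame_indices plus the out-of-bounds
    # handling in its forward(): a final window is appended when the last window
    # does not end exactly at num_frames.
    windows = [
        (i, min(num_frames, i + context_length))
        for i in range(0, num_frames - context_length + 1, context_stride)
    ]
    if windows[-1][1] != num_frames:
        windows.append((num_frames - context_length, num_frames))
    return windows


def _pyramid_weights(num_frames: int):
    # Mirrors the "pyramid" branch of _get_frame_weights: weights ramp up to the
    # middle frame and back down, e.g. 4 -> [1, 2, 2, 1] and 5 -> [1, 2, 3, 2, 1].
    mid = num_frames // 2
    weights = list(range(1, mid + 1))
    if num_frames % 2 == 0:
        return weights + weights[::-1]
    return weights + [mid + 1] + weights[::-1]


if __name__ == "__main__":
    print(_free_noise_windows(num_frames=25, context_length=16, context_stride=4))
    # -> [(0, 16), (4, 20), (8, 24), (9, 25)]
    print(_pyramid_weights(4), _pyramid_weights(16))
    # -> [1, 2, 2, 1] and the 16-frame pyramid used with context_length=16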
# Copyright 2025 The Qwen-Image Team, Wan Team and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # We gratefully acknowledge the Wan Team for their outstanding contributions. # QwenImageVAE is further fine-tuned from the Wan Video VAE to achieve improved performance. # For more information about the Wan VAE, please refer to: # - GitHub: https://github.com/Wan-Video/Wan2.1 # - arXiv: https://arxiv.org/abs/2503.20314 from typing import List, Optional, Tuple, Union import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.checkpoint from ...configuration_utils import ConfigMixin, register_to_config from ...loaders import FromOriginalModelMixin from ...utils import logging from ...utils.accelerate_utils import apply_forward_hook from ..activations import get_activation from ..modeling_outputs import AutoencoderKLOutput from ..modeling_utils import ModelMixin from .vae import DecoderOutput, DiagonalGaussianDistribution logger = logging.get_logger(__name__) # pylint: disable=invalid-name CACHE_T = 2 class QwenImageCausalConv3d(nn.Conv3d): r""" A custom 3D causal convolution layer with feature caching support. This layer extends the standard Conv3D layer by ensuring causality in the time dimension and handling feature caching for efficient inference. Args: in_channels (int): Number of channels in the input image out_channels (int): Number of channels produced by the convolution kernel_size (int or tuple): Size of the convolving kernel stride (int or tuple, optional): Stride of the convolution. Default: 1 padding (int or tuple, optional): Zero-padding added to all three sides of the input. Default: 0 """ def __init__( self, in_channels: int, out_channels: int, kernel_size: Union[int, Tuple[int, int, int]], stride: Union[int, Tuple[int, int, int]] = 1, padding: Union[int, Tuple[int, int, int]] = 0, ) -> None: super().__init__( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, ) # Set up causal padding self._padding = (self.padding[2], self.padding[2], self.padding[1], self.padding[1], 2 * self.padding[0], 0) self.padding = (0, 0, 0) def forward(self, x, cache_x=None): padding = list(self._padding) if cache_x is not None and self._padding[4] > 0: cache_x = cache_x.to(x.device) x = torch.cat([cache_x, x], dim=2) padding[4] -= cache_x.shape[2] x = F.pad(x, padding) return super().forward(x) class QwenImageRMS_norm(nn.Module): r""" A custom RMS normalization layer. Args: dim (int): The number of dimensions to normalize over. channel_first (bool, optional): Whether the input tensor has channels as the first dimension. Default is True. images (bool, optional): Whether the input represents image data. Default is True. bias (bool, optional): Whether to include a learnable bias term. Default is False. 
""" def __init__(self, dim: int, channel_first: bool = True, images: bool = True, bias: bool = False) -> None: super().__init__() broadcastable_dims = (1, 1, 1) if not images else (1, 1) shape = (dim, *broadcastable_dims) if channel_first else (dim,) self.channel_first = channel_first self.scale = dim**0.5 self.gamma = nn.Parameter(torch.ones(shape)) self.bias = nn.Parameter(torch.zeros(shape)) if bias else 0.0 def forward(self, x): return F.normalize(x, dim=(1 if self.channel_first else -1)) * self.scale * self.gamma + self.bias class QwenImageUpsample(nn.Upsample): r""" Perform upsampling while ensuring the output tensor has the same data type as the input. Args: x (torch.Tensor): Input tensor to be upsampled. Returns: torch.Tensor: Upsampled tensor with the same data type as the input. """ def forward(self, x): return super().forward(x.float()).type_as(x) class QwenImageResample(nn.Module): r""" A custom resampling module for 2D and 3D data. Args: dim (int): The number of input/output channels. mode (str): The resampling mode. Must be one of: - 'none': No resampling (identity operation). - 'upsample2d': 2D upsampling with nearest-exact interpolation and convolution. - 'upsample3d': 3D upsampling with nearest-exact interpolation, convolution, and causal 3D convolution. - 'downsample2d': 2D downsampling with zero-padding and convolution. - 'downsample3d': 3D downsampling with zero-padding, convolution, and causal 3D convolution. """ def __init__(self, dim: int, mode: str) -> None: super().__init__() self.dim = dim self.mode = mode # layers if mode == "upsample2d": self.resample = nn.Sequential( QwenImageUpsample(scale_factor=(2.0, 2.0), mode="nearest-exact"), nn.Conv2d(dim, dim // 2, 3, padding=1), ) elif mode == "upsample3d": self.resample = nn.Sequential( QwenImageUpsample(scale_factor=(2.0, 2.0), mode="nearest-exact"), nn.Conv2d(dim, dim // 2, 3, padding=1), ) self.time_conv = QwenImageCausalConv3d(dim, dim * 2, (3, 1, 1), padding=(1, 0, 0)) elif mode == "downsample2d": self.resample = nn.Sequential(nn.ZeroPad2d((0, 1, 0, 1)), nn.Conv2d(dim, dim, 3, stride=(2, 2))) elif mode == "downsample3d": self.resample = nn.Sequential(nn.ZeroPad2d((0, 1, 0, 1)), nn.Conv2d(dim, dim, 3, stride=(2, 2))) self.time_conv = QwenImageCausalConv3d(dim, dim, (3, 1, 1), stride=(2, 1, 1), padding=(0, 0, 0)) else: self.resample = nn.Identity() def forward(self, x, feat_cache=None, feat_idx=[0]): b, c, t, h, w = x.size() if self.mode == "upsample3d": if feat_cache is not None: idx = feat_idx[0] if feat_cache[idx] is None: feat_cache[idx] = "Rep" feat_idx[0] += 1 else: cache_x = x[:, :, -CACHE_T:, :, :].clone() if cache_x.shape[2] < 2 and feat_cache[idx] is not None and feat_cache[idx] != "Rep": # cache last frame of last two chunk cache_x = torch.cat( [feat_cache[idx][:, :, -1, :, :].unsqueeze(2).to(cache_x.device), cache_x], dim=2 ) if cache_x.shape[2] < 2 and feat_cache[idx] is not None and feat_cache[idx] == "Rep": cache_x = torch.cat([torch.zeros_like(cache_x).to(cache_x.device), cache_x], dim=2) if feat_cache[idx] == "Rep": x = self.time_conv(x) else: x = self.time_conv(x, feat_cache[idx]) feat_cache[idx] = cache_x feat_idx[0] += 1 x = x.reshape(b, 2, c, t, h, w) x = torch.stack((x[:, 0, :, :, :, :], x[:, 1, :, :, :, :]), 3) x = x.reshape(b, c, t * 2, h, w) t = x.shape[2] x = x.permute(0, 2, 1, 3, 4).reshape(b * t, c, h, w) x = self.resample(x) x = x.view(b, t, x.size(1), x.size(2), x.size(3)).permute(0, 2, 1, 3, 4) if self.mode == "downsample3d": if feat_cache is not None: idx = feat_idx[0] if 
feat_cache[idx] is None: feat_cache[idx] = x.clone() feat_idx[0] += 1 else: cache_x = x[:, :, -1:, :, :].clone() x = self.time_conv(torch.cat([feat_cache[idx][:, :, -1:, :, :], x], 2)) feat_cache[idx] = cache_x feat_idx[0] += 1 return x class QwenImageResidualBlock(nn.Module): r""" A custom residual block module. Args: in_dim (int): Number of input channels. out_dim (int): Number of output channels. dropout (float, optional): Dropout rate for the dropout layer. Default is 0.0. non_linearity (str, optional): Type of non-linearity to use. Default is "silu". """ def __init__( self, in_dim: int, out_dim: int, dropout: float = 0.0, non_linearity: str = "silu", ) -> None: super().__init__() self.in_dim = in_dim self.out_dim = out_dim self.nonlinearity = get_activation(non_linearity) # layers self.norm1 = QwenImageRMS_norm(in_dim, images=False) self.conv1 = QwenImageCausalConv3d(in_dim, out_dim, 3, padding=1) self.norm2 = QwenImageRMS_norm(out_dim, images=False) self.dropout = nn.Dropout(dropout) self.conv2 = QwenImageCausalConv3d(out_dim, out_dim, 3, padding=1) self.conv_shortcut = QwenImageCausalConv3d(in_dim, out_dim, 1) if in_dim != out_dim else nn.Identity() def forward(self, x, feat_cache=None, feat_idx=[0]): # Apply shortcut connection h = self.conv_shortcut(x) # First normalization and activation x = self.norm1(x) x = self.nonlinearity(x) if feat_cache is not None: idx = feat_idx[0] cache_x = x[:, :, -CACHE_T:, :, :].clone() if cache_x.shape[2] < 2 and feat_cache[idx] is not None: cache_x = torch.cat([feat_cache[idx][:, :, -1, :, :].unsqueeze(2).to(cache_x.device), cache_x], dim=2) x = self.conv1(x, feat_cache[idx]) feat_cache[idx] = cache_x feat_idx[0] += 1 else: x = self.conv1(x) # Second normalization and activation x = self.norm2(x) x = self.nonlinearity(x) # Dropout x = self.dropout(x) if feat_cache is not None: idx = feat_idx[0] cache_x = x[:, :, -CACHE_T:, :, :].clone() if cache_x.shape[2] < 2 and feat_cache[idx] is not None: cache_x = torch.cat([feat_cache[idx][:, :, -1, :, :].unsqueeze(2).to(cache_x.device), cache_x], dim=2) x = self.conv2(x, feat_cache[idx]) feat_cache[idx] = cache_x feat_idx[0] += 1 else: x = self.conv2(x) # Add residual connection return x + h class QwenImageAttentionBlock(nn.Module): r""" Causal self-attention with a single head. Args: dim (int): The number of channels in the input tensor. """ def __init__(self, dim): super().__init__() self.dim = dim # layers self.norm = QwenImageRMS_norm(dim) self.to_qkv = nn.Conv2d(dim, dim * 3, 1) self.proj = nn.Conv2d(dim, dim, 1) def forward(self, x): identity = x batch_size, channels, time, height, width = x.size() x = x.permute(0, 2, 1, 3, 4).reshape(batch_size * time, channels, height, width) x = self.norm(x) # compute query, key, value qkv = self.to_qkv(x) qkv = qkv.reshape(batch_size * time, 1, channels * 3, -1) qkv = qkv.permute(0, 1, 3, 2).contiguous() q, k, v = qkv.chunk(3, dim=-1) # apply attention x = F.scaled_dot_product_attention(q, k, v) x = x.squeeze(1).permute(0, 2, 1).reshape(batch_size * time, channels, height, width) # output projection x = self.proj(x) # Reshape back: [(b*t), c, h, w] -> [b, c, t, h, w] x = x.view(batch_size, time, channels, height, width) x = x.permute(0, 2, 1, 3, 4) return x + identity class QwenImageMidBlock(nn.Module): """ Middle block for QwenImageVAE encoder and decoder. Args: dim (int): Number of input/output channels. dropout (float): Dropout rate. non_linearity (str): Type of non-linearity to use. 
""" def __init__(self, dim: int, dropout: float = 0.0, non_linearity: str = "silu", num_layers: int = 1): super().__init__() self.dim = dim # Create the components resnets = [QwenImageResidualBlock(dim, dim, dropout, non_linearity)] attentions = [] for _ in range(num_layers): attentions.append(QwenImageAttentionBlock(dim)) resnets.append(QwenImageResidualBlock(dim, dim, dropout, non_linearity)) self.attentions = nn.ModuleList(attentions) self.resnets = nn.ModuleList(resnets) self.gradient_checkpointing = False def forward(self, x, feat_cache=None, feat_idx=[0]): # First residual block x = self.resnets[0](x, feat_cache, feat_idx) # Process through attention and residual blocks for attn, resnet in zip(self.attentions, self.resnets[1:]): if attn is not None: x = attn(x) x = resnet(x, feat_cache, feat_idx) return x class QwenImageEncoder3d(nn.Module): r""" A 3D encoder module. Args: dim (int): The base number of channels in the first layer. z_dim (int): The dimensionality of the latent space. dim_mult (list of int): Multipliers for the number of channels in each block. num_res_blocks (int): Number of residual blocks in each block. attn_scales (list of float): Scales at which to apply attention mechanisms. temperal_downsample (list of bool): Whether to downsample temporally in each block. dropout (float): Dropout rate for the dropout layers. non_linearity (str): Type of non-linearity to use. """ def __init__( self, dim=128, z_dim=4, dim_mult=[1, 2, 4, 4], num_res_blocks=2, attn_scales=[], temperal_downsample=[True, True, False], dropout=0.0, non_linearity: str = "silu", ): super().__init__() self.dim = dim self.z_dim = z_dim self.dim_mult = dim_mult self.num_res_blocks = num_res_blocks self.attn_scales = attn_scales self.temperal_downsample = temperal_downsample self.nonlinearity = get_activation(non_linearity) # dimensions dims = [dim * u for u in [1] + dim_mult] scale = 1.0 # init block self.conv_in = QwenImageCausalConv3d(3, dims[0], 3, padding=1) # downsample blocks self.down_blocks = nn.ModuleList([]) for i, (in_dim, out_dim) in enumerate(zip(dims[:-1], dims[1:])): # residual (+attention) blocks for _ in range(num_res_blocks): self.down_blocks.append(QwenImageResidualBlock(in_dim, out_dim, dropout)) if scale in attn_scales: self.down_blocks.append(QwenImageAttentionBlock(out_dim)) in_dim = out_dim # downsample block if i != len(dim_mult) - 1: mode = "downsample3d" if temperal_downsample[i] else "downsample2d" self.down_blocks.append(QwenImageResample(out_dim, mode=mode)) scale /= 2.0 # middle blocks self.mid_block = QwenImageMidBlock(out_dim, dropout, non_linearity, num_layers=1) # output blocks self.norm_out = QwenImageRMS_norm(out_dim, images=False) self.conv_out = QwenImageCausalConv3d(out_dim, z_dim, 3, padding=1) self.gradient_checkpointing = False def forward(self, x, feat_cache=None, feat_idx=[0]): if feat_cache is not None: idx = feat_idx[0] cache_x = x[:, :, -CACHE_T:, :, :].clone() if cache_x.shape[2] < 2 and feat_cache[idx] is not None: # cache last frame of last two chunk cache_x = torch.cat([feat_cache[idx][:, :, -1, :, :].unsqueeze(2).to(cache_x.device), cache_x], dim=2) x = self.conv_in(x, feat_cache[idx]) feat_cache[idx] = cache_x feat_idx[0] += 1 else: x = self.conv_in(x) ## downsamples for layer in self.down_blocks: if feat_cache is not None: x = layer(x, feat_cache, feat_idx) else: x = layer(x) ## middle x = self.mid_block(x, feat_cache, feat_idx) ## head x = self.norm_out(x) x = self.nonlinearity(x) if feat_cache is not None: idx = feat_idx[0] cache_x = x[:, :, 
-CACHE_T:, :, :].clone() if cache_x.shape[2] < 2 and feat_cache[idx] is not None: # cache last frame of last two chunk cache_x = torch.cat([feat_cache[idx][:, :, -1, :, :].unsqueeze(2).to(cache_x.device), cache_x], dim=2) x = self.conv_out(x, feat_cache[idx]) feat_cache[idx] = cache_x feat_idx[0] += 1 else: x = self.conv_out(x) return x class QwenImageUpBlock(nn.Module): """ A block that handles upsampling for the QwenImageVAE decoder. Args: in_dim (int): Input dimension out_dim (int): Output dimension num_res_blocks (int): Number of residual blocks dropout (float): Dropout rate upsample_mode (str, optional): Mode for upsampling ('upsample2d' or 'upsample3d') non_linearity (str): Type of non-linearity to use """ def __init__( self, in_dim: int, out_dim: int, num_res_blocks: int, dropout: float = 0.0, upsample_mode: Optional[str] = None, non_linearity: str = "silu", ): super().__init__() self.in_dim = in_dim self.out_dim = out_dim # Create layers list resnets = [] # Add residual blocks and attention if needed current_dim = in_dim for _ in range(num_res_blocks + 1): resnets.append(QwenImageResidualBlock(current_dim, out_dim, dropout, non_linearity)) current_dim = out_dim self.resnets = nn.ModuleList(resnets) # Add upsampling layer if needed self.upsamplers = None if upsample_mode is not None: self.upsamplers = nn.ModuleList([QwenImageResample(out_dim, mode=upsample_mode)]) self.gradient_checkpointing = False def forward(self, x, feat_cache=None, feat_idx=[0]): """ Forward pass through the upsampling block. Args: x (torch.Tensor): Input tensor feat_cache (list, optional): Feature cache for causal convolutions feat_idx (list, optional): Feature index for cache management Returns: torch.Tensor: Output tensor """ for resnet in self.resnets: if feat_cache is not None: x = resnet(x, feat_cache, feat_idx) else: x = resnet(x) if self.upsamplers is not None: if feat_cache is not None: x = self.upsamplers[0](x, feat_cache, feat_idx) else: x = self.upsamplers[0](x) return x class QwenImageDecoder3d(nn.Module): r""" A 3D decoder module. Args: dim (int): The base number of channels in the first layer. z_dim (int): The dimensionality of the latent space. dim_mult (list of int): Multipliers for the number of channels in each block. num_res_blocks (int): Number of residual blocks in each block. attn_scales (list of float): Scales at which to apply attention mechanisms. temperal_upsample (list of bool): Whether to upsample temporally in each block. dropout (float): Dropout rate for the dropout layers. non_linearity (str): Type of non-linearity to use. 
""" def __init__( self, dim=128, z_dim=4, dim_mult=[1, 2, 4, 4], num_res_blocks=2, attn_scales=[], temperal_upsample=[False, True, True], dropout=0.0, non_linearity: str = "silu", ): super().__init__() self.dim = dim self.z_dim = z_dim self.dim_mult = dim_mult self.num_res_blocks = num_res_blocks self.attn_scales = attn_scales self.temperal_upsample = temperal_upsample self.nonlinearity = get_activation(non_linearity) # dimensions dims = [dim * u for u in [dim_mult[-1]] + dim_mult[::-1]] scale = 1.0 / 2 ** (len(dim_mult) - 2) # init block self.conv_in = QwenImageCausalConv3d(z_dim, dims[0], 3, padding=1) # middle blocks self.mid_block = QwenImageMidBlock(dims[0], dropout, non_linearity, num_layers=1) # upsample blocks self.up_blocks = nn.ModuleList([]) for i, (in_dim, out_dim) in enumerate(zip(dims[:-1], dims[1:])): # residual (+attention) blocks if i > 0: in_dim = in_dim // 2 # Determine if we need upsampling upsample_mode = None if i != len(dim_mult) - 1: upsample_mode = "upsample3d" if temperal_upsample[i] else "upsample2d" # Create and add the upsampling block up_block = QwenImageUpBlock( in_dim=in_dim, out_dim=out_dim, num_res_blocks=num_res_blocks, dropout=dropout, upsample_mode=upsample_mode, non_linearity=non_linearity, ) self.up_blocks.append(up_block) # Update scale for next iteration if upsample_mode is not None: scale *= 2.0 # output blocks self.norm_out = QwenImageRMS_norm(out_dim, images=False) self.conv_out = QwenImageCausalConv3d(out_dim, 3, 3, padding=1) self.gradient_checkpointing = False def forward(self, x, feat_cache=None, feat_idx=[0]): ## conv1 if feat_cache is not None: idx = feat_idx[0] cache_x = x[:, :, -CACHE_T:, :, :].clone() if cache_x.shape[2] < 2 and feat_cache[idx] is not None: # cache last frame of last two chunk cache_x = torch.cat([feat_cache[idx][:, :, -1, :, :].unsqueeze(2).to(cache_x.device), cache_x], dim=2) x = self.conv_in(x, feat_cache[idx]) feat_cache[idx] = cache_x feat_idx[0] += 1 else: x = self.conv_in(x) ## middle x = self.mid_block(x, feat_cache, feat_idx) ## upsamples for up_block in self.up_blocks: x = up_block(x, feat_cache, feat_idx) ## head x = self.norm_out(x) x = self.nonlinearity(x) if feat_cache is not None: idx = feat_idx[0] cache_x = x[:, :, -CACHE_T:, :, :].clone() if cache_x.shape[2] < 2 and feat_cache[idx] is not None: # cache last frame of last two chunk cache_x = torch.cat([feat_cache[idx][:, :, -1, :, :].unsqueeze(2).to(cache_x.device), cache_x], dim=2) x = self.conv_out(x, feat_cache[idx]) feat_cache[idx] = cache_x feat_idx[0] += 1 else: x = self.conv_out(x) return x class AutoencoderKLQwenImage(ModelMixin, ConfigMixin, FromOriginalModelMixin): r""" A VAE model with KL loss for encoding videos into latents and decoding latent representations into videos. This model inherits from [`ModelMixin`]. Check the superclass documentation for it's generic methods implemented for all models (such as downloading or saving). 
""" _supports_gradient_checkpointing = False # fmt: off @register_to_config def __init__( self, base_dim: int = 96, z_dim: int = 16, dim_mult: Tuple[int] = [1, 2, 4, 4], num_res_blocks: int = 2, attn_scales: List[float] = [], temperal_downsample: List[bool] = [False, True, True], dropout: float = 0.0, latents_mean: List[float] = [-0.7571, -0.7089, -0.9113, 0.1075, -0.1745, 0.9653, -0.1517, 1.5508, 0.4134, -0.0715, 0.5517, -0.3632, -0.1922, -0.9497, 0.2503, -0.2921], latents_std: List[float] = [2.8184, 1.4541, 2.3275, 2.6558, 1.2196, 1.7708, 2.6052, 2.0743, 3.2687, 2.1526, 2.8652, 1.5579, 1.6382, 1.1253, 2.8251, 1.9160], ) -> None: # fmt: on super().__init__() self.z_dim = z_dim self.temperal_downsample = temperal_downsample self.temperal_upsample = temperal_downsample[::-1] self.encoder = QwenImageEncoder3d( base_dim, z_dim * 2, dim_mult, num_res_blocks, attn_scales, self.temperal_downsample, dropout ) self.quant_conv = QwenImageCausalConv3d(z_dim * 2, z_dim * 2, 1) self.post_quant_conv = QwenImageCausalConv3d(z_dim, z_dim, 1) self.decoder = QwenImageDecoder3d( base_dim, z_dim, dim_mult, num_res_blocks, attn_scales, self.temperal_upsample, dropout ) self.spatial_compression_ratio = 2 ** len(self.temperal_downsample) # When decoding a batch of video latents at a time, one can save memory by slicing across the batch dimension # to perform decoding of a single video latent at a time. self.use_slicing = False # When decoding spatially large video latents, the memory requirement is very high. By breaking the video latent # frames spatially into smaller tiles and performing multiple forward passes for decoding, and then blending the # intermediate tiles together, the memory requirement can be lowered. self.use_tiling = False # The minimal tile height and width for spatial tiling to be used self.tile_sample_min_height = 256 self.tile_sample_min_width = 256 # The minimal distance between two spatial tiles self.tile_sample_stride_height = 192 self.tile_sample_stride_width = 192 # Precompute and cache conv counts for encoder and decoder for clear_cache speedup self._cached_conv_counts = { "decoder": sum(isinstance(m, QwenImageCausalConv3d) for m in self.decoder.modules()) if self.decoder is not None else 0, "encoder": sum(isinstance(m, QwenImageCausalConv3d) for m in self.encoder.modules()) if self.encoder is not None else 0, } def enable_tiling( self, tile_sample_min_height: Optional[int] = None, tile_sample_min_width: Optional[int] = None, tile_sample_stride_height: Optional[float] = None, tile_sample_stride_width: Optional[float] = None, ) -> None: r""" Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow processing larger images. Args: tile_sample_min_height (`int`, *optional*): The minimum height required for a sample to be separated into tiles across the height dimension. tile_sample_min_width (`int`, *optional*): The minimum width required for a sample to be separated into tiles across the width dimension. tile_sample_stride_height (`int`, *optional*): The minimum amount of overlap between two consecutive vertical tiles. This is to ensure that there are no tiling artifacts produced across the height dimension. tile_sample_stride_width (`int`, *optional*): The stride between two consecutive horizontal tiles. This is to ensure that there are no tiling artifacts produced across the width dimension. 
""" self.use_tiling = True self.tile_sample_min_height = tile_sample_min_height or self.tile_sample_min_height self.tile_sample_min_width = tile_sample_min_width or self.tile_sample_min_width self.tile_sample_stride_height = tile_sample_stride_height or self.tile_sample_stride_height self.tile_sample_stride_width = tile_sample_stride_width or self.tile_sample_stride_width def disable_tiling(self) -> None: r""" Disable tiled VAE decoding. If `enable_tiling` was previously enabled, this method will go back to computing decoding in one step. """ self.use_tiling = False def enable_slicing(self) -> None: r""" Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. """ self.use_slicing = True def disable_slicing(self) -> None: r""" Disable sliced VAE decoding. If `enable_slicing` was previously enabled, this method will go back to computing decoding in one step. """ self.use_slicing = False def clear_cache(self): def _count_conv3d(model): count = 0 for m in model.modules(): if isinstance(m, QwenImageCausalConv3d): count += 1 return count self._conv_num = _count_conv3d(self.decoder) self._conv_idx = [0] self._feat_map = [None] * self._conv_num # cache encode self._enc_conv_num = _count_conv3d(self.encoder) self._enc_conv_idx = [0] self._enc_feat_map = [None] * self._enc_conv_num def _encode(self, x: torch.Tensor): _, _, num_frame, height, width = x.shape if self.use_tiling and (width > self.tile_sample_min_width or height > self.tile_sample_min_height): return self.tiled_encode(x) self.clear_cache() iter_ = 1 + (num_frame - 1) // 4 for i in range(iter_): self._enc_conv_idx = [0] if i == 0: out = self.encoder(x[:, :, :1, :, :], feat_cache=self._enc_feat_map, feat_idx=self._enc_conv_idx) else: out_ = self.encoder( x[:, :, 1 + 4 * (i - 1) : 1 + 4 * i, :, :], feat_cache=self._enc_feat_map, feat_idx=self._enc_conv_idx, ) out = torch.cat([out, out_], 2) enc = self.quant_conv(out) self.clear_cache() return enc @apply_forward_hook def encode( self, x: torch.Tensor, return_dict: bool = True ) -> Union[AutoencoderKLOutput, Tuple[DiagonalGaussianDistribution]]: r""" Encode a batch of images into latents. Args: x (`torch.Tensor`): Input batch of images. return_dict (`bool`, *optional*, defaults to `True`): Whether to return a [`~models.autoencoder_kl.AutoencoderKLOutput`] instead of a plain tuple. Returns: The latent representations of the encoded videos. If `return_dict` is True, a [`~models.autoencoder_kl.AutoencoderKLOutput`] is returned, otherwise a plain `tuple` is returned. 
""" if self.use_slicing and x.shape[0] > 1: encoded_slices = [self._encode(x_slice) for x_slice in x.split(1)] h = torch.cat(encoded_slices) else: h = self._encode(x) posterior = DiagonalGaussianDistribution(h) if not return_dict: return (posterior,) return AutoencoderKLOutput(latent_dist=posterior) def _decode(self, z: torch.Tensor, return_dict: bool = True): _, _, num_frame, height, width = z.shape tile_latent_min_height = self.tile_sample_min_height // self.spatial_compression_ratio tile_latent_min_width = self.tile_sample_min_width // self.spatial_compression_ratio if self.use_tiling and (width > tile_latent_min_width or height > tile_latent_min_height): return self.tiled_decode(z, return_dict=return_dict) self.clear_cache() x = self.post_quant_conv(z) for i in range(num_frame): self._conv_idx = [0] if i == 0: out = self.decoder(x[:, :, i : i + 1, :, :], feat_cache=self._feat_map, feat_idx=self._conv_idx) else: out_ = self.decoder(x[:, :, i : i + 1, :, :], feat_cache=self._feat_map, feat_idx=self._conv_idx) out = torch.cat([out, out_], 2) out = torch.clamp(out, min=-1.0, max=1.0) self.clear_cache() if not return_dict: return (out,) return DecoderOutput(sample=out) @apply_forward_hook def decode(self, z: torch.Tensor, return_dict: bool = True) -> Union[DecoderOutput, torch.Tensor]: r""" Decode a batch of images. Args: z (`torch.Tensor`): Input batch of latent vectors. return_dict (`bool`, *optional*, defaults to `True`): Whether to return a [`~models.vae.DecoderOutput`] instead of a plain tuple. Returns: [`~models.vae.DecoderOutput`] or `tuple`: If return_dict is True, a [`~models.vae.DecoderOutput`] is returned, otherwise a plain `tuple` is returned. """ if self.use_slicing and z.shape[0] > 1: decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)] decoded = torch.cat(decoded_slices) else: decoded = self._decode(z).sample if not return_dict: return (decoded,) return DecoderOutput(sample=decoded) def blend_v(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor: blend_extent = min(a.shape[-2], b.shape[-2], blend_extent) for y in range(blend_extent): b[:, :, :, y, :] = a[:, :, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, :, y, :] * ( y / blend_extent ) return b def blend_h(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor: blend_extent = min(a.shape[-1], b.shape[-1], blend_extent) for x in range(blend_extent): b[:, :, :, :, x] = a[:, :, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, :, x] * ( x / blend_extent ) return b def tiled_encode(self, x: torch.Tensor) -> AutoencoderKLOutput: r"""Encode a batch of images using a tiled encoder. Args: x (`torch.Tensor`): Input batch of videos. Returns: `torch.Tensor`: The latent representation of the encoded videos. """ _, _, num_frames, height, width = x.shape latent_height = height // self.spatial_compression_ratio latent_width = width // self.spatial_compression_ratio tile_latent_min_height = self.tile_sample_min_height // self.spatial_compression_ratio tile_latent_min_width = self.tile_sample_min_width // self.spatial_compression_ratio tile_latent_stride_height = self.tile_sample_stride_height // self.spatial_compression_ratio tile_latent_stride_width = self.tile_sample_stride_width // self.spatial_compression_ratio blend_height = tile_latent_min_height - tile_latent_stride_height blend_width = tile_latent_min_width - tile_latent_stride_width # Split x into overlapping tiles and encode them separately. 
# The tiles have an overlap to avoid seams between tiles. rows = [] for i in range(0, height, self.tile_sample_stride_height): row = [] for j in range(0, width, self.tile_sample_stride_width): self.clear_cache() time = [] frame_range = 1 + (num_frames - 1) // 4 for k in range(frame_range): self._enc_conv_idx = [0] if k == 0: tile = x[:, :, :1, i : i + self.tile_sample_min_height, j : j + self.tile_sample_min_width] else: tile = x[ :, :, 1 + 4 * (k - 1) : 1 + 4 * k, i : i + self.tile_sample_min_height, j : j + self.tile_sample_min_width, ] tile = self.encoder(tile, feat_cache=self._enc_feat_map, feat_idx=self._enc_conv_idx) tile = self.quant_conv(tile) time.append(tile) row.append(torch.cat(time, dim=2)) rows.append(row) self.clear_cache() result_rows = [] for i, row in enumerate(rows): result_row = [] for j, tile in enumerate(row): # blend the above tile and the left tile # to the current tile and add the current tile to the result row if i > 0: tile = self.blend_v(rows[i - 1][j], tile, blend_height) if j > 0: tile = self.blend_h(row[j - 1], tile, blend_width) result_row.append(tile[:, :, :, :tile_latent_stride_height, :tile_latent_stride_width]) result_rows.append(torch.cat(result_row, dim=-1)) enc = torch.cat(result_rows, dim=3)[:, :, :, :latent_height, :latent_width] return enc def tiled_decode(self, z: torch.Tensor, return_dict: bool = True) -> Union[DecoderOutput, torch.Tensor]: r""" Decode a batch of images using a tiled decoder. Args: z (`torch.Tensor`): Input batch of latent vectors. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~models.vae.DecoderOutput`] instead of a plain tuple. Returns: [`~models.vae.DecoderOutput`] or `tuple`: If return_dict is True, a [`~models.vae.DecoderOutput`] is returned, otherwise a plain `tuple` is returned. """ _, _, num_frames, height, width = z.shape sample_height = height * self.spatial_compression_ratio sample_width = width * self.spatial_compression_ratio tile_latent_min_height = self.tile_sample_min_height // self.spatial_compression_ratio tile_latent_min_width = self.tile_sample_min_width // self.spatial_compression_ratio tile_latent_stride_height = self.tile_sample_stride_height // self.spatial_compression_ratio tile_latent_stride_width = self.tile_sample_stride_width // self.spatial_compression_ratio blend_height = self.tile_sample_min_height - self.tile_sample_stride_height blend_width = self.tile_sample_min_width - self.tile_sample_stride_width # Split z into overlapping tiles and decode them separately. # The tiles have an overlap to avoid seams between tiles. 
rows = [] for i in range(0, height, tile_latent_stride_height): row = [] for j in range(0, width, tile_latent_stride_width): self.clear_cache() time = [] for k in range(num_frames): self._conv_idx = [0] tile = z[:, :, k : k + 1, i : i + tile_latent_min_height, j : j + tile_latent_min_width] tile = self.post_quant_conv(tile) decoded = self.decoder(tile, feat_cache=self._feat_map, feat_idx=self._conv_idx) time.append(decoded) row.append(torch.cat(time, dim=2)) rows.append(row) self.clear_cache() result_rows = [] for i, row in enumerate(rows): result_row = [] for j, tile in enumerate(row): # blend the above tile and the left tile # to the current tile and add the current tile to the result row if i > 0: tile = self.blend_v(rows[i - 1][j], tile, blend_height) if j > 0: tile = self.blend_h(row[j - 1], tile, blend_width) result_row.append(tile[:, :, :, : self.tile_sample_stride_height, : self.tile_sample_stride_width]) result_rows.append(torch.cat(result_row, dim=-1)) dec = torch.cat(result_rows, dim=3)[:, :, :, :sample_height, :sample_width] if not return_dict: return (dec,) return DecoderOutput(sample=dec) def forward( self, sample: torch.Tensor, sample_posterior: bool = False, return_dict: bool = True, generator: Optional[torch.Generator] = None, ) -> Union[DecoderOutput, torch.Tensor]: """ Args: sample (`torch.Tensor`): Input sample. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`DecoderOutput`] instead of a plain tuple. """ x = sample posterior = self.encode(x).latent_dist if sample_posterior: z = posterior.sample(generator=generator) else: z = posterior.mode() dec = self.decode(z, return_dict=return_dict) return dec
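# Illustrative usage sketch: the snippet below shows how the tiling/slicing toggles and the
# cached causal-conv bookkeeping defined above are exercised through the public `encode`/`decode`
# path, as user-side code would do. The class name `AutoencoderKLQwenImage`, its no-argument
# constructor, and the tensor shapes are assumptions made for illustration; real use loads a
# checkpoint with `from_pretrained`.
if __name__ == "__main__":
    import torch

    from diffusers import AutoencoderKLQwenImage  # assumed public export for this module

    vae = AutoencoderKLQwenImage()  # hypothetical default config
    vae.eval()

    # Tiling splits large frames into overlapping tiles (only used once the input exceeds
    # `tile_sample_min_*`); slicing processes the batch one sample at a time.
    vae.enable_tiling()
    vae.enable_slicing()

    # Dummy video batch (batch, channels, frames, height, width); the frame count follows the
    # `1 + 4 * k` chunking consumed by `_encode`/`_decode`.
    video = torch.randn(1, 3, 5, 64, 64)

    with torch.no_grad():
        posterior = vae.encode(video).latent_dist
        latents = posterior.mode()
        reconstruction = vae.decode(latents).sample

    print(latents.shape, reconstruction.shape)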
diffusers/src/diffusers/models/autoencoders/autoencoder_kl_qwenimage.py/0
{ "file_path": "diffusers/src/diffusers/models/autoencoders/autoencoder_kl_qwenimage.py", "repo_id": "diffusers", "token_count": 20051 }
162
# Copyright 2025 Black Forest Labs, The HuggingFace Team and The InstantX Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass from typing import Any, Dict, List, Optional, Tuple, Union import torch import torch.nn as nn from ...configuration_utils import ConfigMixin, register_to_config from ...loaders import PeftAdapterMixin from ...utils import USE_PEFT_BACKEND, BaseOutput, logging, scale_lora_layers, unscale_lora_layers from ..attention_processor import AttentionProcessor from ..controlnets.controlnet import ControlNetConditioningEmbedding, zero_module from ..embeddings import CombinedTimestepGuidanceTextProjEmbeddings, CombinedTimestepTextProjEmbeddings, FluxPosEmbed from ..modeling_outputs import Transformer2DModelOutput from ..modeling_utils import ModelMixin from ..transformers.transformer_flux import FluxSingleTransformerBlock, FluxTransformerBlock logger = logging.get_logger(__name__) # pylint: disable=invalid-name @dataclass class FluxControlNetOutput(BaseOutput): controlnet_block_samples: Tuple[torch.Tensor] controlnet_single_block_samples: Tuple[torch.Tensor] class FluxControlNetModel(ModelMixin, ConfigMixin, PeftAdapterMixin): _supports_gradient_checkpointing = True @register_to_config def __init__( self, patch_size: int = 1, in_channels: int = 64, num_layers: int = 19, num_single_layers: int = 38, attention_head_dim: int = 128, num_attention_heads: int = 24, joint_attention_dim: int = 4096, pooled_projection_dim: int = 768, guidance_embeds: bool = False, axes_dims_rope: List[int] = [16, 56, 56], num_mode: int = None, conditioning_embedding_channels: int = None, ): super().__init__() self.out_channels = in_channels self.inner_dim = num_attention_heads * attention_head_dim self.pos_embed = FluxPosEmbed(theta=10000, axes_dim=axes_dims_rope) text_time_guidance_cls = ( CombinedTimestepGuidanceTextProjEmbeddings if guidance_embeds else CombinedTimestepTextProjEmbeddings ) self.time_text_embed = text_time_guidance_cls( embedding_dim=self.inner_dim, pooled_projection_dim=pooled_projection_dim ) self.context_embedder = nn.Linear(joint_attention_dim, self.inner_dim) self.x_embedder = torch.nn.Linear(in_channels, self.inner_dim) self.transformer_blocks = nn.ModuleList( [ FluxTransformerBlock( dim=self.inner_dim, num_attention_heads=num_attention_heads, attention_head_dim=attention_head_dim, ) for i in range(num_layers) ] ) self.single_transformer_blocks = nn.ModuleList( [ FluxSingleTransformerBlock( dim=self.inner_dim, num_attention_heads=num_attention_heads, attention_head_dim=attention_head_dim, ) for i in range(num_single_layers) ] ) # controlnet_blocks self.controlnet_blocks = nn.ModuleList([]) for _ in range(len(self.transformer_blocks)): self.controlnet_blocks.append(zero_module(nn.Linear(self.inner_dim, self.inner_dim))) self.controlnet_single_blocks = nn.ModuleList([]) for _ in range(len(self.single_transformer_blocks)): self.controlnet_single_blocks.append(zero_module(nn.Linear(self.inner_dim, self.inner_dim))) self.union = num_mode is 
not None if self.union: self.controlnet_mode_embedder = nn.Embedding(num_mode, self.inner_dim) if conditioning_embedding_channels is not None: self.input_hint_block = ControlNetConditioningEmbedding( conditioning_embedding_channels=conditioning_embedding_channels, block_out_channels=(16, 16, 16, 16) ) self.controlnet_x_embedder = torch.nn.Linear(in_channels, self.inner_dim) else: self.input_hint_block = None self.controlnet_x_embedder = zero_module(torch.nn.Linear(in_channels, self.inner_dim)) self.gradient_checkpointing = False @property # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors def attn_processors(self): r""" Returns: `dict` of attention processors: A dictionary containing all attention processors used in the model with indexed by its weight name. """ # set recursively processors = {} def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): if hasattr(module, "get_processor"): processors[f"{name}.processor"] = module.get_processor() for sub_name, child in module.named_children(): fn_recursive_add_processors(f"{name}.{sub_name}", child, processors) return processors for name, module in self.named_children(): fn_recursive_add_processors(name, module, processors) return processors # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor def set_attn_processor(self, processor): r""" Sets the attention processor to use to compute attention. Parameters: processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`): The instantiated processor class or a dictionary of processor classes that will be set as the processor for **all** `Attention` layers. If `processor` is a dict, the key needs to define the path to the corresponding cross attention processor. This is strongly recommended when setting trainable attention processors. """ count = len(self.attn_processors.keys()) if isinstance(processor, dict) and len(processor) != count: raise ValueError( f"A dict of processors was passed, but the number of processors {len(processor)} does not match the" f" number of attention layers: {count}. Please make sure to pass {count} processor classes." 
) def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): if hasattr(module, "set_processor"): if not isinstance(processor, dict): module.set_processor(processor) else: module.set_processor(processor.pop(f"{name}.processor")) for sub_name, child in module.named_children(): fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor) for name, module in self.named_children(): fn_recursive_attn_processor(name, module, processor) @classmethod def from_transformer( cls, transformer, num_layers: int = 4, num_single_layers: int = 10, attention_head_dim: int = 128, num_attention_heads: int = 24, load_weights_from_transformer=True, ): config = dict(transformer.config) config["num_layers"] = num_layers config["num_single_layers"] = num_single_layers config["attention_head_dim"] = attention_head_dim config["num_attention_heads"] = num_attention_heads controlnet = cls.from_config(config) if load_weights_from_transformer: controlnet.pos_embed.load_state_dict(transformer.pos_embed.state_dict()) controlnet.time_text_embed.load_state_dict(transformer.time_text_embed.state_dict()) controlnet.context_embedder.load_state_dict(transformer.context_embedder.state_dict()) controlnet.x_embedder.load_state_dict(transformer.x_embedder.state_dict()) controlnet.transformer_blocks.load_state_dict(transformer.transformer_blocks.state_dict(), strict=False) controlnet.single_transformer_blocks.load_state_dict( transformer.single_transformer_blocks.state_dict(), strict=False ) controlnet.controlnet_x_embedder = zero_module(controlnet.controlnet_x_embedder) return controlnet def forward( self, hidden_states: torch.Tensor, controlnet_cond: torch.Tensor, controlnet_mode: torch.Tensor = None, conditioning_scale: float = 1.0, encoder_hidden_states: torch.Tensor = None, pooled_projections: torch.Tensor = None, timestep: torch.LongTensor = None, img_ids: torch.Tensor = None, txt_ids: torch.Tensor = None, guidance: torch.Tensor = None, joint_attention_kwargs: Optional[Dict[str, Any]] = None, return_dict: bool = True, ) -> Union[torch.FloatTensor, Transformer2DModelOutput]: """ The [`FluxTransformer2DModel`] forward method. Args: hidden_states (`torch.FloatTensor` of shape `(batch size, channel, height, width)`): Input `hidden_states`. controlnet_cond (`torch.Tensor`): The conditional input tensor of shape `(batch_size, sequence_length, hidden_size)`. controlnet_mode (`torch.Tensor`): The mode tensor of shape `(batch_size, 1)`. conditioning_scale (`float`, defaults to `1.0`): The scale factor for ControlNet outputs. encoder_hidden_states (`torch.FloatTensor` of shape `(batch size, sequence_len, embed_dims)`): Conditional embeddings (embeddings computed from the input conditions such as prompts) to use. pooled_projections (`torch.FloatTensor` of shape `(batch_size, projection_dim)`): Embeddings projected from the embeddings of input conditions. timestep ( `torch.LongTensor`): Used to indicate denoising step. block_controlnet_hidden_states: (`list` of `torch.Tensor`): A list of tensors that if specified are added to the residuals of transformer blocks. joint_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). 
return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~models.transformer_2d.Transformer2DModelOutput`] instead of a plain tuple. Returns: If `return_dict` is True, an [`~models.transformer_2d.Transformer2DModelOutput`] is returned, otherwise a `tuple` where the first element is the sample tensor. """ if joint_attention_kwargs is not None: joint_attention_kwargs = joint_attention_kwargs.copy() lora_scale = joint_attention_kwargs.pop("scale", 1.0) else: lora_scale = 1.0 if USE_PEFT_BACKEND: # weight the lora layers by setting `lora_scale` for each PEFT layer scale_lora_layers(self, lora_scale) else: if joint_attention_kwargs is not None and joint_attention_kwargs.get("scale", None) is not None: logger.warning( "Passing `scale` via `joint_attention_kwargs` when not using the PEFT backend is ineffective." ) hidden_states = self.x_embedder(hidden_states) if self.input_hint_block is not None: controlnet_cond = self.input_hint_block(controlnet_cond) batch_size, channels, height_pw, width_pw = controlnet_cond.shape height = height_pw // self.config.patch_size width = width_pw // self.config.patch_size controlnet_cond = controlnet_cond.reshape( batch_size, channels, height, self.config.patch_size, width, self.config.patch_size ) controlnet_cond = controlnet_cond.permute(0, 2, 4, 1, 3, 5) controlnet_cond = controlnet_cond.reshape(batch_size, height * width, -1) # add hidden_states = hidden_states + self.controlnet_x_embedder(controlnet_cond) timestep = timestep.to(hidden_states.dtype) * 1000 if guidance is not None: guidance = guidance.to(hidden_states.dtype) * 1000 else: guidance = None temb = ( self.time_text_embed(timestep, pooled_projections) if guidance is None else self.time_text_embed(timestep, guidance, pooled_projections) ) encoder_hidden_states = self.context_embedder(encoder_hidden_states) if txt_ids.ndim == 3: logger.warning( "Passing `txt_ids` 3d torch.Tensor is deprecated." "Please remove the batch dimension and pass it as a 2d torch Tensor" ) txt_ids = txt_ids[0] if img_ids.ndim == 3: logger.warning( "Passing `img_ids` 3d torch.Tensor is deprecated." 
"Please remove the batch dimension and pass it as a 2d torch Tensor" ) img_ids = img_ids[0] if self.union: # union mode if controlnet_mode is None: raise ValueError("`controlnet_mode` cannot be `None` when applying ControlNet-Union") # union mode emb controlnet_mode_emb = self.controlnet_mode_embedder(controlnet_mode) encoder_hidden_states = torch.cat([controlnet_mode_emb, encoder_hidden_states], dim=1) txt_ids = torch.cat([txt_ids[:1], txt_ids], dim=0) ids = torch.cat((txt_ids, img_ids), dim=0) image_rotary_emb = self.pos_embed(ids) block_samples = () for index_block, block in enumerate(self.transformer_blocks): if torch.is_grad_enabled() and self.gradient_checkpointing: encoder_hidden_states, hidden_states = self._gradient_checkpointing_func( block, hidden_states, encoder_hidden_states, temb, image_rotary_emb, ) else: encoder_hidden_states, hidden_states = block( hidden_states=hidden_states, encoder_hidden_states=encoder_hidden_states, temb=temb, image_rotary_emb=image_rotary_emb, ) block_samples = block_samples + (hidden_states,) single_block_samples = () for index_block, block in enumerate(self.single_transformer_blocks): if torch.is_grad_enabled() and self.gradient_checkpointing: encoder_hidden_states, hidden_states = self._gradient_checkpointing_func( block, hidden_states, encoder_hidden_states, temb, image_rotary_emb, ) else: encoder_hidden_states, hidden_states = block( hidden_states=hidden_states, encoder_hidden_states=encoder_hidden_states, temb=temb, image_rotary_emb=image_rotary_emb, ) single_block_samples = single_block_samples + (hidden_states,) # controlnet block controlnet_block_samples = () for block_sample, controlnet_block in zip(block_samples, self.controlnet_blocks): block_sample = controlnet_block(block_sample) controlnet_block_samples = controlnet_block_samples + (block_sample,) controlnet_single_block_samples = () for single_block_sample, controlnet_block in zip(single_block_samples, self.controlnet_single_blocks): single_block_sample = controlnet_block(single_block_sample) controlnet_single_block_samples = controlnet_single_block_samples + (single_block_sample,) # scaling controlnet_block_samples = [sample * conditioning_scale for sample in controlnet_block_samples] controlnet_single_block_samples = [sample * conditioning_scale for sample in controlnet_single_block_samples] controlnet_block_samples = None if len(controlnet_block_samples) == 0 else controlnet_block_samples controlnet_single_block_samples = ( None if len(controlnet_single_block_samples) == 0 else controlnet_single_block_samples ) if USE_PEFT_BACKEND: # remove `lora_scale` from each PEFT layer unscale_lora_layers(self, lora_scale) if not return_dict: return (controlnet_block_samples, controlnet_single_block_samples) return FluxControlNetOutput( controlnet_block_samples=controlnet_block_samples, controlnet_single_block_samples=controlnet_single_block_samples, ) class FluxMultiControlNetModel(ModelMixin): r""" `FluxMultiControlNetModel` wrapper class for Multi-FluxControlNetModel This module is a wrapper for multiple instances of the `FluxControlNetModel`. The `forward()` API is designed to be compatible with `FluxControlNetModel`. Args: controlnets (`List[FluxControlNetModel]`): Provides additional conditioning to the unet during the denoising process. You must set multiple `FluxControlNetModel` as a list. 
""" def __init__(self, controlnets): super().__init__() self.nets = nn.ModuleList(controlnets) def forward( self, hidden_states: torch.FloatTensor, controlnet_cond: List[torch.tensor], controlnet_mode: List[torch.tensor], conditioning_scale: List[float], encoder_hidden_states: torch.Tensor = None, pooled_projections: torch.Tensor = None, timestep: torch.LongTensor = None, img_ids: torch.Tensor = None, txt_ids: torch.Tensor = None, guidance: torch.Tensor = None, joint_attention_kwargs: Optional[Dict[str, Any]] = None, return_dict: bool = True, ) -> Union[FluxControlNetOutput, Tuple]: # ControlNet-Union with multiple conditions # only load one ControlNet for saving memories if len(self.nets) == 1: controlnet = self.nets[0] for i, (image, mode, scale) in enumerate(zip(controlnet_cond, controlnet_mode, conditioning_scale)): block_samples, single_block_samples = controlnet( hidden_states=hidden_states, controlnet_cond=image, controlnet_mode=mode[:, None], conditioning_scale=scale, timestep=timestep, guidance=guidance, pooled_projections=pooled_projections, encoder_hidden_states=encoder_hidden_states, txt_ids=txt_ids, img_ids=img_ids, joint_attention_kwargs=joint_attention_kwargs, return_dict=return_dict, ) # merge samples if i == 0: control_block_samples = block_samples control_single_block_samples = single_block_samples else: if block_samples is not None and control_block_samples is not None: control_block_samples = [ control_block_sample + block_sample for control_block_sample, block_sample in zip(control_block_samples, block_samples) ] if single_block_samples is not None and control_single_block_samples is not None: control_single_block_samples = [ control_single_block_sample + block_sample for control_single_block_sample, block_sample in zip( control_single_block_samples, single_block_samples ) ] # Regular Multi-ControlNets # load all ControlNets into memories else: for i, (image, mode, scale, controlnet) in enumerate( zip(controlnet_cond, controlnet_mode, conditioning_scale, self.nets) ): block_samples, single_block_samples = controlnet( hidden_states=hidden_states, controlnet_cond=image, controlnet_mode=mode[:, None], conditioning_scale=scale, timestep=timestep, guidance=guidance, pooled_projections=pooled_projections, encoder_hidden_states=encoder_hidden_states, txt_ids=txt_ids, img_ids=img_ids, joint_attention_kwargs=joint_attention_kwargs, return_dict=return_dict, ) # merge samples if i == 0: control_block_samples = block_samples control_single_block_samples = single_block_samples else: if block_samples is not None and control_block_samples is not None: control_block_samples = [ control_block_sample + block_sample for control_block_sample, block_sample in zip(control_block_samples, block_samples) ] if single_block_samples is not None and control_single_block_samples is not None: control_single_block_samples = [ control_single_block_sample + block_sample for control_single_block_sample, block_sample in zip( control_single_block_samples, single_block_samples ) ] return control_block_samples, control_single_block_samples
diffusers/src/diffusers/models/controlnets/controlnet_flux.py/0
{ "file_path": "diffusers/src/diffusers/models/controlnets/controlnet_flux.py", "repo_id": "diffusers", "token_count": 10519 }
163
# coding=utf-8 # Copyright 2025 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from pickle import UnpicklingError from typing import Any, Dict, Union import jax import jax.numpy as jnp import msgpack.exceptions from flax.core.frozen_dict import FrozenDict, unfreeze from flax.serialization import from_bytes, to_bytes from flax.traverse_util import flatten_dict, unflatten_dict from huggingface_hub import create_repo, hf_hub_download from huggingface_hub.utils import ( EntryNotFoundError, RepositoryNotFoundError, RevisionNotFoundError, validate_hf_hub_args, ) from requests import HTTPError from .. import __version__, is_torch_available from ..utils import ( CONFIG_NAME, FLAX_WEIGHTS_NAME, HUGGINGFACE_CO_RESOLVE_ENDPOINT, WEIGHTS_NAME, PushToHubMixin, logging, ) from .modeling_flax_pytorch_utils import convert_pytorch_state_dict_to_flax logger = logging.get_logger(__name__) class FlaxModelMixin(PushToHubMixin): r""" Base class for all Flax models. [`FlaxModelMixin`] takes care of storing the model configuration and provides methods for loading, downloading and saving models. - **config_name** ([`str`]) -- Filename to save a model to when calling [`~FlaxModelMixin.save_pretrained`]. """ config_name = CONFIG_NAME _automatically_saved_args = ["_diffusers_version", "_class_name", "_name_or_path"] _flax_internal_args = ["name", "parent", "dtype"] @classmethod def _from_config(cls, config, **kwargs): """ All context managers that the model should be initialized under go here. """ return cls(config, **kwargs) def _cast_floating_to(self, params: Union[Dict, FrozenDict], dtype: jnp.dtype, mask: Any = None) -> Any: """ Helper method to cast floating-point values of given parameter `PyTree` to given `dtype`. """ # taken from https://github.com/deepmind/jmp/blob/3a8318abc3292be38582794dbf7b094e6583b192/jmp/_src/policy.py#L27 def conditional_cast(param): if isinstance(param, jnp.ndarray) and jnp.issubdtype(param.dtype, jnp.floating): param = param.astype(dtype) return param if mask is None: return jax.tree_map(conditional_cast, params) flat_params = flatten_dict(params) flat_mask, _ = jax.tree_flatten(mask) for masked, key in zip(flat_mask, flat_params.keys()): if masked: param = flat_params[key] flat_params[key] = conditional_cast(param) return unflatten_dict(flat_params) def to_bf16(self, params: Union[Dict, FrozenDict], mask: Any = None): r""" Cast the floating-point `params` to `jax.numpy.bfloat16`. This returns a new `params` tree and does not cast the `params` in place. This method can be used on a TPU to explicitly convert the model parameters to bfloat16 precision to do full half-precision training or to save weights in bfloat16 for inference in order to save memory and improve speed. Arguments: params (`Union[Dict, FrozenDict]`): A `PyTree` of model parameters. mask (`Union[Dict, FrozenDict]`): A `PyTree` with same structure as the `params` tree. The leaves should be booleans. It should be `True` for params you want to cast, and `False` for those you want to skip. 
Examples: ```python >>> from diffusers import FlaxUNet2DConditionModel >>> # load model >>> model, params = FlaxUNet2DConditionModel.from_pretrained("runwayml/stable-diffusion-v1-5") >>> # By default, the model parameters will be in fp32 precision, to cast these to bfloat16 precision >>> params = model.to_bf16(params) >>> # If you don't want to cast certain parameters (for example layer norm bias and scale) >>> # then pass the mask as follows >>> from flax import traverse_util >>> model, params = FlaxUNet2DConditionModel.from_pretrained("runwayml/stable-diffusion-v1-5") >>> flat_params = traverse_util.flatten_dict(params) >>> mask = { ... path: (path[-2:] != ("LayerNorm", "bias") and path[-2:] != ("LayerNorm", "scale")) ... for path in flat_params ... } >>> mask = traverse_util.unflatten_dict(mask) >>> params = model.to_bf16(params, mask) ```""" return self._cast_floating_to(params, jnp.bfloat16, mask) def to_fp32(self, params: Union[Dict, FrozenDict], mask: Any = None): r""" Cast the floating-point `params` to `jax.numpy.float32`. This method can be used to explicitly convert the model parameters to fp32 precision. This returns a new `params` tree and does not cast the `params` in place. Arguments: params (`Union[Dict, FrozenDict]`): A `PyTree` of model parameters. mask (`Union[Dict, FrozenDict]`): A `PyTree` with same structure as the `params` tree. The leaves should be booleans. It should be `True` for params you want to cast, and `False` for those you want to skip. Examples: ```python >>> from diffusers import FlaxUNet2DConditionModel >>> # Download model and configuration from huggingface.co >>> model, params = FlaxUNet2DConditionModel.from_pretrained("runwayml/stable-diffusion-v1-5") >>> # By default, the model params will be in fp32, to illustrate the use of this method, >>> # we'll first cast to fp16 and back to fp32 >>> params = model.to_fp16(params) >>> # now cast back to fp32 >>> params = model.to_fp32(params) ```""" return self._cast_floating_to(params, jnp.float32, mask) def to_fp16(self, params: Union[Dict, FrozenDict], mask: Any = None): r""" Cast the floating-point `params` to `jax.numpy.float16`. This returns a new `params` tree and does not cast the `params` in place. This method can be used on a GPU to explicitly convert the model parameters to float16 precision to do full half-precision training or to save weights in float16 for inference in order to save memory and improve speed. Arguments: params (`Union[Dict, FrozenDict]`): A `PyTree` of model parameters. mask (`Union[Dict, FrozenDict]`): A `PyTree` with same structure as the `params` tree. The leaves should be booleans. It should be `True` for params you want to cast, and `False` for those you want to skip. Examples: ```python >>> from diffusers import FlaxUNet2DConditionModel >>> # load model >>> model, params = FlaxUNet2DConditionModel.from_pretrained("runwayml/stable-diffusion-v1-5") >>> # By default, the model params will be in fp32, to cast these to float16 >>> params = model.to_fp16(params) >>> # If you don't want to cast certain parameters (for example layer norm bias and scale) >>> # then pass the mask as follows >>> from flax import traverse_util >>> model, params = FlaxUNet2DConditionModel.from_pretrained("runwayml/stable-diffusion-v1-5") >>> flat_params = traverse_util.flatten_dict(params) >>> mask = { ... path: (path[-2:] != ("LayerNorm", "bias") and path[-2:] != ("LayerNorm", "scale")) ... for path in flat_params ...
} >>> mask = traverse_util.unflatten_dict(mask) >>> params = model.to_fp16(params, mask) ```""" return self._cast_floating_to(params, jnp.float16, mask) def init_weights(self, rng: jax.Array) -> Dict: raise NotImplementedError(f"init_weights method has to be implemented for {self}") @classmethod @validate_hf_hub_args def from_pretrained( cls, pretrained_model_name_or_path: Union[str, os.PathLike], dtype: jnp.dtype = jnp.float32, *model_args, **kwargs, ): r""" Instantiate a pretrained Flax model from a pretrained model configuration. Parameters: pretrained_model_name_or_path (`str` or `os.PathLike`): Can be either: - A string, the *model id* (for example `runwayml/stable-diffusion-v1-5`) of a pretrained model hosted on the Hub. - A path to a *directory* (for example `./my_model_directory`) containing the model weights saved using [`~FlaxModelMixin.save_pretrained`]. dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`): The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and `jax.numpy.bfloat16` (on TPUs). This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If specified, all the computation will be performed with the given `dtype`. <Tip> This only specifies the dtype of the *computation* and does not influence the dtype of model parameters. If you wish to change the dtype of the model parameters, see [`~FlaxModelMixin.to_fp16`] and [`~FlaxModelMixin.to_bf16`]. </Tip> model_args (sequence of positional arguments, *optional*): All remaining positional arguments are passed to the underlying model's `__init__` method. cache_dir (`Union[str, os.PathLike]`, *optional*): Path to a directory where a downloaded pretrained model configuration is cached if the standard cache is not used. force_download (`bool`, *optional*, defaults to `False`): Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. local_files_only(`bool`, *optional*, defaults to `False`): Whether to only load local model weights and configuration files or not. If set to `True`, the model won't be downloaded from the Hub. revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier allowed by Git. from_pt (`bool`, *optional*, defaults to `False`): Load the model weights from a PyTorch checkpoint save file. kwargs (remaining dictionary of keyword arguments, *optional*): Can be used to update the configuration object (after it is loaded) and initiate the model (for example, `output_attentions=True`). Behaves differently depending on whether a `config` is provided or automatically loaded: - If a configuration is provided with `config`, `kwargs` are directly passed to the underlying model's `__init__` method (we assume all relevant updates to the configuration have already been done). - If a configuration is not provided, `kwargs` are first passed to the configuration class initialization function [`~ConfigMixin.from_config`]. Each key of the `kwargs` that corresponds to a configuration attribute is used to override said attribute with the supplied `kwargs` value. 
Remaining keys that do not correspond to any configuration attribute are passed to the underlying model's `__init__` function. Examples: ```python >>> from diffusers import FlaxUNet2DConditionModel >>> # Download model and configuration from huggingface.co and cache. >>> model, params = FlaxUNet2DConditionModel.from_pretrained("runwayml/stable-diffusion-v1-5") >>> # Model was saved using *save_pretrained('./test/saved_model/')* (for example purposes, not runnable). >>> model, params = FlaxUNet2DConditionModel.from_pretrained("./test/saved_model/") ``` If you get the error message below, you need to finetune the weights for your downstream task: ```bash Some weights of UNet2DConditionModel were not initialized from the model checkpoint at runwayml/stable-diffusion-v1-5 and are newly initialized because the shapes did not match: - conv_in.weight: found shape torch.Size([320, 4, 3, 3]) in the checkpoint and torch.Size([320, 9, 3, 3]) in the model instantiated You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. ``` """ config = kwargs.pop("config", None) cache_dir = kwargs.pop("cache_dir", None) force_download = kwargs.pop("force_download", False) from_pt = kwargs.pop("from_pt", False) proxies = kwargs.pop("proxies", None) local_files_only = kwargs.pop("local_files_only", False) token = kwargs.pop("token", None) revision = kwargs.pop("revision", None) subfolder = kwargs.pop("subfolder", None) user_agent = { "diffusers": __version__, "file_type": "model", "framework": "flax", } # Load config if we don't provide one if config is None: config, unused_kwargs = cls.load_config( pretrained_model_name_or_path, cache_dir=cache_dir, return_unused_kwargs=True, force_download=force_download, proxies=proxies, local_files_only=local_files_only, token=token, revision=revision, subfolder=subfolder, **kwargs, ) model, model_kwargs = cls.from_config(config, dtype=dtype, return_unused_kwargs=True, **unused_kwargs) # Load model pretrained_path_with_subfolder = ( pretrained_model_name_or_path if subfolder is None else os.path.join(pretrained_model_name_or_path, subfolder) ) if os.path.isdir(pretrained_path_with_subfolder): if from_pt: if not os.path.isfile(os.path.join(pretrained_path_with_subfolder, WEIGHTS_NAME)): raise EnvironmentError( f"Error no file named {WEIGHTS_NAME} found in directory {pretrained_path_with_subfolder} " ) model_file = os.path.join(pretrained_path_with_subfolder, WEIGHTS_NAME) elif os.path.isfile(os.path.join(pretrained_path_with_subfolder, FLAX_WEIGHTS_NAME)): # Load from a Flax checkpoint model_file = os.path.join(pretrained_path_with_subfolder, FLAX_WEIGHTS_NAME) # Check if pytorch weights exist instead elif os.path.isfile(os.path.join(pretrained_path_with_subfolder, WEIGHTS_NAME)): raise EnvironmentError( f"{WEIGHTS_NAME} file found in directory {pretrained_path_with_subfolder}. Please load the model" " using `from_pt=True`." ) else: raise EnvironmentError( f"Error no file named {FLAX_WEIGHTS_NAME} or {WEIGHTS_NAME} found in directory " f"{pretrained_path_with_subfolder}." 
) else: try: model_file = hf_hub_download( pretrained_model_name_or_path, filename=FLAX_WEIGHTS_NAME if not from_pt else WEIGHTS_NAME, cache_dir=cache_dir, force_download=force_download, proxies=proxies, local_files_only=local_files_only, token=token, user_agent=user_agent, subfolder=subfolder, revision=revision, ) except RepositoryNotFoundError: raise EnvironmentError( f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier " "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a " "token having permission to this repo with `token` or log in with `hf auth login`." ) except RevisionNotFoundError: raise EnvironmentError( f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for " "this model name. Check the model page at " f"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions." ) except EntryNotFoundError: raise EnvironmentError( f"{pretrained_model_name_or_path} does not appear to have a file named {FLAX_WEIGHTS_NAME}." ) except HTTPError as err: raise EnvironmentError( f"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n" f"{err}" ) except ValueError: raise EnvironmentError( f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it" f" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a" f" directory containing a file named {FLAX_WEIGHTS_NAME} or {WEIGHTS_NAME}.\nCheckout your" " internet connection or see how to run the library in offline mode at" " 'https://huggingface.co/docs/transformers/installation#offline-mode'." ) except EnvironmentError: raise EnvironmentError( f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from " "'https://huggingface.co/models', make sure you don't have a local directory with the same name. " f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory " f"containing a file named {FLAX_WEIGHTS_NAME} or {WEIGHTS_NAME}." ) if from_pt: if is_torch_available(): from .modeling_utils import load_state_dict else: raise EnvironmentError( "Can't load the model in PyTorch format because PyTorch is not installed. " "Please, install PyTorch or use native Flax weights." ) # Step 1: Get the pytorch file pytorch_model_file = load_state_dict(model_file) # Step 2: Convert the weights state = convert_pytorch_state_dict_to_flax(pytorch_model_file, model) else: try: with open(model_file, "rb") as state_f: state = from_bytes(cls, state_f.read()) except (UnpicklingError, msgpack.exceptions.ExtraData) as e: try: with open(model_file) as f: if f.read().startswith("version"): raise OSError( "You seem to have cloned a repository without having git-lfs installed. Please" " install git-lfs and run `git lfs install` followed by `git lfs pull` in the" " folder you cloned." ) else: raise ValueError from e except (UnicodeDecodeError, ValueError): raise EnvironmentError(f"Unable to convert {model_file} to Flax deserializable object. 
") # make sure all arrays are stored as jnp.ndarray # NOTE: This is to prevent a bug this will be fixed in Flax >= v0.3.4: # https://github.com/google/flax/issues/1261 state = jax.tree_util.tree_map(lambda x: jax.device_put(x, jax.local_devices(backend="cpu")[0]), state) # flatten dicts state = flatten_dict(state) params_shape_tree = jax.eval_shape(model.init_weights, rng=jax.random.PRNGKey(0)) required_params = set(flatten_dict(unfreeze(params_shape_tree)).keys()) shape_state = flatten_dict(unfreeze(params_shape_tree)) missing_keys = required_params - set(state.keys()) unexpected_keys = set(state.keys()) - required_params if missing_keys: logger.warning( f"The checkpoint {pretrained_model_name_or_path} is missing required keys: {missing_keys}. " "Make sure to call model.init_weights to initialize the missing weights." ) cls._missing_keys = missing_keys for key in state.keys(): if key in shape_state and state[key].shape != shape_state[key].shape: raise ValueError( f"Trying to load the pretrained weight for {key} failed: checkpoint has shape " f"{state[key].shape} which is incompatible with the model shape {shape_state[key].shape}. " ) # remove unexpected keys to not be saved again for unexpected_key in unexpected_keys: del state[unexpected_key] if len(unexpected_keys) > 0: logger.warning( f"Some weights of the model checkpoint at {pretrained_model_name_or_path} were not used when" f" initializing {model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are" f" initializing {model.__class__.__name__} from the checkpoint of a model trained on another task or" " with another architecture." ) else: logger.info(f"All model checkpoint weights were used when initializing {model.__class__.__name__}.\n") if len(missing_keys) > 0: logger.warning( f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at" f" {pretrained_model_name_or_path} and are newly initialized: {missing_keys}\nYou should probably" " TRAIN this model on a down-stream task to be able to use it for predictions and inference." ) else: logger.info( f"All the weights of {model.__class__.__name__} were initialized from the model checkpoint at" f" {pretrained_model_name_or_path}.\nIf your task is similar to the task the model of the checkpoint" f" was trained on, you can already use {model.__class__.__name__} for predictions without further" " training." ) return model, unflatten_dict(state) def save_pretrained( self, save_directory: Union[str, os.PathLike], params: Union[Dict, FrozenDict], is_main_process: bool = True, push_to_hub: bool = False, **kwargs, ): """ Save a model and its configuration file to a directory so that it can be reloaded using the [`~FlaxModelMixin.from_pretrained`] class method. Arguments: save_directory (`str` or `os.PathLike`): Directory to save a model and its configuration file to. Will be created if it doesn't exist. params (`Union[Dict, FrozenDict]`): A `PyTree` of model parameters. is_main_process (`bool`, *optional*, defaults to `True`): Whether the process calling this is the main process or not. Useful during distributed training and you need to call this function on all processes. In this case, set `is_main_process=True` only on the main process to avoid race conditions. push_to_hub (`bool`, *optional*, defaults to `False`): Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the repository you want to push to with `repo_id` (will default to the name of `save_directory` in your namespace). 
kwargs (`Dict[str, Any]`, *optional*): Additional key word arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method. """ if os.path.isfile(save_directory): logger.error(f"Provided path ({save_directory}) should be a directory, not a file") return os.makedirs(save_directory, exist_ok=True) if push_to_hub: commit_message = kwargs.pop("commit_message", None) private = kwargs.pop("private", None) create_pr = kwargs.pop("create_pr", False) token = kwargs.pop("token", None) repo_id = kwargs.pop("repo_id", save_directory.split(os.path.sep)[-1]) repo_id = create_repo(repo_id, exist_ok=True, private=private, token=token).repo_id model_to_save = self # Attach architecture to the config # Save the config if is_main_process: model_to_save.save_config(save_directory) # save model output_model_file = os.path.join(save_directory, FLAX_WEIGHTS_NAME) with open(output_model_file, "wb") as f: model_bytes = to_bytes(params) f.write(model_bytes) logger.info(f"Model weights saved in {output_model_file}") if push_to_hub: self._upload_folder( save_directory, repo_id, token=token, commit_message=commit_message, create_pr=create_pr, )
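# Illustrative usage sketch: the load -> cast -> save round trip that `FlaxModelMixin` provides,
# written as user-side code. The checkpoint id and the `subfolder="unet"` layout mirror the
# docstring examples above and are assumptions here; running this needs jax/flax and either
# network access or a locally cached checkpoint.
if __name__ == "__main__":
    from diffusers import FlaxUNet2DConditionModel

    model, params = FlaxUNet2DConditionModel.from_pretrained(
        "runwayml/stable-diffusion-v1-5", subfolder="unet"
    )

    # Cast floating-point leaves to bfloat16; this returns a new PyTree and leaves `params` untouched.
    bf16_params = model.to_bf16(params)

    # Persist the config plus msgpack weights; reload later with `from_pretrained("./unet-flax-bf16")`.
    model.save_pretrained("./unet-flax-bf16", params=bf16_params)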
diffusers/src/diffusers/models/modeling_flax_utils.py/0
{ "file_path": "diffusers/src/diffusers/models/modeling_flax_utils.py", "repo_id": "diffusers", "token_count": 11784 }
164
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Any, Dict, Optional, Union import torch from torch import nn from ...configuration_utils import ConfigMixin, register_to_config from ...utils import logging from ..attention import BasicTransformerBlock from ..attention_processor import Attention, AttentionProcessor, AttnProcessor, FusedAttnProcessor2_0 from ..embeddings import PatchEmbed, PixArtAlphaTextProjection from ..modeling_outputs import Transformer2DModelOutput from ..modeling_utils import ModelMixin from ..normalization import AdaLayerNormSingle logger = logging.get_logger(__name__) # pylint: disable=invalid-name class PixArtTransformer2DModel(ModelMixin, ConfigMixin): r""" A 2D Transformer model as introduced in PixArt family of models (https://huggingface.co/papers/2310.00426, https://huggingface.co/papers/2403.04692). Parameters: num_attention_heads (int, optional, defaults to 16): The number of heads to use for multi-head attention. attention_head_dim (int, optional, defaults to 72): The number of channels in each head. in_channels (int, defaults to 4): The number of channels in the input. out_channels (int, optional): The number of channels in the output. Specify this parameter if the output channel number differs from the input. num_layers (int, optional, defaults to 28): The number of layers of Transformer blocks to use. dropout (float, optional, defaults to 0.0): The dropout probability to use within the Transformer blocks. norm_num_groups (int, optional, defaults to 32): Number of groups for group normalization within Transformer blocks. cross_attention_dim (int, optional): The dimensionality for cross-attention layers, typically matching the encoder's hidden dimension. attention_bias (bool, optional, defaults to True): Configure if the Transformer blocks' attention should contain a bias parameter. sample_size (int, defaults to 128): The width of the latent images. This parameter is fixed during training. patch_size (int, defaults to 2): Size of the patches the model processes, relevant for architectures working on non-sequential data. activation_fn (str, optional, defaults to "gelu-approximate"): Activation function to use in feed-forward networks within Transformer blocks. num_embeds_ada_norm (int, optional, defaults to 1000): Number of embeddings for AdaLayerNorm, fixed during training and affects the maximum denoising steps during inference. upcast_attention (bool, optional, defaults to False): If true, upcasts the attention mechanism dimensions for potentially improved performance. norm_type (str, optional, defaults to "ada_norm_zero"): Specifies the type of normalization used, can be 'ada_norm_zero'. norm_elementwise_affine (bool, optional, defaults to False): If true, enables element-wise affine parameters in the normalization layers. norm_eps (float, optional, defaults to 1e-6): A small constant added to the denominator in normalization layers to prevent division by zero. 
interpolation_scale (int, optional): Scale factor to use during interpolating the position embeddings. use_additional_conditions (bool, optional): If we're using additional conditions as inputs. attention_type (str, optional, defaults to "default"): Kind of attention mechanism to be used. caption_channels (int, optional, defaults to None): Number of channels to use for projecting the caption embeddings. use_linear_projection (bool, optional, defaults to False): Deprecated argument. Will be removed in a future version. num_vector_embeds (bool, optional, defaults to False): Deprecated argument. Will be removed in a future version. """ _supports_gradient_checkpointing = True _no_split_modules = ["BasicTransformerBlock", "PatchEmbed"] _skip_layerwise_casting_patterns = ["pos_embed", "norm", "adaln_single"] @register_to_config def __init__( self, num_attention_heads: int = 16, attention_head_dim: int = 72, in_channels: int = 4, out_channels: Optional[int] = 8, num_layers: int = 28, dropout: float = 0.0, norm_num_groups: int = 32, cross_attention_dim: Optional[int] = 1152, attention_bias: bool = True, sample_size: int = 128, patch_size: int = 2, activation_fn: str = "gelu-approximate", num_embeds_ada_norm: Optional[int] = 1000, upcast_attention: bool = False, norm_type: str = "ada_norm_single", norm_elementwise_affine: bool = False, norm_eps: float = 1e-6, interpolation_scale: Optional[int] = None, use_additional_conditions: Optional[bool] = None, caption_channels: Optional[int] = None, attention_type: Optional[str] = "default", ): super().__init__() # Validate inputs. if norm_type != "ada_norm_single": raise NotImplementedError( f"Forward pass is not implemented when `patch_size` is not None and `norm_type` is '{norm_type}'." ) elif norm_type == "ada_norm_single" and num_embeds_ada_norm is None: raise ValueError( f"When using a `patch_size` and this `norm_type` ({norm_type}), `num_embeds_ada_norm` cannot be None." ) # Set some common variables used across the board. self.attention_head_dim = attention_head_dim self.inner_dim = self.config.num_attention_heads * self.config.attention_head_dim self.out_channels = in_channels if out_channels is None else out_channels if use_additional_conditions is None: if sample_size == 128: use_additional_conditions = True else: use_additional_conditions = False self.use_additional_conditions = use_additional_conditions self.gradient_checkpointing = False # 2. Initialize the position embedding and transformer blocks. 
self.height = self.config.sample_size self.width = self.config.sample_size interpolation_scale = ( self.config.interpolation_scale if self.config.interpolation_scale is not None else max(self.config.sample_size // 64, 1) ) self.pos_embed = PatchEmbed( height=self.config.sample_size, width=self.config.sample_size, patch_size=self.config.patch_size, in_channels=self.config.in_channels, embed_dim=self.inner_dim, interpolation_scale=interpolation_scale, ) self.transformer_blocks = nn.ModuleList( [ BasicTransformerBlock( self.inner_dim, self.config.num_attention_heads, self.config.attention_head_dim, dropout=self.config.dropout, cross_attention_dim=self.config.cross_attention_dim, activation_fn=self.config.activation_fn, num_embeds_ada_norm=self.config.num_embeds_ada_norm, attention_bias=self.config.attention_bias, upcast_attention=self.config.upcast_attention, norm_type=norm_type, norm_elementwise_affine=self.config.norm_elementwise_affine, norm_eps=self.config.norm_eps, attention_type=self.config.attention_type, ) for _ in range(self.config.num_layers) ] ) # 3. Output blocks. self.norm_out = nn.LayerNorm(self.inner_dim, elementwise_affine=False, eps=1e-6) self.scale_shift_table = nn.Parameter(torch.randn(2, self.inner_dim) / self.inner_dim**0.5) self.proj_out = nn.Linear(self.inner_dim, self.config.patch_size * self.config.patch_size * self.out_channels) self.adaln_single = AdaLayerNormSingle( self.inner_dim, use_additional_conditions=self.use_additional_conditions ) self.caption_projection = None if self.config.caption_channels is not None: self.caption_projection = PixArtAlphaTextProjection( in_features=self.config.caption_channels, hidden_size=self.inner_dim ) @property # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors def attn_processors(self) -> Dict[str, AttentionProcessor]: r""" Returns: `dict` of attention processors: A dictionary containing all attention processors used in the model with indexed by its weight name. """ # set recursively processors = {} def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): if hasattr(module, "get_processor"): processors[f"{name}.processor"] = module.get_processor() for sub_name, child in module.named_children(): fn_recursive_add_processors(f"{name}.{sub_name}", child, processors) return processors for name, module in self.named_children(): fn_recursive_add_processors(name, module, processors) return processors # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): r""" Sets the attention processor to use to compute attention. Parameters: processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`): The instantiated processor class or a dictionary of processor classes that will be set as the processor for **all** `Attention` layers. If `processor` is a dict, the key needs to define the path to the corresponding cross attention processor. This is strongly recommended when setting trainable attention processors. """ count = len(self.attn_processors.keys()) if isinstance(processor, dict) and len(processor) != count: raise ValueError( f"A dict of processors was passed, but the number of processors {len(processor)} does not match the" f" number of attention layers: {count}. Please make sure to pass {count} processor classes." 
) def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): if hasattr(module, "set_processor"): if not isinstance(processor, dict): module.set_processor(processor) else: module.set_processor(processor.pop(f"{name}.processor")) for sub_name, child in module.named_children(): fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor) for name, module in self.named_children(): fn_recursive_attn_processor(name, module, processor) def set_default_attn_processor(self): """ Disables custom attention processors and sets the default attention implementation. Safe to just use `AttnProcessor()` as PixArt doesn't have any exotic attention processors in default model. """ self.set_attn_processor(AttnProcessor()) # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.fuse_qkv_projections def fuse_qkv_projections(self): """ Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query, key, value) are fused. For cross-attention modules, key and value projection matrices are fused. <Tip warning={true}> This API is 🧪 experimental. </Tip> """ self.original_attn_processors = None for _, attn_processor in self.attn_processors.items(): if "Added" in str(attn_processor.__class__.__name__): raise ValueError("`fuse_qkv_projections()` is not supported for models having added KV projections.") self.original_attn_processors = self.attn_processors for module in self.modules(): if isinstance(module, Attention): module.fuse_projections(fuse=True) self.set_attn_processor(FusedAttnProcessor2_0()) # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.unfuse_qkv_projections def unfuse_qkv_projections(self): """Disables the fused QKV projection if enabled. <Tip warning={true}> This API is 🧪 experimental. </Tip> """ if self.original_attn_processors is not None: self.set_attn_processor(self.original_attn_processors) def forward( self, hidden_states: torch.Tensor, encoder_hidden_states: Optional[torch.Tensor] = None, timestep: Optional[torch.LongTensor] = None, added_cond_kwargs: Dict[str, torch.Tensor] = None, cross_attention_kwargs: Dict[str, Any] = None, attention_mask: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, return_dict: bool = True, ): """ The [`PixArtTransformer2DModel`] forward method. Args: hidden_states (`torch.FloatTensor` of shape `(batch size, channel, height, width)`): Input `hidden_states`. encoder_hidden_states (`torch.FloatTensor` of shape `(batch size, sequence len, embed dims)`, *optional*): Conditional embeddings for cross attention layer. If not given, cross-attention defaults to self-attention. timestep (`torch.LongTensor`, *optional*): Used to indicate denoising step. Optional timestep to be applied as an embedding in `AdaLayerNorm`. added_cond_kwargs: (`Dict[str, Any]`, *optional*): Additional conditions to be used as inputs. cross_attention_kwargs ( `Dict[str, Any]`, *optional*): A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). attention_mask ( `torch.Tensor`, *optional*): An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. If `1` the mask is kept, otherwise if `0` it is discarded. Mask will be converted into a bias, which adds large negative values to the attention scores corresponding to "discard" tokens. 
encoder_attention_mask ( `torch.Tensor`, *optional*): Cross-attention mask applied to `encoder_hidden_states`. Two formats supported: * Mask `(batch, sequence_length)` True = keep, False = discard. * Bias `(batch, 1, sequence_length)` 0 = keep, -10000 = discard. If `ndim == 2`: will be interpreted as a mask, then converted into a bias consistent with the format above. This bias will be added to the cross-attention scores. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~models.unets.unet_2d_condition.UNet2DConditionOutput`] instead of a plain tuple. Returns: If `return_dict` is True, an [`~models.transformer_2d.Transformer2DModelOutput`] is returned, otherwise a `tuple` where the first element is the sample tensor. """ if self.use_additional_conditions and added_cond_kwargs is None: raise ValueError("`added_cond_kwargs` cannot be None when using additional conditions for `adaln_single`.") # ensure attention_mask is a bias, and give it a singleton query_tokens dimension. # we may have done this conversion already, e.g. if we came here via UNet2DConditionModel#forward. # we can tell by counting dims; if ndim == 2: it's a mask rather than a bias. # expects mask of shape: # [batch, key_tokens] # adds singleton query_tokens dimension: # [batch, 1, key_tokens] # this helps to broadcast it as a bias over attention scores, which will be in one of the following shapes: # [batch, heads, query_tokens, key_tokens] (e.g. torch sdp attn) # [batch * heads, query_tokens, key_tokens] (e.g. xformers or classic attn) if attention_mask is not None and attention_mask.ndim == 2: # assume that mask is expressed as: # (1 = keep, 0 = discard) # convert mask into a bias that can be added to attention scores: # (keep = +0, discard = -10000.0) attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0 attention_mask = attention_mask.unsqueeze(1) # convert encoder_attention_mask to a bias the same way we do for attention_mask if encoder_attention_mask is not None and encoder_attention_mask.ndim == 2: encoder_attention_mask = (1 - encoder_attention_mask.to(hidden_states.dtype)) * -10000.0 encoder_attention_mask = encoder_attention_mask.unsqueeze(1) # 1. Input batch_size = hidden_states.shape[0] height, width = ( hidden_states.shape[-2] // self.config.patch_size, hidden_states.shape[-1] // self.config.patch_size, ) hidden_states = self.pos_embed(hidden_states) timestep, embedded_timestep = self.adaln_single( timestep, added_cond_kwargs, batch_size=batch_size, hidden_dtype=hidden_states.dtype ) if self.caption_projection is not None: encoder_hidden_states = self.caption_projection(encoder_hidden_states) encoder_hidden_states = encoder_hidden_states.view(batch_size, -1, hidden_states.shape[-1]) # 2. Blocks for block in self.transformer_blocks: if torch.is_grad_enabled() and self.gradient_checkpointing: hidden_states = self._gradient_checkpointing_func( block, hidden_states, attention_mask, encoder_hidden_states, encoder_attention_mask, timestep, cross_attention_kwargs, None, ) else: hidden_states = block( hidden_states, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, timestep=timestep, cross_attention_kwargs=cross_attention_kwargs, class_labels=None, ) # 3. 
Output shift, scale = ( self.scale_shift_table[None] + embedded_timestep[:, None].to(self.scale_shift_table.device) ).chunk(2, dim=1) hidden_states = self.norm_out(hidden_states) # Modulation hidden_states = hidden_states * (1 + scale.to(hidden_states.device)) + shift.to(hidden_states.device) hidden_states = self.proj_out(hidden_states) hidden_states = hidden_states.squeeze(1) # unpatchify hidden_states = hidden_states.reshape( shape=(-1, height, width, self.config.patch_size, self.config.patch_size, self.out_channels) ) hidden_states = torch.einsum("nhwpqc->nchpwq", hidden_states) output = hidden_states.reshape( shape=(-1, self.out_channels, height * self.config.patch_size, width * self.config.patch_size) ) if not return_dict: return (output,) return Transformer2DModelOutput(sample=output)
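# Minimal usage sketch for the forward pass above. Assumptions: the hub checkpoint
# "PixArt-alpha/PixArt-XL-2-1024-MS" (which stores this transformer under its "transformer"
# subfolder) and the illustrative shapes below -- 128x128 latents for a 1024px image, T5
# embeddings of width 4096, and resolution/aspect-ratio micro-conditions, which are needed
# here because sample_size == 128 enables `use_additional_conditions`.
import torch

from diffusers import PixArtTransformer2DModel

model = PixArtTransformer2DModel.from_pretrained(
    "PixArt-alpha/PixArt-XL-2-1024-MS", subfolder="transformer"
)
model.eval()

latents = torch.randn(1, 4, 128, 128)      # (batch, in_channels, height, width)
prompt_embeds = torch.randn(1, 120, 4096)  # (batch, seq_len, caption_channels)
timestep = torch.tensor([999])
added_cond_kwargs = {
    "resolution": torch.tensor([[1024.0, 1024.0]]),  # (batch, 2)
    "aspect_ratio": torch.tensor([[1.0]]),           # (batch, 1)
}

with torch.no_grad():
    sample = model(
        hidden_states=latents,
        encoder_hidden_states=prompt_embeds,
        timestep=timestep,
        added_cond_kwargs=added_cond_kwargs,
    ).sample

print(sample.shape)  # torch.Size([1, 8, 128, 128]): 4 predicted channels + 4 learned-variance channels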
diffusers/src/diffusers/models/transformers/pixart_transformer_2d.py/0
{ "file_path": "diffusers/src/diffusers/models/transformers/pixart_transformer_2d.py", "repo_id": "diffusers", "token_count": 8739 }
165
# Copyright 2025 The Framepack Team, The Hunyuan Team and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Any, Dict, List, Optional, Tuple import torch import torch.nn as nn import torch.nn.functional as F from ...configuration_utils import ConfigMixin, register_to_config from ...loaders import FromOriginalModelMixin, PeftAdapterMixin from ...utils import USE_PEFT_BACKEND, get_logger, scale_lora_layers, unscale_lora_layers from ..cache_utils import CacheMixin from ..embeddings import get_1d_rotary_pos_embed from ..modeling_outputs import Transformer2DModelOutput from ..modeling_utils import ModelMixin from ..normalization import AdaLayerNormContinuous from .transformer_hunyuan_video import ( HunyuanVideoConditionEmbedding, HunyuanVideoPatchEmbed, HunyuanVideoSingleTransformerBlock, HunyuanVideoTokenRefiner, HunyuanVideoTransformerBlock, ) logger = get_logger(__name__) # pylint: disable=invalid-name class HunyuanVideoFramepackRotaryPosEmbed(nn.Module): def __init__(self, patch_size: int, patch_size_t: int, rope_dim: List[int], theta: float = 256.0) -> None: super().__init__() self.patch_size = patch_size self.patch_size_t = patch_size_t self.rope_dim = rope_dim self.theta = theta def forward(self, frame_indices: torch.Tensor, height: int, width: int, device: torch.device): height = height // self.patch_size width = width // self.patch_size grid = torch.meshgrid( frame_indices.to(device=device, dtype=torch.float32), torch.arange(0, height, device=device, dtype=torch.float32), torch.arange(0, width, device=device, dtype=torch.float32), indexing="ij", ) # 3 * [W, H, T] grid = torch.stack(grid, dim=0) # [3, W, H, T] freqs = [] for i in range(3): freq = get_1d_rotary_pos_embed(self.rope_dim[i], grid[i].reshape(-1), self.theta, use_real=True) freqs.append(freq) freqs_cos = torch.cat([f[0] for f in freqs], dim=1) # (W * H * T, D / 2) freqs_sin = torch.cat([f[1] for f in freqs], dim=1) # (W * H * T, D / 2) return freqs_cos, freqs_sin class FramepackClipVisionProjection(nn.Module): def __init__(self, in_channels: int, out_channels: int): super().__init__() self.up = nn.Linear(in_channels, out_channels * 3) self.down = nn.Linear(out_channels * 3, out_channels) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.up(hidden_states) hidden_states = F.silu(hidden_states) hidden_states = self.down(hidden_states) return hidden_states class HunyuanVideoHistoryPatchEmbed(nn.Module): def __init__(self, in_channels: int, inner_dim: int): super().__init__() self.proj = nn.Conv3d(in_channels, inner_dim, kernel_size=(1, 2, 2), stride=(1, 2, 2)) self.proj_2x = nn.Conv3d(in_channels, inner_dim, kernel_size=(2, 4, 4), stride=(2, 4, 4)) self.proj_4x = nn.Conv3d(in_channels, inner_dim, kernel_size=(4, 8, 8), stride=(4, 8, 8)) def forward( self, latents_clean: Optional[torch.Tensor] = None, latents_clean_2x: Optional[torch.Tensor] = None, latents_clean_4x: Optional[torch.Tensor] = None, ): if latents_clean is not None: latents_clean = 
self.proj(latents_clean) latents_clean = latents_clean.flatten(2).transpose(1, 2) if latents_clean_2x is not None: latents_clean_2x = _pad_for_3d_conv(latents_clean_2x, (2, 4, 4)) latents_clean_2x = self.proj_2x(latents_clean_2x) latents_clean_2x = latents_clean_2x.flatten(2).transpose(1, 2) if latents_clean_4x is not None: latents_clean_4x = _pad_for_3d_conv(latents_clean_4x, (4, 8, 8)) latents_clean_4x = self.proj_4x(latents_clean_4x) latents_clean_4x = latents_clean_4x.flatten(2).transpose(1, 2) return latents_clean, latents_clean_2x, latents_clean_4x class HunyuanVideoFramepackTransformer3DModel( ModelMixin, ConfigMixin, PeftAdapterMixin, FromOriginalModelMixin, CacheMixin ): _supports_gradient_checkpointing = True _skip_layerwise_casting_patterns = ["x_embedder", "context_embedder", "norm"] _no_split_modules = [ "HunyuanVideoTransformerBlock", "HunyuanVideoSingleTransformerBlock", "HunyuanVideoHistoryPatchEmbed", "HunyuanVideoTokenRefiner", ] @register_to_config def __init__( self, in_channels: int = 16, out_channels: int = 16, num_attention_heads: int = 24, attention_head_dim: int = 128, num_layers: int = 20, num_single_layers: int = 40, num_refiner_layers: int = 2, mlp_ratio: float = 4.0, patch_size: int = 2, patch_size_t: int = 1, qk_norm: str = "rms_norm", guidance_embeds: bool = True, text_embed_dim: int = 4096, pooled_projection_dim: int = 768, rope_theta: float = 256.0, rope_axes_dim: Tuple[int] = (16, 56, 56), image_condition_type: Optional[str] = None, has_image_proj: int = False, image_proj_dim: int = 1152, has_clean_x_embedder: int = False, ) -> None: super().__init__() inner_dim = num_attention_heads * attention_head_dim out_channels = out_channels or in_channels # 1. Latent and condition embedders self.x_embedder = HunyuanVideoPatchEmbed((patch_size_t, patch_size, patch_size), in_channels, inner_dim) # Framepack history projection embedder self.clean_x_embedder = None if has_clean_x_embedder: self.clean_x_embedder = HunyuanVideoHistoryPatchEmbed(in_channels, inner_dim) self.context_embedder = HunyuanVideoTokenRefiner( text_embed_dim, num_attention_heads, attention_head_dim, num_layers=num_refiner_layers ) # Framepack image-conditioning embedder self.image_projection = FramepackClipVisionProjection(image_proj_dim, inner_dim) if has_image_proj else None self.time_text_embed = HunyuanVideoConditionEmbedding( inner_dim, pooled_projection_dim, guidance_embeds, image_condition_type ) # 2. RoPE self.rope = HunyuanVideoFramepackRotaryPosEmbed(patch_size, patch_size_t, rope_axes_dim, rope_theta) # 3. Dual stream transformer blocks self.transformer_blocks = nn.ModuleList( [ HunyuanVideoTransformerBlock( num_attention_heads, attention_head_dim, mlp_ratio=mlp_ratio, qk_norm=qk_norm ) for _ in range(num_layers) ] ) # 4. Single stream transformer blocks self.single_transformer_blocks = nn.ModuleList( [ HunyuanVideoSingleTransformerBlock( num_attention_heads, attention_head_dim, mlp_ratio=mlp_ratio, qk_norm=qk_norm ) for _ in range(num_single_layers) ] ) # 5. 
Output projection self.norm_out = AdaLayerNormContinuous(inner_dim, inner_dim, elementwise_affine=False, eps=1e-6) self.proj_out = nn.Linear(inner_dim, patch_size_t * patch_size * patch_size * out_channels) self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, timestep: torch.LongTensor, encoder_hidden_states: torch.Tensor, encoder_attention_mask: torch.Tensor, pooled_projections: torch.Tensor, image_embeds: torch.Tensor, indices_latents: torch.Tensor, guidance: Optional[torch.Tensor] = None, latents_clean: Optional[torch.Tensor] = None, indices_latents_clean: Optional[torch.Tensor] = None, latents_history_2x: Optional[torch.Tensor] = None, indices_latents_history_2x: Optional[torch.Tensor] = None, latents_history_4x: Optional[torch.Tensor] = None, indices_latents_history_4x: Optional[torch.Tensor] = None, attention_kwargs: Optional[Dict[str, Any]] = None, return_dict: bool = True, ): if attention_kwargs is not None: attention_kwargs = attention_kwargs.copy() lora_scale = attention_kwargs.pop("scale", 1.0) else: lora_scale = 1.0 if USE_PEFT_BACKEND: # weight the lora layers by setting `lora_scale` for each PEFT layer scale_lora_layers(self, lora_scale) else: if attention_kwargs is not None and attention_kwargs.get("scale", None) is not None: logger.warning( "Passing `scale` via `attention_kwargs` when not using the PEFT backend is ineffective." ) batch_size, num_channels, num_frames, height, width = hidden_states.shape p, p_t = self.config.patch_size, self.config.patch_size_t post_patch_num_frames = num_frames // p_t post_patch_height = height // p post_patch_width = width // p original_context_length = post_patch_num_frames * post_patch_height * post_patch_width if indices_latents is None: indices_latents = torch.arange(0, num_frames).unsqueeze(0).expand(batch_size, -1) hidden_states = self.x_embedder(hidden_states) image_rotary_emb = self.rope( frame_indices=indices_latents, height=height, width=width, device=hidden_states.device ) latents_clean, latents_history_2x, latents_history_4x = self.clean_x_embedder( latents_clean, latents_history_2x, latents_history_4x ) if latents_clean is not None and indices_latents_clean is not None: image_rotary_emb_clean = self.rope( frame_indices=indices_latents_clean, height=height, width=width, device=hidden_states.device ) if latents_history_2x is not None and indices_latents_history_2x is not None: image_rotary_emb_history_2x = self.rope( frame_indices=indices_latents_history_2x, height=height, width=width, device=hidden_states.device ) if latents_history_4x is not None and indices_latents_history_4x is not None: image_rotary_emb_history_4x = self.rope( frame_indices=indices_latents_history_4x, height=height, width=width, device=hidden_states.device ) hidden_states, image_rotary_emb = self._pack_history_states( hidden_states, latents_clean, latents_history_2x, latents_history_4x, image_rotary_emb, image_rotary_emb_clean, image_rotary_emb_history_2x, image_rotary_emb_history_4x, post_patch_height, post_patch_width, ) temb, _ = self.time_text_embed(timestep, pooled_projections, guidance) encoder_hidden_states = self.context_embedder(encoder_hidden_states, timestep, encoder_attention_mask) encoder_hidden_states_image = self.image_projection(image_embeds) attention_mask_image = encoder_attention_mask.new_ones((batch_size, encoder_hidden_states_image.shape[1])) # must cat before (not after) encoder_hidden_states, due to attn masking encoder_hidden_states = torch.cat([encoder_hidden_states_image, encoder_hidden_states], dim=1) 
encoder_attention_mask = torch.cat([attention_mask_image, encoder_attention_mask], dim=1) latent_sequence_length = hidden_states.shape[1] condition_sequence_length = encoder_hidden_states.shape[1] sequence_length = latent_sequence_length + condition_sequence_length attention_mask = torch.zeros( batch_size, sequence_length, device=hidden_states.device, dtype=torch.bool ) # [B, N] effective_condition_sequence_length = encoder_attention_mask.sum(dim=1, dtype=torch.int) # [B,] effective_sequence_length = latent_sequence_length + effective_condition_sequence_length if batch_size == 1: encoder_hidden_states = encoder_hidden_states[:, : effective_condition_sequence_length[0]] attention_mask = None else: for i in range(batch_size): attention_mask[i, : effective_sequence_length[i]] = True # [B, 1, 1, N], for broadcasting across attention heads attention_mask = attention_mask.unsqueeze(1).unsqueeze(1) if torch.is_grad_enabled() and self.gradient_checkpointing: for block in self.transformer_blocks: hidden_states, encoder_hidden_states = self._gradient_checkpointing_func( block, hidden_states, encoder_hidden_states, temb, attention_mask, image_rotary_emb ) for block in self.single_transformer_blocks: hidden_states, encoder_hidden_states = self._gradient_checkpointing_func( block, hidden_states, encoder_hidden_states, temb, attention_mask, image_rotary_emb ) else: for block in self.transformer_blocks: hidden_states, encoder_hidden_states = block( hidden_states, encoder_hidden_states, temb, attention_mask, image_rotary_emb ) for block in self.single_transformer_blocks: hidden_states, encoder_hidden_states = block( hidden_states, encoder_hidden_states, temb, attention_mask, image_rotary_emb ) hidden_states = hidden_states[:, -original_context_length:] hidden_states = self.norm_out(hidden_states, temb) hidden_states = self.proj_out(hidden_states) hidden_states = hidden_states.reshape( batch_size, post_patch_num_frames, post_patch_height, post_patch_width, -1, p_t, p, p ) hidden_states = hidden_states.permute(0, 4, 1, 5, 2, 6, 3, 7) hidden_states = hidden_states.flatten(6, 7).flatten(4, 5).flatten(2, 3) if USE_PEFT_BACKEND: # remove `lora_scale` from each PEFT layer unscale_lora_layers(self, lora_scale) if not return_dict: return (hidden_states,) return Transformer2DModelOutput(sample=hidden_states) def _pack_history_states( self, hidden_states: torch.Tensor, latents_clean: Optional[torch.Tensor] = None, latents_history_2x: Optional[torch.Tensor] = None, latents_history_4x: Optional[torch.Tensor] = None, image_rotary_emb: Tuple[torch.Tensor, torch.Tensor] = None, image_rotary_emb_clean: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, image_rotary_emb_history_2x: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, image_rotary_emb_history_4x: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, height: int = None, width: int = None, ): image_rotary_emb = list(image_rotary_emb) # convert tuple to list for in-place modification if latents_clean is not None and image_rotary_emb_clean is not None: hidden_states = torch.cat([latents_clean, hidden_states], dim=1) image_rotary_emb[0] = torch.cat([image_rotary_emb_clean[0], image_rotary_emb[0]], dim=0) image_rotary_emb[1] = torch.cat([image_rotary_emb_clean[1], image_rotary_emb[1]], dim=0) if latents_history_2x is not None and image_rotary_emb_history_2x is not None: hidden_states = torch.cat([latents_history_2x, hidden_states], dim=1) image_rotary_emb_history_2x = self._pad_rotary_emb(image_rotary_emb_history_2x, height, width, (2, 2, 2)) image_rotary_emb[0] = 
torch.cat([image_rotary_emb_history_2x[0], image_rotary_emb[0]], dim=0) image_rotary_emb[1] = torch.cat([image_rotary_emb_history_2x[1], image_rotary_emb[1]], dim=0) if latents_history_4x is not None and image_rotary_emb_history_4x is not None: hidden_states = torch.cat([latents_history_4x, hidden_states], dim=1) image_rotary_emb_history_4x = self._pad_rotary_emb(image_rotary_emb_history_4x, height, width, (4, 4, 4)) image_rotary_emb[0] = torch.cat([image_rotary_emb_history_4x[0], image_rotary_emb[0]], dim=0) image_rotary_emb[1] = torch.cat([image_rotary_emb_history_4x[1], image_rotary_emb[1]], dim=0) return hidden_states, tuple(image_rotary_emb) def _pad_rotary_emb( self, image_rotary_emb: Tuple[torch.Tensor], height: int, width: int, kernel_size: Tuple[int, int, int], ): # freqs_cos, freqs_sin have shape [W * H * T, D / 2], where D is attention head dim freqs_cos, freqs_sin = image_rotary_emb freqs_cos = freqs_cos.unsqueeze(0).permute(0, 2, 1).unflatten(2, (-1, height, width)) freqs_sin = freqs_sin.unsqueeze(0).permute(0, 2, 1).unflatten(2, (-1, height, width)) freqs_cos = _pad_for_3d_conv(freqs_cos, kernel_size) freqs_sin = _pad_for_3d_conv(freqs_sin, kernel_size) freqs_cos = _center_down_sample_3d(freqs_cos, kernel_size) freqs_sin = _center_down_sample_3d(freqs_sin, kernel_size) freqs_cos = freqs_cos.flatten(2).permute(0, 2, 1).squeeze(0) freqs_sin = freqs_sin.flatten(2).permute(0, 2, 1).squeeze(0) return freqs_cos, freqs_sin def _pad_for_3d_conv(x, kernel_size): if isinstance(x, (tuple, list)): return tuple(_pad_for_3d_conv(i, kernel_size) for i in x) b, c, t, h, w = x.shape pt, ph, pw = kernel_size pad_t = (pt - (t % pt)) % pt pad_h = (ph - (h % ph)) % ph pad_w = (pw - (w % pw)) % pw return torch.nn.functional.pad(x, (0, pad_w, 0, pad_h, 0, pad_t), mode="replicate") def _center_down_sample_3d(x, kernel_size): if isinstance(x, (tuple, list)): return tuple(_center_down_sample_3d(i, kernel_size) for i in x) return torch.nn.functional.avg_pool3d(x, kernel_size, stride=kernel_size)
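# Small sketch of the multi-scale history compression used above. Assumptions: diffusers is
# installed and these names are imported from this module's path (they are not re-exported at the
# top level); the latent sizes are illustrative, not taken from a real checkpoint.
import torch

from diffusers.models.transformers.transformer_hunyuan_video_framepack import (
    HunyuanVideoHistoryPatchEmbed,
    _pad_for_3d_conv,
)

embed = HunyuanVideoHistoryPatchEmbed(in_channels=16, inner_dim=64)
history = torch.randn(1, 16, 2, 8, 8)  # (batch, channels, frames, height, width)

# The same history latents become progressively shorter token sequences at 1x, 2x and 4x compression.
clean, hist_2x, hist_4x = embed(history, history, history)
print(clean.shape)    # torch.Size([1, 32, 64])  -> 2 * 4 * 4 tokens
print(hist_2x.shape)  # torch.Size([1, 4, 64])   -> 1 * 2 * 2 tokens
print(hist_4x.shape)  # torch.Size([1, 1, 64])   -> 1 * 1 * 1 token

# _pad_for_3d_conv replicate-pads (frames, height, width) up to multiples of the kernel size.
padded = _pad_for_3d_conv(torch.randn(1, 16, 3, 7, 9), (2, 4, 4))
print(padded.shape)   # torch.Size([1, 16, 4, 8, 12])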
diffusers/src/diffusers/models/transformers/transformer_hunyuan_video_framepack.py/0
{ "file_path": "diffusers/src/diffusers/models/transformers/transformer_hunyuan_video_framepack.py", "repo_id": "diffusers", "token_count": 8440 }
166
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import flax.linen as nn import jax.numpy as jnp from ..attention_flax import FlaxTransformer2DModel from ..resnet_flax import FlaxDownsample2D, FlaxResnetBlock2D, FlaxUpsample2D class FlaxCrossAttnDownBlock2D(nn.Module): r""" Cross Attention 2D Downsizing block - original architecture from Unet transformers: https://huggingface.co/papers/2103.06104 Parameters: in_channels (:obj:`int`): Input channels out_channels (:obj:`int`): Output channels dropout (:obj:`float`, *optional*, defaults to 0.0): Dropout rate num_layers (:obj:`int`, *optional*, defaults to 1): Number of attention blocks layers num_attention_heads (:obj:`int`, *optional*, defaults to 1): Number of attention heads of each spatial transformer block add_downsample (:obj:`bool`, *optional*, defaults to `True`): Whether to add downsampling layer before each final output use_memory_efficient_attention (`bool`, *optional*, defaults to `False`): enable memory efficient attention https://huggingface.co/papers/2112.05682 split_head_dim (`bool`, *optional*, defaults to `False`): Whether to split the head dimension into a new axis for the self-attention computation. In most cases, enabling this flag should speed up the computation for Stable Diffusion 2.x and Stable Diffusion XL. 
dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32): Parameters `dtype` """ in_channels: int out_channels: int dropout: float = 0.0 num_layers: int = 1 num_attention_heads: int = 1 add_downsample: bool = True use_linear_projection: bool = False only_cross_attention: bool = False use_memory_efficient_attention: bool = False split_head_dim: bool = False dtype: jnp.dtype = jnp.float32 transformer_layers_per_block: int = 1 def setup(self): resnets = [] attentions = [] for i in range(self.num_layers): in_channels = self.in_channels if i == 0 else self.out_channels res_block = FlaxResnetBlock2D( in_channels=in_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype, ) resnets.append(res_block) attn_block = FlaxTransformer2DModel( in_channels=self.out_channels, n_heads=self.num_attention_heads, d_head=self.out_channels // self.num_attention_heads, depth=self.transformer_layers_per_block, use_linear_projection=self.use_linear_projection, only_cross_attention=self.only_cross_attention, use_memory_efficient_attention=self.use_memory_efficient_attention, split_head_dim=self.split_head_dim, dtype=self.dtype, ) attentions.append(attn_block) self.resnets = resnets self.attentions = attentions if self.add_downsample: self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype) def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True): output_states = () for resnet, attn in zip(self.resnets, self.attentions): hidden_states = resnet(hidden_states, temb, deterministic=deterministic) hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic) output_states += (hidden_states,) if self.add_downsample: hidden_states = self.downsamplers_0(hidden_states) output_states += (hidden_states,) return hidden_states, output_states class FlaxDownBlock2D(nn.Module): r""" Flax 2D downsizing block Parameters: in_channels (:obj:`int`): Input channels out_channels (:obj:`int`): Output channels dropout (:obj:`float`, *optional*, defaults to 0.0): Dropout rate num_layers (:obj:`int`, *optional*, defaults to 1): Number of attention blocks layers add_downsample (:obj:`bool`, *optional*, defaults to `True`): Whether to add downsampling layer before each final output dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32): Parameters `dtype` """ in_channels: int out_channels: int dropout: float = 0.0 num_layers: int = 1 add_downsample: bool = True dtype: jnp.dtype = jnp.float32 def setup(self): resnets = [] for i in range(self.num_layers): in_channels = self.in_channels if i == 0 else self.out_channels res_block = FlaxResnetBlock2D( in_channels=in_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype, ) resnets.append(res_block) self.resnets = resnets if self.add_downsample: self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype) def __call__(self, hidden_states, temb, deterministic=True): output_states = () for resnet in self.resnets: hidden_states = resnet(hidden_states, temb, deterministic=deterministic) output_states += (hidden_states,) if self.add_downsample: hidden_states = self.downsamplers_0(hidden_states) output_states += (hidden_states,) return hidden_states, output_states class FlaxCrossAttnUpBlock2D(nn.Module): r""" Cross Attention 2D Upsampling block - original architecture from Unet transformers: https://huggingface.co/papers/2103.06104 Parameters: in_channels (:obj:`int`): Input channels out_channels (:obj:`int`): Output channels dropout (:obj:`float`, 
*optional*, defaults to 0.0): Dropout rate num_layers (:obj:`int`, *optional*, defaults to 1): Number of attention blocks layers num_attention_heads (:obj:`int`, *optional*, defaults to 1): Number of attention heads of each spatial transformer block add_upsample (:obj:`bool`, *optional*, defaults to `True`): Whether to add upsampling layer before each final output use_memory_efficient_attention (`bool`, *optional*, defaults to `False`): enable memory efficient attention https://huggingface.co/papers/2112.05682 split_head_dim (`bool`, *optional*, defaults to `False`): Whether to split the head dimension into a new axis for the self-attention computation. In most cases, enabling this flag should speed up the computation for Stable Diffusion 2.x and Stable Diffusion XL. dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32): Parameters `dtype` """ in_channels: int out_channels: int prev_output_channel: int dropout: float = 0.0 num_layers: int = 1 num_attention_heads: int = 1 add_upsample: bool = True use_linear_projection: bool = False only_cross_attention: bool = False use_memory_efficient_attention: bool = False split_head_dim: bool = False dtype: jnp.dtype = jnp.float32 transformer_layers_per_block: int = 1 def setup(self): resnets = [] attentions = [] for i in range(self.num_layers): res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels res_block = FlaxResnetBlock2D( in_channels=resnet_in_channels + res_skip_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype, ) resnets.append(res_block) attn_block = FlaxTransformer2DModel( in_channels=self.out_channels, n_heads=self.num_attention_heads, d_head=self.out_channels // self.num_attention_heads, depth=self.transformer_layers_per_block, use_linear_projection=self.use_linear_projection, only_cross_attention=self.only_cross_attention, use_memory_efficient_attention=self.use_memory_efficient_attention, split_head_dim=self.split_head_dim, dtype=self.dtype, ) attentions.append(attn_block) self.resnets = resnets self.attentions = attentions if self.add_upsample: self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype) def __call__(self, hidden_states, res_hidden_states_tuple, temb, encoder_hidden_states, deterministic=True): for resnet, attn in zip(self.resnets, self.attentions): # pop res hidden states res_hidden_states = res_hidden_states_tuple[-1] res_hidden_states_tuple = res_hidden_states_tuple[:-1] hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1) hidden_states = resnet(hidden_states, temb, deterministic=deterministic) hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic) if self.add_upsample: hidden_states = self.upsamplers_0(hidden_states) return hidden_states class FlaxUpBlock2D(nn.Module): r""" Flax 2D upsampling block Parameters: in_channels (:obj:`int`): Input channels out_channels (:obj:`int`): Output channels prev_output_channel (:obj:`int`): Output channels from the previous block dropout (:obj:`float`, *optional*, defaults to 0.0): Dropout rate num_layers (:obj:`int`, *optional*, defaults to 1): Number of attention blocks layers add_downsample (:obj:`bool`, *optional*, defaults to `True`): Whether to add downsampling layer before each final output dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32): Parameters `dtype` """ in_channels: int out_channels: int prev_output_channel: int dropout: float = 
0.0 num_layers: int = 1 add_upsample: bool = True dtype: jnp.dtype = jnp.float32 def setup(self): resnets = [] for i in range(self.num_layers): res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels res_block = FlaxResnetBlock2D( in_channels=resnet_in_channels + res_skip_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype, ) resnets.append(res_block) self.resnets = resnets if self.add_upsample: self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype) def __call__(self, hidden_states, res_hidden_states_tuple, temb, deterministic=True): for resnet in self.resnets: # pop res hidden states res_hidden_states = res_hidden_states_tuple[-1] res_hidden_states_tuple = res_hidden_states_tuple[:-1] hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1) hidden_states = resnet(hidden_states, temb, deterministic=deterministic) if self.add_upsample: hidden_states = self.upsamplers_0(hidden_states) return hidden_states class FlaxUNetMidBlock2DCrossAttn(nn.Module): r""" Cross Attention 2D Mid-level block - original architecture from Unet transformers: https://huggingface.co/papers/2103.06104 Parameters: in_channels (:obj:`int`): Input channels dropout (:obj:`float`, *optional*, defaults to 0.0): Dropout rate num_layers (:obj:`int`, *optional*, defaults to 1): Number of attention blocks layers num_attention_heads (:obj:`int`, *optional*, defaults to 1): Number of attention heads of each spatial transformer block use_memory_efficient_attention (`bool`, *optional*, defaults to `False`): enable memory efficient attention https://huggingface.co/papers/2112.05682 split_head_dim (`bool`, *optional*, defaults to `False`): Whether to split the head dimension into a new axis for the self-attention computation. In most cases, enabling this flag should speed up the computation for Stable Diffusion 2.x and Stable Diffusion XL. 
dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32): Parameters `dtype` """ in_channels: int dropout: float = 0.0 num_layers: int = 1 num_attention_heads: int = 1 use_linear_projection: bool = False use_memory_efficient_attention: bool = False split_head_dim: bool = False dtype: jnp.dtype = jnp.float32 transformer_layers_per_block: int = 1 def setup(self): # there is always at least one resnet resnets = [ FlaxResnetBlock2D( in_channels=self.in_channels, out_channels=self.in_channels, dropout_prob=self.dropout, dtype=self.dtype, ) ] attentions = [] for _ in range(self.num_layers): attn_block = FlaxTransformer2DModel( in_channels=self.in_channels, n_heads=self.num_attention_heads, d_head=self.in_channels // self.num_attention_heads, depth=self.transformer_layers_per_block, use_linear_projection=self.use_linear_projection, use_memory_efficient_attention=self.use_memory_efficient_attention, split_head_dim=self.split_head_dim, dtype=self.dtype, ) attentions.append(attn_block) res_block = FlaxResnetBlock2D( in_channels=self.in_channels, out_channels=self.in_channels, dropout_prob=self.dropout, dtype=self.dtype, ) resnets.append(res_block) self.resnets = resnets self.attentions = attentions def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True): hidden_states = self.resnets[0](hidden_states, temb) for attn, resnet in zip(self.attentions, self.resnets[1:]): hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic) hidden_states = resnet(hidden_states, temb, deterministic=deterministic) return hidden_states
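# Minimal init/apply sketch for one of the blocks above. Assumptions: jax and flax are installed,
# and the shapes are illustrative. These Flax blocks take channels-last (batch, height, width,
# channels) inputs, and channel counts should be divisible by the 32 GroupNorm groups used inside
# FlaxResnetBlock2D.
import jax
import jax.numpy as jnp

from diffusers.models.unets.unet_2d_blocks_flax import FlaxCrossAttnDownBlock2D

block = FlaxCrossAttnDownBlock2D(in_channels=32, out_channels=64, num_layers=1, num_attention_heads=2)

sample = jnp.ones((1, 16, 16, 32))               # latent feature map, NHWC
temb = jnp.ones((1, 128))                        # timestep embedding, projected inside each resnet
encoder_hidden_states = jnp.ones((1, 77, 768))   # text conditioning for the cross-attention layers

params = block.init(jax.random.PRNGKey(0), sample, temb, encoder_hidden_states)
hidden_states, output_states = block.apply(params, sample, temb, encoder_hidden_states)

print(hidden_states.shape)               # (1, 8, 8, 64) after the stride-2 downsampler
print([s.shape for s in output_states])  # one residual per layer plus the downsampled state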
diffusers/src/diffusers/models/unets/unet_2d_blocks_flax.py/0
{ "file_path": "diffusers/src/diffusers/models/unets/unet_2d_blocks_flax.py", "repo_id": "diffusers", "token_count": 6971 }
167
from typing import TYPE_CHECKING

from ...utils import (
    DIFFUSERS_SLOW_IMPORT,
    OptionalDependencyNotAvailable,
    _LazyModule,
    get_objects_from_module,
    is_torch_available,
    is_transformers_available,
)


_dummy_objects = {}
_import_structure = {}

try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils import dummy_torch_and_transformers_objects  # noqa F403

    _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects))
else:
    _import_structure["encoders"] = ["FluxTextEncoderStep"]
    _import_structure["modular_blocks"] = [
        "ALL_BLOCKS",
        "AUTO_BLOCKS",
        "TEXT2IMAGE_BLOCKS",
        "FluxAutoBeforeDenoiseStep",
        "FluxAutoBlocks",
        "FluxAutoDecodeStep",
        "FluxAutoDenoiseStep",
    ]
    _import_structure["modular_pipeline"] = ["FluxModularPipeline"]

if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
    try:
        if not (is_transformers_available() and is_torch_available()):
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
    else:
        from .encoders import FluxTextEncoderStep
        from .modular_blocks import (
            ALL_BLOCKS,
            AUTO_BLOCKS,
            TEXT2IMAGE_BLOCKS,
            FluxAutoBeforeDenoiseStep,
            FluxAutoBlocks,
            FluxAutoDecodeStep,
            FluxAutoDenoiseStep,
        )
        from .modular_pipeline import FluxModularPipeline

else:
    import sys

    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()["__file__"],
        _import_structure,
        module_spec=__spec__,
    )

    for name, value in _dummy_objects.items():
        setattr(sys.modules[__name__], name, value)
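# Quick illustration of what the lazy-module wiring above provides (assumes torch and transformers
# are installed so the real import structure, not the dummy objects, is registered): the submodules
# listed in `_import_structure` are only imported when one of their declared names is accessed.
import diffusers.modular_pipelines.flux as flux_modular

print(type(flux_modular))                 # typically the _LazyModule proxy registered in sys.modules
blocks_cls = flux_modular.FluxAutoBlocks  # first attribute access triggers the real submodule import
pipeline_cls = flux_modular.FluxModularPipeline
print(blocks_cls.__name__, pipeline_cls.__name__)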
diffusers/src/diffusers/modular_pipelines/flux/__init__.py/0
{ "file_path": "diffusers/src/diffusers/modular_pipelines/flux/__init__.py", "repo_id": "diffusers", "token_count": 868 }
168
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import List, Optional, Tuple, Union import numpy as np import PIL import torch from ...image_processor import PipelineImageInput from ...loaders import ModularIPAdapterMixin, StableDiffusionXLLoraLoaderMixin, TextualInversionLoaderMixin from ...pipelines.pipeline_utils import StableDiffusionMixin from ...pipelines.stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput from ...utils import logging from ..modular_pipeline import ModularPipeline from ..modular_pipeline_utils import InputParam, OutputParam logger = logging.get_logger(__name__) # pylint: disable=invalid-name # YiYi TODO: move to a different file? stable_diffusion_xl_module should have its own folder? # YiYi Notes: model specific components: ## (1) it should inherit from ModularPipeline ## (2) acts like a container that holds components and configs ## (3) define default config (related to components), e.g. default_sample_size, vae_scale_factor, num_channels_unet, num_channels_latents ## (4) inherit from model-specic loader class (e.g. StableDiffusionXLLoraLoaderMixin) ## (5) how to use together with Components_manager? class StableDiffusionXLModularPipeline( ModularPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, StableDiffusionXLLoraLoaderMixin, ModularIPAdapterMixin, ): """ A ModularPipeline for Stable Diffusion XL. <Tip warning={true}> This is an experimental feature and is likely to change in the future. 
</Tip> """ @property def default_height(self): return self.default_sample_size * self.vae_scale_factor @property def default_width(self): return self.default_sample_size * self.vae_scale_factor @property def default_sample_size(self): default_sample_size = 128 if hasattr(self, "unet") and self.unet is not None: default_sample_size = self.unet.config.sample_size return default_sample_size @property def vae_scale_factor(self): vae_scale_factor = 8 if hasattr(self, "vae") and self.vae is not None: vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) return vae_scale_factor @property def num_channels_unet(self): num_channels_unet = 4 if hasattr(self, "unet") and self.unet is not None: num_channels_unet = self.unet.config.in_channels return num_channels_unet @property def num_channels_latents(self): num_channels_latents = 4 if hasattr(self, "vae") and self.vae is not None: num_channels_latents = self.vae.config.latent_channels return num_channels_latents # YiYi/Sayak TODO: not used yet, maintain a list of schema that can be used across all pipeline blocks # auto_docstring SDXL_INPUTS_SCHEMA = { "prompt": InputParam( "prompt", type_hint=Union[str, List[str]], description="The prompt or prompts to guide the image generation" ), "prompt_2": InputParam( "prompt_2", type_hint=Union[str, List[str]], description="The prompt or prompts to be sent to the tokenizer_2 and text_encoder_2", ), "negative_prompt": InputParam( "negative_prompt", type_hint=Union[str, List[str]], description="The prompt or prompts not to guide the image generation", ), "negative_prompt_2": InputParam( "negative_prompt_2", type_hint=Union[str, List[str]], description="The negative prompt or prompts for text_encoder_2", ), "cross_attention_kwargs": InputParam( "cross_attention_kwargs", type_hint=Optional[dict], description="Kwargs dictionary passed to the AttentionProcessor", ), "clip_skip": InputParam( "clip_skip", type_hint=Optional[int], description="Number of layers to skip in CLIP text encoder" ), "image": InputParam( "image", type_hint=PipelineImageInput, required=True, description="The image(s) to modify for img2img or inpainting", ), "mask_image": InputParam( "mask_image", type_hint=PipelineImageInput, required=True, description="Mask image for inpainting, white pixels will be repainted", ), "generator": InputParam( "generator", type_hint=Optional[Union[torch.Generator, List[torch.Generator]]], description="Generator(s) for deterministic generation", ), "height": InputParam("height", type_hint=Optional[int], description="Height in pixels of the generated image"), "width": InputParam("width", type_hint=Optional[int], description="Width in pixels of the generated image"), "num_images_per_prompt": InputParam( "num_images_per_prompt", type_hint=int, default=1, description="Number of images to generate per prompt" ), "num_inference_steps": InputParam( "num_inference_steps", type_hint=int, default=50, description="Number of denoising steps" ), "timesteps": InputParam( "timesteps", type_hint=Optional[torch.Tensor], description="Custom timesteps for the denoising process" ), "sigmas": InputParam( "sigmas", type_hint=Optional[torch.Tensor], description="Custom sigmas for the denoising process" ), "denoising_end": InputParam( "denoising_end", type_hint=Optional[float], description="Fraction of denoising process to complete before termination", ), # YiYi Notes: img2img defaults to 0.3, inpainting defaults to 0.9999 "strength": InputParam( "strength", type_hint=float, default=0.3, description="How much to transform 
the reference image" ), "denoising_start": InputParam( "denoising_start", type_hint=Optional[float], description="Starting point of the denoising process" ), "latents": InputParam( "latents", type_hint=Optional[torch.Tensor], description="Pre-generated noisy latents for image generation" ), "padding_mask_crop": InputParam( "padding_mask_crop", type_hint=Optional[Tuple[int, int]], description="Size of margin in crop for image and mask", ), "original_size": InputParam( "original_size", type_hint=Optional[Tuple[int, int]], description="Original size of the image for SDXL's micro-conditioning", ), "target_size": InputParam( "target_size", type_hint=Optional[Tuple[int, int]], description="Target size for SDXL's micro-conditioning" ), "negative_original_size": InputParam( "negative_original_size", type_hint=Optional[Tuple[int, int]], description="Negative conditioning based on image resolution", ), "negative_target_size": InputParam( "negative_target_size", type_hint=Optional[Tuple[int, int]], description="Negative conditioning based on target resolution", ), "crops_coords_top_left": InputParam( "crops_coords_top_left", type_hint=Tuple[int, int], default=(0, 0), description="Top-left coordinates for SDXL's micro-conditioning", ), "negative_crops_coords_top_left": InputParam( "negative_crops_coords_top_left", type_hint=Tuple[int, int], default=(0, 0), description="Negative conditioning crop coordinates", ), "aesthetic_score": InputParam( "aesthetic_score", type_hint=float, default=6.0, description="Simulates aesthetic score of generated image" ), "negative_aesthetic_score": InputParam( "negative_aesthetic_score", type_hint=float, default=2.0, description="Simulates negative aesthetic score" ), "eta": InputParam("eta", type_hint=float, default=0.0, description="Parameter η in the DDIM paper"), "output_type": InputParam( "output_type", type_hint=str, default="pil", description="Output format (pil/tensor/np.array)" ), "ip_adapter_image": InputParam( "ip_adapter_image", type_hint=PipelineImageInput, required=True, description="Image(s) to be used as IP adapter", ), "control_image": InputParam( "control_image", type_hint=PipelineImageInput, required=True, description="ControlNet input condition" ), "control_guidance_start": InputParam( "control_guidance_start", type_hint=Union[float, List[float]], default=0.0, description="When ControlNet starts applying", ), "control_guidance_end": InputParam( "control_guidance_end", type_hint=Union[float, List[float]], default=1.0, description="When ControlNet stops applying", ), "controlnet_conditioning_scale": InputParam( "controlnet_conditioning_scale", type_hint=Union[float, List[float]], default=1.0, description="Scale factor for ControlNet outputs", ), "guess_mode": InputParam( "guess_mode", type_hint=bool, default=False, description="Enables ControlNet encoder to recognize input without prompts", ), "control_mode": InputParam( "control_mode", type_hint=List[int], required=True, description="Control mode for union controlnet" ), "prompt_embeds": InputParam( "prompt_embeds", type_hint=torch.Tensor, required=True, description="Text embeddings used to guide image generation", ), "negative_prompt_embeds": InputParam( "negative_prompt_embeds", type_hint=torch.Tensor, description="Negative text embeddings" ), "pooled_prompt_embeds": InputParam( "pooled_prompt_embeds", type_hint=torch.Tensor, required=True, description="Pooled text embeddings" ), "negative_pooled_prompt_embeds": InputParam( "negative_pooled_prompt_embeds", type_hint=torch.Tensor, 
description="Negative pooled text embeddings" ), "batch_size": InputParam("batch_size", type_hint=int, required=True, description="Number of prompts"), "dtype": InputParam("dtype", type_hint=torch.dtype, description="Data type of model tensor inputs"), "preprocess_kwargs": InputParam( "preprocess_kwargs", type_hint=Optional[dict], description="Kwargs for ImageProcessor" ), "latent_timestep": InputParam( "latent_timestep", type_hint=torch.Tensor, required=True, description="Initial noise level timestep" ), "image_latents": InputParam( "image_latents", type_hint=torch.Tensor, required=True, description="Latents representing reference image" ), "mask": InputParam("mask", type_hint=torch.Tensor, required=True, description="Mask for inpainting"), "masked_image_latents": InputParam( "masked_image_latents", type_hint=torch.Tensor, description="Masked image latents for inpainting" ), "add_time_ids": InputParam( "add_time_ids", type_hint=torch.Tensor, required=True, description="Time ids for conditioning" ), "negative_add_time_ids": InputParam( "negative_add_time_ids", type_hint=torch.Tensor, description="Negative time ids" ), "timestep_cond": InputParam("timestep_cond", type_hint=torch.Tensor, description="Timestep conditioning for LCM"), "noise": InputParam("noise", type_hint=torch.Tensor, description="Noise added to image latents"), "crops_coords": InputParam("crops_coords", type_hint=Optional[Tuple[int]], description="Crop coordinates"), "ip_adapter_embeds": InputParam( "ip_adapter_embeds", type_hint=List[torch.Tensor], description="Image embeddings for IP-Adapter" ), "negative_ip_adapter_embeds": InputParam( "negative_ip_adapter_embeds", type_hint=List[torch.Tensor], description="Negative image embeddings for IP-Adapter", ), "images": InputParam( "images", type_hint=Union[List[PIL.Image.Image], List[torch.Tensor], List[np.array]], required=True, description="Generated images", ), } SDXL_INTERMEDIATE_OUTPUTS_SCHEMA = { "prompt_embeds": OutputParam( "prompt_embeds", type_hint=torch.Tensor, description="Text embeddings used to guide image generation" ), "negative_prompt_embeds": OutputParam( "negative_prompt_embeds", type_hint=torch.Tensor, description="Negative text embeddings" ), "pooled_prompt_embeds": OutputParam( "pooled_prompt_embeds", type_hint=torch.Tensor, description="Pooled text embeddings" ), "negative_pooled_prompt_embeds": OutputParam( "negative_pooled_prompt_embeds", type_hint=torch.Tensor, description="Negative pooled text embeddings" ), "batch_size": OutputParam("batch_size", type_hint=int, description="Number of prompts"), "dtype": OutputParam("dtype", type_hint=torch.dtype, description="Data type of model tensor inputs"), "image_latents": OutputParam( "image_latents", type_hint=torch.Tensor, description="Latents representing reference image" ), "mask": OutputParam("mask", type_hint=torch.Tensor, description="Mask for inpainting"), "masked_image_latents": OutputParam( "masked_image_latents", type_hint=torch.Tensor, description="Masked image latents for inpainting" ), "crops_coords": OutputParam("crops_coords", type_hint=Optional[Tuple[int]], description="Crop coordinates"), "timesteps": OutputParam("timesteps", type_hint=torch.Tensor, description="Timesteps for inference"), "num_inference_steps": OutputParam("num_inference_steps", type_hint=int, description="Number of denoising steps"), "latent_timestep": OutputParam( "latent_timestep", type_hint=torch.Tensor, description="Initial noise level timestep" ), "add_time_ids": OutputParam("add_time_ids", type_hint=torch.Tensor, 
description="Time ids for conditioning"), "negative_add_time_ids": OutputParam( "negative_add_time_ids", type_hint=torch.Tensor, description="Negative time ids" ), "timestep_cond": OutputParam("timestep_cond", type_hint=torch.Tensor, description="Timestep conditioning for LCM"), "latents": OutputParam("latents", type_hint=torch.Tensor, description="Denoised latents"), "noise": OutputParam("noise", type_hint=torch.Tensor, description="Noise added to image latents"), "ip_adapter_embeds": OutputParam( "ip_adapter_embeds", type_hint=List[torch.Tensor], description="Image embeddings for IP-Adapter" ), "negative_ip_adapter_embeds": OutputParam( "negative_ip_adapter_embeds", type_hint=List[torch.Tensor], description="Negative image embeddings for IP-Adapter", ), "images": OutputParam( "images", type_hint=Union[List[PIL.Image.Image], List[torch.Tensor], List[np.array]], description="Generated images", ), } SDXL_OUTPUTS_SCHEMA = { "images": OutputParam( "images", type_hint=Union[ Tuple[Union[List[PIL.Image.Image], List[torch.Tensor], List[np.array]]], StableDiffusionXLPipelineOutput ], description="The final generated images", ) }
diffusers/src/diffusers/modular_pipelines/stable_diffusion_xl/modular_pipeline.py/0
{ "file_path": "diffusers/src/diffusers/modular_pipelines/stable_diffusion_xl/modular_pipeline.py", "repo_id": "diffusers", "token_count": 6005 }
169
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch from transformers import CLIPTextModelWithProjection, CLIPTokenizer from ...image_processor import PipelineImageInput, VaeImageProcessor from ...models import UVit2DModel, VQModel from ...schedulers import AmusedScheduler from ...utils import is_torch_xla_available, replace_example_docstring from ..pipeline_utils import DeprecatedPipelineMixin, DiffusionPipeline, ImagePipelineOutput if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False EXAMPLE_DOC_STRING = """ Examples: ```py >>> import torch >>> from diffusers import AmusedImg2ImgPipeline >>> from diffusers.utils import load_image >>> pipe = AmusedImg2ImgPipeline.from_pretrained( ... "amused/amused-512", variant="fp16", torch_dtype=torch.float16 ... ) >>> pipe = pipe.to("cuda") >>> prompt = "winter mountains" >>> input_image = ( ... load_image( ... "https://huggingface.co/datasets/diffusers/docs-images/resolve/main/open_muse/mountains.jpg" ... ) ... .resize((512, 512)) ... .convert("RGB") ... ) >>> image = pipe(prompt, input_image).images[0] ``` """ class AmusedImg2ImgPipeline(DeprecatedPipelineMixin, DiffusionPipeline): _last_supported_version = "0.33.1" image_processor: VaeImageProcessor vqvae: VQModel tokenizer: CLIPTokenizer text_encoder: CLIPTextModelWithProjection transformer: UVit2DModel scheduler: AmusedScheduler model_cpu_offload_seq = "text_encoder->transformer->vqvae" # TODO - when calling self.vqvae.quantize, it uses self.vqvae.quantize.embedding.weight before # the forward method of self.vqvae.quantize, so the hook doesn't get called to move the parameter # off the meta device. 
There should be a way to fix this instead of just not offloading it _exclude_from_cpu_offload = ["vqvae"] def __init__( self, vqvae: VQModel, tokenizer: CLIPTokenizer, text_encoder: CLIPTextModelWithProjection, transformer: UVit2DModel, scheduler: AmusedScheduler, ): super().__init__() self.register_modules( vqvae=vqvae, tokenizer=tokenizer, text_encoder=text_encoder, transformer=transformer, scheduler=scheduler, ) self.vae_scale_factor = ( 2 ** (len(self.vqvae.config.block_out_channels) - 1) if getattr(self, "vqvae", None) else 8 ) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_normalize=False) @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, prompt: Optional[Union[List[str], str]] = None, image: PipelineImageInput = None, strength: float = 0.5, num_inference_steps: int = 12, guidance_scale: float = 10.0, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, generator: Optional[torch.Generator] = None, prompt_embeds: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, negative_encoder_hidden_states: Optional[torch.Tensor] = None, output_type="pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, callback_steps: int = 1, cross_attention_kwargs: Optional[Dict[str, Any]] = None, micro_conditioning_aesthetic_score: int = 6, micro_conditioning_crop_coord: Tuple[int, int] = (0, 0), temperature: Union[int, Tuple[int, int], List[int]] = (2, 0), ): """ The call function to the pipeline for generation. Args: prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`): `Image`, numpy array or tensor representing an image batch to be used as the starting point. For both numpy array and pytorch tensor, the expected value range is between `[0, 1]` If it's a tensor or a list or tensors, the expected shape should be `(B, C, H, W)` or `(C, H, W)`. If it is a numpy array or a list of arrays, the expected shape should be `(B, H, W, C)` or `(H, W, C)` It can also accept image latents as `image`, but if passing latents directly it is not encoded again. strength (`float`, *optional*, defaults to 0.5): Indicates extent to transform the reference `image`. Must be between 0 and 1. `image` is used as a starting point and more noise is added the higher the `strength`. The number of denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise is maximum and the denoising process runs for the full number of iterations specified in `num_inference_steps`. A value of 1 essentially ignores `image`. num_inference_steps (`int`, *optional*, defaults to 12): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. guidance_scale (`float`, *optional*, defaults to 10.0): A higher guidance scale value encourages the model to generate images closely linked to the text `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide what to not include in image generation. If not defined, you need to pass `negative_prompt_embeds` instead. 
Ignored when not using guidance (`guidance_scale < 1`). num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. generator (`torch.Generator`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, text embeddings are generated from the `prompt` input argument. A single vector from the pooled and projected final hidden states. encoder_hidden_states (`torch.Tensor`, *optional*): Pre-generated penultimate hidden states from the text encoder providing additional text conditioning. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. negative_encoder_hidden_states (`torch.Tensor`, *optional*): Analogous to `encoder_hidden_states` for the positive prompt. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generated image. Choose between `PIL.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a plain tuple. callback (`Callable`, *optional*): A function that calls every `callback_steps` steps during inference. The function is called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function is called. If not specified, the callback is called at every step. cross_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). micro_conditioning_aesthetic_score (`int`, *optional*, defaults to 6): The targeted aesthetic score according to the laion aesthetic classifier. See https://laion.ai/blog/laion-aesthetics/ and the micro-conditioning section of https://huggingface.co/papers/2307.01952. micro_conditioning_crop_coord (`Tuple[int]`, *optional*, defaults to (0, 0)): The targeted height, width crop coordinates. See the micro-conditioning section of https://huggingface.co/papers/2307.01952. temperature (`Union[int, Tuple[int, int], List[int]]`, *optional*, defaults to (2, 0)): Configures the temperature scheduler on `self.scheduler` see `AmusedScheduler#set_timesteps`. Examples: Returns: [`~pipelines.pipeline_utils.ImagePipelineOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.pipeline_utils.ImagePipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated images. 
""" if (prompt_embeds is not None and encoder_hidden_states is None) or ( prompt_embeds is None and encoder_hidden_states is not None ): raise ValueError("pass either both `prompt_embeds` and `encoder_hidden_states` or neither") if (negative_prompt_embeds is not None and negative_encoder_hidden_states is None) or ( negative_prompt_embeds is None and negative_encoder_hidden_states is not None ): raise ValueError( "pass either both `negative_prompt_embeds` and `negative_encoder_hidden_states` or neither" ) if (prompt is None and prompt_embeds is None) or (prompt is not None and prompt_embeds is not None): raise ValueError("pass only one of `prompt` or `prompt_embeds`") if isinstance(prompt, str): prompt = [prompt] if prompt is not None: batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] batch_size = batch_size * num_images_per_prompt if prompt_embeds is None: input_ids = self.tokenizer( prompt, return_tensors="pt", padding="max_length", truncation=True, max_length=self.tokenizer.model_max_length, ).input_ids.to(self._execution_device) outputs = self.text_encoder(input_ids, return_dict=True, output_hidden_states=True) prompt_embeds = outputs.text_embeds encoder_hidden_states = outputs.hidden_states[-2] prompt_embeds = prompt_embeds.repeat(num_images_per_prompt, 1) encoder_hidden_states = encoder_hidden_states.repeat(num_images_per_prompt, 1, 1) if guidance_scale > 1.0: if negative_prompt_embeds is None: if negative_prompt is None: negative_prompt = [""] * len(prompt) if isinstance(negative_prompt, str): negative_prompt = [negative_prompt] input_ids = self.tokenizer( negative_prompt, return_tensors="pt", padding="max_length", truncation=True, max_length=self.tokenizer.model_max_length, ).input_ids.to(self._execution_device) outputs = self.text_encoder(input_ids, return_dict=True, output_hidden_states=True) negative_prompt_embeds = outputs.text_embeds negative_encoder_hidden_states = outputs.hidden_states[-2] negative_prompt_embeds = negative_prompt_embeds.repeat(num_images_per_prompt, 1) negative_encoder_hidden_states = negative_encoder_hidden_states.repeat(num_images_per_prompt, 1, 1) prompt_embeds = torch.concat([negative_prompt_embeds, prompt_embeds]) encoder_hidden_states = torch.concat([negative_encoder_hidden_states, encoder_hidden_states]) image = self.image_processor.preprocess(image) height, width = image.shape[-2:] # Note that the micro conditionings _do_ flip the order of width, height for the original size # and the crop coordinates. 
This is how it was done in the original code base micro_conds = torch.tensor( [ width, height, micro_conditioning_crop_coord[0], micro_conditioning_crop_coord[1], micro_conditioning_aesthetic_score, ], device=self._execution_device, dtype=encoder_hidden_states.dtype, ) micro_conds = micro_conds.unsqueeze(0) micro_conds = micro_conds.expand(2 * batch_size if guidance_scale > 1.0 else batch_size, -1) self.scheduler.set_timesteps(num_inference_steps, temperature, self._execution_device) num_inference_steps = int(len(self.scheduler.timesteps) * strength) start_timestep_idx = len(self.scheduler.timesteps) - num_inference_steps needs_upcasting = self.vqvae.dtype == torch.float16 and self.vqvae.config.force_upcast if needs_upcasting: self.vqvae.float() latents = self.vqvae.encode(image.to(dtype=self.vqvae.dtype, device=self._execution_device)).latents latents_bsz, channels, latents_height, latents_width = latents.shape latents = self.vqvae.quantize(latents)[2][2].reshape(latents_bsz, latents_height, latents_width) latents = self.scheduler.add_noise( latents, self.scheduler.timesteps[start_timestep_idx - 1], generator=generator ) latents = latents.repeat(num_images_per_prompt, 1, 1) with self.progress_bar(total=num_inference_steps) as progress_bar: for i in range(start_timestep_idx, len(self.scheduler.timesteps)): timestep = self.scheduler.timesteps[i] if guidance_scale > 1.0: model_input = torch.cat([latents] * 2) else: model_input = latents model_output = self.transformer( model_input, micro_conds=micro_conds, pooled_text_emb=prompt_embeds, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs, ) if guidance_scale > 1.0: uncond_logits, cond_logits = model_output.chunk(2) model_output = uncond_logits + guidance_scale * (cond_logits - uncond_logits) latents = self.scheduler.step( model_output=model_output, timestep=timestep, sample=latents, generator=generator, ).prev_sample if i == len(self.scheduler.timesteps) - 1 or ((i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, "order", 1) callback(step_idx, timestep, latents) if XLA_AVAILABLE: xm.mark_step() if output_type == "latent": output = latents else: output = self.vqvae.decode( latents, force_not_quantize=True, shape=( batch_size, height // self.vae_scale_factor, width // self.vae_scale_factor, self.vqvae.config.latent_channels, ), ).sample.clip(0, 1) output = self.image_processor.postprocess(output, output_type) if needs_upcasting: self.vqvae.half() self.maybe_free_model_hooks() if not return_dict: return (output,) return ImagePipelineOutput(output)
diffusers/src/diffusers/pipelines/amused/pipeline_amused_img2img.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/amused/pipeline_amused_img2img.py", "repo_id": "diffusers", "token_count": 7731 }
170
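A minimal usage sketch for the `AmusedImg2ImgPipeline` defined above. The checkpoint id `amused/amused-512` and the input file name are assumptions, not taken from the file itself; the call arguments mirror the `__call__` signature shown above.

```py
# Sketch of image-to-image generation with AmusedImg2ImgPipeline.
# The checkpoint id "amused/amused-512" and "input.png" are assumptions.
import torch
from diffusers import AmusedImg2ImgPipeline
from diffusers.utils import load_image

pipe = AmusedImg2ImgPipeline.from_pretrained("amused/amused-512", torch_dtype=torch.float16)
pipe = pipe.to("cuda")

init_image = load_image("input.png").resize((512, 512))

# strength=0.5 keeps roughly half of the scheduler's timesteps for denoising;
# guidance_scale > 1 enables the classifier-free guidance branch in __call__.
image = pipe(
    prompt="a winter mountain landscape",
    image=init_image,
    strength=0.5,
    guidance_scale=10.0,
    num_inference_steps=12,
).images[0]
image.save("amused_img2img.png")
```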
# Copyright 2025 AuraFlow Authors and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch from transformers import T5Tokenizer, UMT5EncoderModel from ...callbacks import MultiPipelineCallbacks, PipelineCallback from ...image_processor import VaeImageProcessor from ...loaders import AuraFlowLoraLoaderMixin from ...models import AuraFlowTransformer2DModel, AutoencoderKL from ...models.attention_processor import AttnProcessor2_0, FusedAttnProcessor2_0, XFormersAttnProcessor from ...schedulers import FlowMatchEulerDiscreteScheduler from ...utils import ( USE_PEFT_BACKEND, is_torch_xla_available, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers, ) from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py >>> import torch >>> from diffusers import AuraFlowPipeline >>> pipe = AuraFlowPipeline.from_pretrained("fal/AuraFlow", torch_dtype=torch.float16) >>> pipe = pipe.to("cuda") >>> prompt = "A cat holding a sign that says hello world" >>> image = pipe(prompt).images[0] >>> image.save("aura_flow.png") ``` """ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps def retrieve_timesteps( scheduler, num_inference_steps: Optional[int] = None, device: Optional[Union[str, torch.device]] = None, timesteps: Optional[List[int]] = None, sigmas: Optional[List[float]] = None, **kwargs, ): r""" Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. Args: scheduler (`SchedulerMixin`): The scheduler to get timesteps from. num_inference_steps (`int`): The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` must be `None`. device (`str` or `torch.device`, *optional*): The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. timesteps (`List[int]`, *optional*): Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, `num_inference_steps` and `sigmas` must be `None`. sigmas (`List[float]`, *optional*): Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, `num_inference_steps` and `timesteps` must be `None`. Returns: `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the second element is the number of inference steps. """ if timesteps is not None and sigmas is not None: raise ValueError("Only one of `timesteps` or `sigmas` can be passed. 
Please choose one to set custom values") if timesteps is not None: accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" timestep schedules. Please check whether you are using the correct scheduler." ) scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif sigmas is not None: accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accept_sigmas: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" sigmas schedules. Please check whether you are using the correct scheduler." ) scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return timesteps, num_inference_steps class AuraFlowPipeline(DiffusionPipeline, AuraFlowLoraLoaderMixin): r""" Args: tokenizer (`T5TokenizerFast`): Tokenizer of class [T5Tokenizer](https://huggingface.co/docs/transformers/model_doc/t5#transformers.T5Tokenizer). text_encoder ([`T5EncoderModel`]): Frozen text-encoder. AuraFlow uses [T5](https://huggingface.co/docs/transformers/model_doc/t5#transformers.T5EncoderModel), specifically the [EleutherAI/pile-t5-xl](https://huggingface.co/EleutherAI/pile-t5-xl) variant. vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. transformer ([`AuraFlowTransformer2DModel`]): Conditional Transformer (MMDiT and DiT) architecture to denoise the encoded image latents. scheduler ([`FlowMatchEulerDiscreteScheduler`]): A scheduler to be used in combination with `transformer` to denoise the encoded image latents. """ _optional_components = [] model_cpu_offload_seq = "text_encoder->transformer->vae" _callback_tensor_inputs = [ "latents", "prompt_embeds", ] def __init__( self, tokenizer: T5Tokenizer, text_encoder: UMT5EncoderModel, vae: AutoencoderKL, transformer: AuraFlowTransformer2DModel, scheduler: FlowMatchEulerDiscreteScheduler, ): super().__init__() self.register_modules( tokenizer=tokenizer, text_encoder=text_encoder, vae=vae, transformer=transformer, scheduler=scheduler ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8 self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) def check_inputs( self, prompt, height, width, negative_prompt, prompt_embeds=None, negative_prompt_embeds=None, prompt_attention_mask=None, negative_prompt_attention_mask=None, callback_on_step_end_tensor_inputs=None, ): if height % (self.vae_scale_factor * 2) != 0 or width % (self.vae_scale_factor * 2) != 0: raise ValueError( f"`height` and `width` have to be divisible by {self.vae_scale_factor * 2} but are {height} and {width}." 
) if callback_on_step_end_tensor_inputs is not None and not all( k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs ): raise ValueError( f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" ) if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." ) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) if prompt_embeds is not None and prompt_attention_mask is None: raise ValueError("Must provide `prompt_attention_mask` when specifying `prompt_embeds`.") if negative_prompt_embeds is not None and negative_prompt_attention_mask is None: raise ValueError("Must provide `negative_prompt_attention_mask` when specifying `negative_prompt_embeds`.") if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." ) if prompt_attention_mask.shape != negative_prompt_attention_mask.shape: raise ValueError( "`prompt_attention_mask` and `negative_prompt_attention_mask` must have the same shape when passed directly, but" f" got: `prompt_attention_mask` {prompt_attention_mask.shape} != `negative_prompt_attention_mask`" f" {negative_prompt_attention_mask.shape}." ) def encode_prompt( self, prompt: Union[str, List[str]], negative_prompt: Union[str, List[str]] = None, do_classifier_free_guidance: bool = True, num_images_per_prompt: int = 1, device: Optional[torch.device] = None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, prompt_attention_mask: Optional[torch.Tensor] = None, negative_prompt_attention_mask: Optional[torch.Tensor] = None, max_sequence_length: int = 256, lora_scale: Optional[float] = None, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded negative_prompt (`str` or `List[str]`, *optional*): The prompt not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). 
do_classifier_free_guidance (`bool`, *optional*, defaults to `True`): whether to use classifier free guidance or not num_images_per_prompt (`int`, *optional*, defaults to 1): number of images that should be generated per prompt device: (`torch.device`, *optional*): torch device to place the resulting embeddings on prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. prompt_attention_mask (`torch.Tensor`, *optional*): Pre-generated attention mask for text embeddings. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. negative_prompt_attention_mask (`torch.Tensor`, *optional*): Pre-generated attention mask for negative text embeddings. max_sequence_length (`int`, defaults to 256): Maximum sequence length to use for the prompt. lora_scale (`float`, *optional*): A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. """ # set lora scale so that monkey patched LoRA # function of text encoder can correctly access it if lora_scale is not None and isinstance(self, AuraFlowLoraLoaderMixin): self._lora_scale = lora_scale # dynamically adjust the LoRA scale if self.text_encoder is not None and USE_PEFT_BACKEND: scale_lora_layers(self.text_encoder, lora_scale) if device is None: device = self._execution_device if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] max_length = max_sequence_length if prompt_embeds is None: text_inputs = self.tokenizer( prompt, truncation=True, max_length=max_length, padding="max_length", return_tensors="pt", ) text_input_ids = text_inputs["input_ids"] untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( text_input_ids, untruncated_ids ): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, max_length - 1 : -1]) logger.warning( "The following part of your input was truncated because T5 can only handle sequences up to" f" {max_length} tokens: {removed_text}" ) text_inputs = {k: v.to(device) for k, v in text_inputs.items()} prompt_embeds = self.text_encoder(**text_inputs)[0] prompt_attention_mask = text_inputs["attention_mask"].unsqueeze(-1).expand(prompt_embeds.shape) prompt_embeds = prompt_embeds * prompt_attention_mask if self.text_encoder is not None: dtype = self.text_encoder.dtype elif self.transformer is not None: dtype = self.transformer.dtype else: dtype = None prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) bs_embed, seq_len, _ = prompt_embeds.shape # duplicate text embeddings and attention mask for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) prompt_attention_mask = prompt_attention_mask.reshape(bs_embed, -1) prompt_attention_mask = prompt_attention_mask.repeat(num_images_per_prompt, 1) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance and negative_prompt_embeds is None: negative_prompt = negative_prompt or "" uncond_tokens = [negative_prompt] * batch_size if isinstance(negative_prompt, str) else negative_prompt max_length = prompt_embeds.shape[1] 
uncond_input = self.tokenizer( uncond_tokens, truncation=True, max_length=max_length, padding="max_length", return_tensors="pt", ) uncond_input = {k: v.to(device) for k, v in uncond_input.items()} negative_prompt_embeds = self.text_encoder(**uncond_input)[0] negative_prompt_attention_mask = ( uncond_input["attention_mask"].unsqueeze(-1).expand(negative_prompt_embeds.shape) ) negative_prompt_embeds = negative_prompt_embeds * negative_prompt_attention_mask if do_classifier_free_guidance: # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) negative_prompt_attention_mask = negative_prompt_attention_mask.reshape(bs_embed, -1) negative_prompt_attention_mask = negative_prompt_attention_mask.repeat(num_images_per_prompt, 1) else: negative_prompt_embeds = None negative_prompt_attention_mask = None if self.text_encoder is not None: if isinstance(self, AuraFlowLoraLoaderMixin) and USE_PEFT_BACKEND: # Retrieve the original scale by scaling back the LoRA layers unscale_lora_layers(self.text_encoder, lora_scale) return prompt_embeds, prompt_attention_mask, negative_prompt_embeds, negative_prompt_attention_mask # Copied from diffusers.pipelines.stable_diffusion_3.pipeline_stable_diffusion_3.StableDiffusion3Pipeline.prepare_latents def prepare_latents( self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None, ): if latents is not None: return latents.to(device=device, dtype=dtype) shape = ( batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor, ) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
) latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) return latents # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.upcast_vae def upcast_vae(self): dtype = self.vae.dtype self.vae.to(dtype=torch.float32) use_torch_2_0_or_xformers = isinstance( self.vae.decoder.mid_block.attentions[0].processor, ( AttnProcessor2_0, XFormersAttnProcessor, FusedAttnProcessor2_0, ), ) # if xformers or torch_2_0 is used attention block does not need # to be in float32 which can save lots of memory if use_torch_2_0_or_xformers: self.vae.post_quant_conv.to(dtype) self.vae.decoder.conv_in.to(dtype) self.vae.decoder.mid_block.to(dtype) @property def guidance_scale(self): return self._guidance_scale @property def attention_kwargs(self): return self._attention_kwargs @property def num_timesteps(self): return self._num_timesteps @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, prompt: Union[str, List[str]] = None, negative_prompt: Union[str, List[str]] = None, num_inference_steps: int = 50, sigmas: List[float] = None, guidance_scale: float = 3.5, num_images_per_prompt: Optional[int] = 1, height: Optional[int] = 1024, width: Optional[int] = 1024, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, prompt_embeds: Optional[torch.Tensor] = None, prompt_attention_mask: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_attention_mask: Optional[torch.Tensor] = None, max_sequence_length: int = 256, output_type: Optional[str] = "pil", return_dict: bool = True, attention_kwargs: Optional[Dict[str, Any]] = None, callback_on_step_end: Optional[ Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks] ] = None, callback_on_step_end_tensor_inputs: List[str] = ["latents"], ) -> Union[ImagePipelineOutput, Tuple]: r""" Function invoked when calling the pipeline for generation. Args: prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. instead. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). height (`int`, *optional*, defaults to self.transformer.config.sample_size * self.vae_scale_factor): The height in pixels of the generated image. This is set to 1024 by default for best results. width (`int`, *optional*, defaults to self.transformer.config.sample_size * self.vae_scale_factor): The width in pixels of the generated image. This is set to 1024 by default for best results. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. sigmas (`List[float]`, *optional*): Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, `num_inference_steps` and `timesteps` must be `None`. guidance_scale (`float`, *optional*, defaults to 5.0): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting `guidance_scale > 1`. 
Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor will ge generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. prompt_attention_mask (`torch.Tensor`, *optional*): Pre-generated attention mask for text embeddings. negative_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. negative_prompt_attention_mask (`torch.Tensor`, *optional*): Pre-generated attention mask for negative text embeddings. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] instead of a plain tuple. attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). callback_on_step_end (`Callable`, *optional*): A function that calls at the end of each denoising steps during the inference. The function is called with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by `callback_on_step_end_tensor_inputs`. callback_on_step_end_tensor_inputs (`List`, *optional*): The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the `._callback_tensor_inputs` attribute of your pipeline class. max_sequence_length (`int` defaults to 256): Maximum sequence length to use with the `prompt`. Examples: Returns: [`~pipelines.ImagePipelineOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated images. """ # 1. Check inputs. 
Raise error if not correct height = height or self.transformer.config.sample_size * self.vae_scale_factor width = width or self.transformer.config.sample_size * self.vae_scale_factor self.check_inputs( prompt, height, width, negative_prompt, prompt_embeds, negative_prompt_embeds, prompt_attention_mask, negative_prompt_attention_mask, callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, ) self._guidance_scale = guidance_scale self._attention_kwargs = attention_kwargs # 2. Determine batch size. if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device lora_scale = self.attention_kwargs.get("scale", None) if self.attention_kwargs is not None else None # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1` # corresponds to doing no classifier free guidance. do_classifier_free_guidance = guidance_scale > 1.0 # 3. Encode input prompt ( prompt_embeds, prompt_attention_mask, negative_prompt_embeds, negative_prompt_attention_mask, ) = self.encode_prompt( prompt=prompt, negative_prompt=negative_prompt, do_classifier_free_guidance=do_classifier_free_guidance, num_images_per_prompt=num_images_per_prompt, device=device, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, prompt_attention_mask=prompt_attention_mask, negative_prompt_attention_mask=negative_prompt_attention_mask, max_sequence_length=max_sequence_length, lora_scale=lora_scale, ) if do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) # 4. Prepare timesteps # sigmas = np.linspace(1.0, 1 / num_inference_steps, num_inference_steps) timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, sigmas=sigmas) # 5. Prepare latents. latent_channels = self.transformer.config.in_channels latents = self.prepare_latents( batch_size * num_images_per_prompt, latent_channels, height, width, prompt_embeds.dtype, device, generator, latents, ) # 6. 
Denoising loop num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) self._num_timesteps = len(timesteps) with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents # aura use timestep value between 0 and 1, with t=1 as noise and t=0 as the image # broadcast to batch dimension in a way that's compatible with ONNX/Core ML timestep = torch.tensor([t / 1000]).expand(latent_model_input.shape[0]) timestep = timestep.to(latents.device, dtype=latents.dtype) # predict noise model_output noise_pred = self.transformer( latent_model_input, encoder_hidden_states=prompt_embeds, timestep=timestep, return_dict=False, attention_kwargs=self.attention_kwargs, )[0] # perform guidance if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0] if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop("latents", latents) prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if XLA_AVAILABLE: xm.mark_step() if output_type == "latent": image = latents else: # make sure the VAE is in float32 mode, as it overflows in float16 needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast if needs_upcasting: self.upcast_vae() latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] image = self.image_processor.postprocess(image, output_type=output_type) # Offload all models self.maybe_free_model_hooks() if not return_dict: return (image,) return ImagePipelineOutput(images=image)
diffusers/src/diffusers/pipelines/aura_flow/pipeline_aura_flow.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/aura_flow/pipeline_aura_flow.py", "repo_id": "diffusers", "token_count": 14552 }
171
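The denoising loop above supports `callback_on_step_end`, limited to the tensors listed in `_callback_tensor_inputs` (`latents`, `prompt_embeds`). A small sketch of hooking into each step follows; the checkpoint id is the one from the module's own example docstring, and the statistic printed is only illustrative.

```py
# Sketch: per-step callback with AuraFlowPipeline.
import torch
from diffusers import AuraFlowPipeline

pipe = AuraFlowPipeline.from_pretrained("fal/AuraFlow", torch_dtype=torch.float16).to("cuda")


def on_step_end(pipeline, step, timestep, callback_kwargs):
    # Only the tensors requested via callback_on_step_end_tensor_inputs are present.
    latents = callback_kwargs["latents"]
    print(f"step {step}: latent std {latents.std().item():.4f}")
    # Returned entries overwrite the loop's local variables (see the .pop() calls in the loop).
    return {"latents": latents}


image = pipe(
    prompt="A cat holding a sign that says hello world",
    num_inference_steps=50,
    callback_on_step_end=on_step_end,
    callback_on_step_end_tensor_inputs=["latents"],
).images[0]
image.save("aura_flow_with_callback.png")
```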
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Callable, List, Optional, Union import torch from ...models import UNet2DModel from ...schedulers import CMStochasticIterativeScheduler from ...utils import ( is_torch_xla_available, logging, replace_example_docstring, ) from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py >>> import torch >>> from diffusers import ConsistencyModelPipeline >>> device = "cuda" >>> # Load the cd_imagenet64_l2 checkpoint. >>> model_id_or_path = "openai/diffusers-cd_imagenet64_l2" >>> pipe = ConsistencyModelPipeline.from_pretrained(model_id_or_path, torch_dtype=torch.float16) >>> pipe.to(device) >>> # Onestep Sampling >>> image = pipe(num_inference_steps=1).images[0] >>> image.save("cd_imagenet64_l2_onestep_sample.png") >>> # Onestep sampling, class-conditional image generation >>> # ImageNet-64 class label 145 corresponds to king penguins >>> image = pipe(num_inference_steps=1, class_labels=145).images[0] >>> image.save("cd_imagenet64_l2_onestep_sample_penguin.png") >>> # Multistep sampling, class-conditional image generation >>> # Timesteps can be explicitly specified; the particular timesteps below are from the original GitHub repo: >>> # https://github.com/openai/consistency_models/blob/main/scripts/launch.sh#L77 >>> image = pipe(num_inference_steps=None, timesteps=[22, 0], class_labels=145).images[0] >>> image.save("cd_imagenet64_l2_multistep_sample_penguin.png") ``` """ class ConsistencyModelPipeline(DiffusionPipeline): r""" Pipeline for unconditional or class-conditional image generation. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). Args: unet ([`UNet2DModel`]): A `UNet2DModel` to denoise the encoded image latents. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Currently only compatible with [`CMStochasticIterativeScheduler`]. """ model_cpu_offload_seq = "unet" def __init__(self, unet: UNet2DModel, scheduler: CMStochasticIterativeScheduler) -> None: super().__init__() self.register_modules( unet=unet, scheduler=scheduler, ) self.safety_checker = None def prepare_latents(self, batch_size, num_channels, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels, height, width) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
) if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device=device, dtype=dtype) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents # Follows diffusers.VaeImageProcessor.postprocess def postprocess_image(self, sample: torch.Tensor, output_type: str = "pil"): if output_type not in ["pt", "np", "pil"]: raise ValueError( f"output_type={output_type} is not supported. Make sure to choose one of ['pt', 'np', or 'pil']" ) # Equivalent to diffusers.VaeImageProcessor.denormalize sample = (sample / 2 + 0.5).clamp(0, 1) if output_type == "pt": return sample # Equivalent to diffusers.VaeImageProcessor.pt_to_numpy sample = sample.cpu().permute(0, 2, 3, 1).numpy() if output_type == "np": return sample # Output_type must be 'pil' sample = self.numpy_to_pil(sample) return sample def prepare_class_labels(self, batch_size, device, class_labels=None): if self.unet.config.num_class_embeds is not None: if isinstance(class_labels, list): class_labels = torch.tensor(class_labels, dtype=torch.int) elif isinstance(class_labels, int): assert batch_size == 1, "Batch size must be 1 if classes is an int" class_labels = torch.tensor([class_labels], dtype=torch.int) elif class_labels is None: # Randomly generate batch_size class labels # TODO: should use generator here? int analogue of randn_tensor is not exposed in ...utils class_labels = torch.randint(0, self.unet.config.num_class_embeds, size=(batch_size,)) class_labels = class_labels.to(device) else: class_labels = None return class_labels def check_inputs(self, num_inference_steps, timesteps, latents, batch_size, img_size, callback_steps): if num_inference_steps is None and timesteps is None: raise ValueError("Exactly one of `num_inference_steps` or `timesteps` must be supplied.") if num_inference_steps is not None and timesteps is not None: logger.warning( f"Both `num_inference_steps`: {num_inference_steps} and `timesteps`: {timesteps} are supplied;" " `timesteps` will be used over `num_inference_steps`." ) if latents is not None: expected_shape = (batch_size, 3, img_size, img_size) if latents.shape != expected_shape: raise ValueError(f"The shape of latents is {latents.shape} but is expected to be {expected_shape}.") if (callback_steps is None) or ( callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, batch_size: int = 1, class_labels: Optional[Union[torch.Tensor, List[int], int]] = None, num_inference_steps: int = 1, timesteps: List[int] = None, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, callback_steps: int = 1, ): r""" Args: batch_size (`int`, *optional*, defaults to 1): The number of images to generate. class_labels (`torch.Tensor` or `List[int]` or `int`, *optional*): Optional class labels for conditioning class-conditional consistency models. Not used if the model is not class-conditional. num_inference_steps (`int`, *optional*, defaults to 1): The number of denoising steps. 
More denoising steps usually lead to a higher quality image at the expense of slower inference. timesteps (`List[int]`, *optional*): Custom timesteps to use for the denoising process. If not defined, equal spaced `num_inference_steps` timesteps are used. Must be in descending order. generator (`torch.Generator`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.Tensor`, *optional*): Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor is generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generated image. Choose between `PIL.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. callback (`Callable`, *optional*): A function that calls every `callback_steps` steps during inference. The function is called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function is called. If not specified, the callback is called at every step. Examples: Returns: [`~pipelines.ImagePipelineOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated images. """ # 0. Prepare call parameters img_size = self.unet.config.sample_size device = self._execution_device # 1. Check inputs self.check_inputs(num_inference_steps, timesteps, latents, batch_size, img_size, callback_steps) # 2. Prepare image latents # Sample image latents x_0 ~ N(0, sigma_0^2 * I) sample = self.prepare_latents( batch_size=batch_size, num_channels=self.unet.config.in_channels, height=img_size, width=img_size, dtype=self.unet.dtype, device=device, generator=generator, latents=latents, ) # 3. Handle class_labels for class-conditional models class_labels = self.prepare_class_labels(batch_size, device, class_labels=class_labels) # 4. Prepare timesteps if timesteps is not None: self.scheduler.set_timesteps(timesteps=timesteps, device=device) timesteps = self.scheduler.timesteps num_inference_steps = len(timesteps) else: self.scheduler.set_timesteps(num_inference_steps) timesteps = self.scheduler.timesteps # 5. Denoising loop # Multistep sampling: implements Algorithm 1 in the paper with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): scaled_sample = self.scheduler.scale_model_input(sample, t) model_output = self.unet(scaled_sample, t, class_labels=class_labels, return_dict=False)[0] sample = self.scheduler.step(model_output, t, sample, generator=generator)[0] # call the callback, if provided progress_bar.update() if callback is not None and i % callback_steps == 0: callback(i, t, sample) if XLA_AVAILABLE: xm.mark_step() # 6. Post-process image sample image = self.postprocess_image(sample, output_type=output_type) # Offload all models self.maybe_free_model_hooks() if not return_dict: return (image,) return ImagePipelineOutput(images=image)
diffusers/src/diffusers/pipelines/consistency_models/pipeline_consistency_models.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/consistency_models/pipeline_consistency_models.py", "repo_id": "diffusers", "token_count": 5314 }
172
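A sketch of deterministic sampling with the `ConsistencyModelPipeline` above by supplying both a seeded `generator` and explicit `latents`. The checkpoint id and the class label come from the module's example docstring; the 64x64 sample size is an assumption about that checkpoint (`check_inputs` requires latents of shape `(batch_size, 3, sample_size, sample_size)`).

```py
# Sketch: seeded one-step sampling with explicit initial latents.
import torch
from diffusers import ConsistencyModelPipeline

pipe = ConsistencyModelPipeline.from_pretrained(
    "openai/diffusers-cd_imagenet64_l2", torch_dtype=torch.float16
).to("cuda")

generator = torch.Generator(device="cuda").manual_seed(0)

# Explicit initial noise; prepare_latents() will still scale it by init_noise_sigma.
latents = torch.randn(1, 3, 64, 64, generator=generator, device="cuda", dtype=torch.float16)

image = pipe(
    batch_size=1,
    class_labels=145,  # king penguin, as in the example docstring
    num_inference_steps=1,
    generator=generator,
    latents=latents,
).images[0]
image.save("cd_imagenet64_l2_seeded.png")
```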
from typing import TYPE_CHECKING from ...utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_flax_available, is_torch_available, is_transformers_available, ) _dummy_objects = {} _import_structure = {} try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_torch_and_transformers_objects # noqa F403 _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: _import_structure["pipeline_stable_diffusion_3_controlnet"] = ["StableDiffusion3ControlNetPipeline"] _import_structure["pipeline_stable_diffusion_3_controlnet_inpainting"] = [ "StableDiffusion3ControlNetInpaintingPipeline" ] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * else: from .pipeline_stable_diffusion_3_controlnet import StableDiffusion3ControlNetPipeline from .pipeline_stable_diffusion_3_controlnet_inpainting import StableDiffusion3ControlNetInpaintingPipeline try: if not (is_transformers_available() and is_flax_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_flax_and_transformers_objects import * # noqa F403 else: import sys sys.modules[__name__] = _LazyModule( __name__, globals()["__file__"], _import_structure, module_spec=__spec__, ) for name, value in _dummy_objects.items(): setattr(sys.modules[__name__], name, value)
diffusers/src/diffusers/pipelines/controlnet_sd3/__init__.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/controlnet_sd3/__init__.py", "repo_id": "diffusers", "token_count": 741 }
173
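The `__init__` above only declares what the subpackage exports; a hedged usage sketch of the exported `StableDiffusion3ControlNetPipeline` follows. The checkpoint ids, the `SD3ControlNetModel` loader, and the `control_image` / `controlnet_conditioning_scale` argument names are assumptions based on the usual diffusers ControlNet conventions and are not confirmed by this file.

```py
# Hedged sketch: text-to-image with a ControlNet-conditioned SD3 pipeline.
# Checkpoint ids and argument names below are assumptions.
import torch
from diffusers import SD3ControlNetModel, StableDiffusion3ControlNetPipeline
from diffusers.utils import load_image

controlnet = SD3ControlNetModel.from_pretrained("InstantX/SD3-Controlnet-Canny", torch_dtype=torch.float16)
pipe = StableDiffusion3ControlNetPipeline.from_pretrained(
    "stabilityai/stable-diffusion-3-medium-diffusers",
    controlnet=controlnet,
    torch_dtype=torch.float16,
).to("cuda")

control_image = load_image("canny_edges.png")  # precomputed edge map, assumed to exist locally
image = pipe(
    prompt="a futuristic city at dusk",
    control_image=control_image,
    controlnet_conditioning_scale=0.7,
).images[0]
image.save("sd3_controlnet.png")
```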
from typing import TYPE_CHECKING from ...utils import ( DIFFUSERS_SLOW_IMPORT, _LazyModule, ) _import_structure = {"pipeline_ddpm": ["DDPMPipeline"]} if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: from .pipeline_ddpm import DDPMPipeline else: import sys sys.modules[__name__] = _LazyModule( __name__, globals()["__file__"], _import_structure, module_spec=__spec__, )
diffusers/src/diffusers/pipelines/ddpm/__init__.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/ddpm/__init__.py", "repo_id": "diffusers", "token_count": 193 }
174
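The lazy `_import_structure` above defers loading `pipeline_ddpm` until the symbol is first accessed; from a user's perspective the import looks ordinary. A short sketch, assuming the `google/ddpm-cat-256` checkpoint (an assumption, not part of this file):

```py
# Sketch: the lazy __init__ resolves pipeline_ddpm only on first attribute access.
from diffusers import DDPMPipeline

pipe = DDPMPipeline.from_pretrained("google/ddpm-cat-256").to("cuda")  # checkpoint id is an assumption
image = pipe(num_inference_steps=1000).images[0]
image.save("ddpm_generated.png")
```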
from dataclasses import dataclass from typing import Optional, Tuple import torch from torch import nn from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel from transformers.utils import ModelOutput @dataclass class TransformationModelOutput(ModelOutput): """ Base class for text model's outputs that also contains a pooling of the last hidden states. Args: text_embeds (`torch.Tensor` of shape `(batch_size, output_dim)` *optional* returned when model is initialized with `with_projection=True`): The text embeddings obtained by applying the projection layer to the pooler_output. last_hidden_state (`torch.Tensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. hidden_states (`tuple(torch.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.Tensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(torch.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ projection_state: Optional[torch.Tensor] = None last_hidden_state: torch.Tensor = None hidden_states: Optional[Tuple[torch.Tensor]] = None attentions: Optional[Tuple[torch.Tensor]] = None class RobertaSeriesConfig(XLMRobertaConfig): def __init__( self, pad_token_id=1, bos_token_id=0, eos_token_id=2, project_dim=512, pooler_fn="cls", learn_encoder=False, use_attention_mask=True, **kwargs, ): super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) self.project_dim = project_dim self.pooler_fn = pooler_fn self.learn_encoder = learn_encoder self.use_attention_mask = use_attention_mask class RobertaSeriesModelWithTransformation(RobertaPreTrainedModel): _keys_to_ignore_on_load_unexpected = [r"pooler", r"logit_scale"] _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"] base_model_prefix = "roberta" config_class = RobertaSeriesConfig def __init__(self, config): super().__init__(config) self.roberta = XLMRobertaModel(config) self.transformation = nn.Linear(config.hidden_size, config.project_dim) self.has_pre_transformation = getattr(config, "has_pre_transformation", False) if self.has_pre_transformation: self.transformation_pre = nn.Linear(config.hidden_size, config.project_dim) self.pre_LN = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.post_init() def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, return_dict: Optional[bool] = None, output_hidden_states: Optional[bool] = None, ): r""" """ return_dict = return_dict if return_dict is not None else 
self.config.use_return_dict outputs = self.base_model( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, output_hidden_states=True if self.has_pre_transformation else output_hidden_states, return_dict=return_dict, ) if self.has_pre_transformation: sequence_output2 = outputs["hidden_states"][-2] sequence_output2 = self.pre_LN(sequence_output2) projection_state2 = self.transformation_pre(sequence_output2) return TransformationModelOutput( projection_state=projection_state2, last_hidden_state=outputs.last_hidden_state, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) else: projection_state = self.transformation(outputs.last_hidden_state) return TransformationModelOutput( projection_state=projection_state, last_hidden_state=outputs.last_hidden_state, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
diffusers/src/diffusers/pipelines/deprecated/alt_diffusion/modeling_roberta_series.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/deprecated/alt_diffusion/modeling_roberta_series.py", "repo_id": "diffusers", "token_count": 2322 }
175
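A sketch of instantiating the AltDiffusion text-encoder wrapper above from a small random-weight config and running a forward pass. The hyperparameter values are illustrative only; the import path is the module's own file path.

```py
# Sketch: RobertaSeriesModelWithTransformation on dummy inputs (illustrative config values).
import torch
from diffusers.pipelines.deprecated.alt_diffusion.modeling_roberta_series import (
    RobertaSeriesConfig,
    RobertaSeriesModelWithTransformation,
)

config = RobertaSeriesConfig(
    vocab_size=1000,        # toy vocabulary, illustrative only
    hidden_size=768,
    num_hidden_layers=2,
    num_attention_heads=12,
    project_dim=512,
)
model = RobertaSeriesModelWithTransformation(config).eval()

input_ids = torch.randint(0, config.vocab_size, (1, 16))
attention_mask = torch.ones_like(input_ids)

with torch.no_grad():
    out = model(input_ids=input_ids, attention_mask=attention_mask)

# projection_state is the linear projection of the last hidden states
# (or of the penultimate layer when has_pre_transformation is set).
print(out.projection_state.shape)  # (1, 16, 512)
```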
# Copyright 2022 The Music Spectrogram Diffusion Authors. # Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch import torch.nn as nn from transformers.modeling_utils import ModuleUtilsMixin from transformers.models.t5.modeling_t5 import ( T5Block, T5Config, T5LayerNorm, ) from ....configuration_utils import ConfigMixin, register_to_config from ....models import ModelMixin class SpectrogramContEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin): @register_to_config def __init__( self, input_dims: int, targets_context_length: int, d_model: int, dropout_rate: float, num_layers: int, num_heads: int, d_kv: int, d_ff: int, feed_forward_proj: str, is_decoder: bool = False, ): super().__init__() self.input_proj = nn.Linear(input_dims, d_model, bias=False) self.position_encoding = nn.Embedding(targets_context_length, d_model) self.position_encoding.weight.requires_grad = False self.dropout_pre = nn.Dropout(p=dropout_rate) t5config = T5Config( d_model=d_model, num_heads=num_heads, d_kv=d_kv, d_ff=d_ff, feed_forward_proj=feed_forward_proj, dropout_rate=dropout_rate, is_decoder=is_decoder, is_encoder_decoder=False, ) self.encoders = nn.ModuleList() for lyr_num in range(num_layers): lyr = T5Block(t5config) self.encoders.append(lyr) self.layer_norm = T5LayerNorm(d_model) self.dropout_post = nn.Dropout(p=dropout_rate) def forward(self, encoder_inputs, encoder_inputs_mask): x = self.input_proj(encoder_inputs) # terminal relative positional encodings max_positions = encoder_inputs.shape[1] input_positions = torch.arange(max_positions, device=encoder_inputs.device) seq_lens = encoder_inputs_mask.sum(-1) input_positions = torch.roll(input_positions.unsqueeze(0), tuple(seq_lens.tolist()), dims=0) x += self.position_encoding(input_positions) x = self.dropout_pre(x) # inverted the attention mask input_shape = encoder_inputs.size() extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape) for lyr in self.encoders: x = lyr(x, extended_attention_mask)[0] x = self.layer_norm(x) return self.dropout_post(x), encoder_inputs_mask
diffusers/src/diffusers/pipelines/deprecated/spectrogram_diffusion/continuous_encoder.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/deprecated/spectrogram_diffusion/continuous_encoder.py", "repo_id": "diffusers", "token_count": 1329 }
176
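A sketch of running the continuous spectrogram-context encoder above on dummy inputs. The hyperparameters are illustrative, not the values used by the music-spectrogram-diffusion checkpoints, and the sketch assumes a transformers version in which `T5Block` keeps the interface this module relies on.

```py
# Sketch: SpectrogramContEncoder forward pass with toy dimensions (assumptions noted above).
import torch
from diffusers.pipelines.deprecated.spectrogram_diffusion.continuous_encoder import SpectrogramContEncoder

encoder = SpectrogramContEncoder(
    input_dims=128,              # mel bins per frame (illustrative)
    targets_context_length=256,  # maximum number of context frames (illustrative)
    d_model=512,
    dropout_rate=0.1,
    num_layers=2,
    num_heads=8,
    d_kv=64,
    d_ff=1024,
    feed_forward_proj="gated-gelu",
).eval()

batch, frames = 1, 256
encoder_inputs = torch.randn(batch, frames, 128)
encoder_inputs_mask = torch.ones(batch, frames, dtype=torch.long)

with torch.no_grad():
    hidden, mask = encoder(encoder_inputs, encoder_inputs_mask)

print(hidden.shape)  # (1, 256, 512): projected, position-encoded, T5-encoded frames
```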
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect from typing import Callable, List, Optional, Tuple, Union import numpy as np import PIL.Image import torch import torch.utils.checkpoint from transformers import ( CLIPImageProcessor, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionModelWithProjection, ) from ....image_processor import VaeImageProcessor from ....models import AutoencoderKL, DualTransformer2DModel, Transformer2DModel, UNet2DConditionModel from ....schedulers import KarrasDiffusionSchedulers from ....utils import deprecate, logging from ....utils.torch_utils import randn_tensor from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput from .modeling_text_unet import UNetFlatConditionModel logger = logging.get_logger(__name__) # pylint: disable=invalid-name class VersatileDiffusionDualGuidedPipeline(DiffusionPipeline): r""" Pipeline for image-text dual-guided generation using Versatile Diffusion. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). Parameters: vqvae ([`VQModel`]): Vector-quantized (VQ) model to encode and decode images to and from latent representations. bert ([`LDMBertModel`]): Text-encoder model based on [`~transformers.BERT`]. tokenizer ([`~transformers.BertTokenizer`]): A `BertTokenizer` to tokenize text. unet ([`UNet2DConditionModel`]): A `UNet2DConditionModel` to denoise the encoded image latents. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. 
""" model_cpu_offload_seq = "bert->unet->vqvae" tokenizer: CLIPTokenizer image_feature_extractor: CLIPImageProcessor text_encoder: CLIPTextModelWithProjection image_encoder: CLIPVisionModelWithProjection image_unet: UNet2DConditionModel text_unet: UNetFlatConditionModel vae: AutoencoderKL scheduler: KarrasDiffusionSchedulers _optional_components = ["text_unet"] def __init__( self, tokenizer: CLIPTokenizer, image_feature_extractor: CLIPImageProcessor, text_encoder: CLIPTextModelWithProjection, image_encoder: CLIPVisionModelWithProjection, image_unet: UNet2DConditionModel, text_unet: UNetFlatConditionModel, vae: AutoencoderKL, scheduler: KarrasDiffusionSchedulers, ): super().__init__() self.register_modules( tokenizer=tokenizer, image_feature_extractor=image_feature_extractor, text_encoder=text_encoder, image_encoder=image_encoder, image_unet=image_unet, text_unet=text_unet, vae=vae, scheduler=scheduler, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8 self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) if self.text_unet is not None and ( "dual_cross_attention" not in self.image_unet.config or not self.image_unet.config.dual_cross_attention ): # if loading from a universal checkpoint rather than a saved dual-guided pipeline self._convert_to_dual_attention() def remove_unused_weights(self): self.register_modules(text_unet=None) def _convert_to_dual_attention(self): """ Replace image_unet's `Transformer2DModel` blocks with `DualTransformer2DModel` that contains transformer blocks from both `image_unet` and `text_unet` """ for name, module in self.image_unet.named_modules(): if isinstance(module, Transformer2DModel): parent_name, index = name.rsplit(".", 1) index = int(index) image_transformer = self.image_unet.get_submodule(parent_name)[index] text_transformer = self.text_unet.get_submodule(parent_name)[index] config = image_transformer.config dual_transformer = DualTransformer2DModel( num_attention_heads=config.num_attention_heads, attention_head_dim=config.attention_head_dim, in_channels=config.in_channels, num_layers=config.num_layers, dropout=config.dropout, norm_num_groups=config.norm_num_groups, cross_attention_dim=config.cross_attention_dim, attention_bias=config.attention_bias, sample_size=config.sample_size, num_vector_embeds=config.num_vector_embeds, activation_fn=config.activation_fn, num_embeds_ada_norm=config.num_embeds_ada_norm, ) dual_transformer.transformers[0] = image_transformer dual_transformer.transformers[1] = text_transformer self.image_unet.get_submodule(parent_name)[index] = dual_transformer self.image_unet.register_to_config(dual_cross_attention=True) def _revert_dual_attention(self): """ Revert the image_unet `DualTransformer2DModel` blocks back to `Transformer2DModel` with image_unet weights Call this function if you reuse `image_unet` in another pipeline, e.g. `VersatileDiffusionPipeline` """ for name, module in self.image_unet.named_modules(): if isinstance(module, DualTransformer2DModel): parent_name, index = name.rsplit(".", 1) index = int(index) self.image_unet.get_submodule(parent_name)[index] = module.transformers[0] self.image_unet.register_to_config(dual_cross_attention=False) def _encode_text_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance): r""" Encodes the prompt into text encoder hidden states. 
Args: prompt (`str` or `List[str]`): prompt to be encoded device: (`torch.device`): torch device num_images_per_prompt (`int`): number of images that should be generated per prompt do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not """ def normalize_embeddings(encoder_output): embeds = self.text_encoder.text_projection(encoder_output.last_hidden_state) embeds_pooled = encoder_output.text_embeds embeds = embeds / torch.norm(embeds_pooled.unsqueeze(1), dim=-1, keepdim=True) return embeds batch_size = len(prompt) text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="max_length", return_tensors="pt").input_ids if not torch.equal(text_input_ids, untruncated_ids): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None prompt_embeds = self.text_encoder( text_input_ids.to(device), attention_mask=attention_mask, ) prompt_embeds = normalize_embeddings(prompt_embeds) # duplicate text embeddings for each generation per prompt, using mps friendly method bs_embed, seq_len, _ = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: uncond_tokens = [""] * batch_size max_length = text_input_ids.shape[-1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder( uncond_input.input_ids.to(device), attention_mask=attention_mask, ) negative_prompt_embeds = normalize_embeddings(negative_prompt_embeds) # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) return prompt_embeds def _encode_image_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance): r""" Encodes the prompt into text encoder hidden states. 
Args: prompt (`str` or `List[str]`): prompt to be encoded device: (`torch.device`): torch device num_images_per_prompt (`int`): number of images that should be generated per prompt do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not """ def normalize_embeddings(encoder_output): embeds = self.image_encoder.vision_model.post_layernorm(encoder_output.last_hidden_state) embeds = self.image_encoder.visual_projection(embeds) embeds_pooled = embeds[:, 0:1] embeds = embeds / torch.norm(embeds_pooled, dim=-1, keepdim=True) return embeds batch_size = len(prompt) if isinstance(prompt, list) else 1 # get prompt text embeddings image_input = self.image_feature_extractor(images=prompt, return_tensors="pt") pixel_values = image_input.pixel_values.to(device).to(self.image_encoder.dtype) image_embeddings = self.image_encoder(pixel_values) image_embeddings = normalize_embeddings(image_embeddings) # duplicate image embeddings for each generation per prompt, using mps friendly method bs_embed, seq_len, _ = image_embeddings.shape image_embeddings = image_embeddings.repeat(1, num_images_per_prompt, 1) image_embeddings = image_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: uncond_images = [np.zeros((512, 512, 3)) + 0.5] * batch_size uncond_images = self.image_feature_extractor(images=uncond_images, return_tensors="pt") pixel_values = uncond_images.pixel_values.to(device).to(self.image_encoder.dtype) negative_prompt_embeds = self.image_encoder(pixel_values) negative_prompt_embeds = normalize_embeddings(negative_prompt_embeds) # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and conditional embeddings into a single batch # to avoid doing two forward passes image_embeddings = torch.cat([negative_prompt_embeds, image_embeddings]) return image_embeddings # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents def decode_latents(self, latents): deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead" deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.decode(latents, return_dict=False)[0] image = (image / 2 + 0.5).clamp(0, 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 image = image.cpu().permute(0, 2, 3, 1).float().numpy() return image # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://huggingface.co/papers/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs def check_inputs(self, prompt, image, height, width, callback_steps): if not isinstance(prompt, str) and not isinstance(prompt, PIL.Image.Image) and not isinstance(prompt, list): raise ValueError(f"`prompt` has to be of type `str` `PIL.Image` or `list` but is {type(prompt)}") if not isinstance(image, str) and not isinstance(image, PIL.Image.Image) and not isinstance(image, list): raise ValueError(f"`image` has to be of type `str` `PIL.Image` or `list` but is {type(image)}") if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if (callback_steps is None) or ( callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = ( batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor, ) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents def set_transformer_params(self, mix_ratio: float = 0.5, condition_types: Tuple = ("text", "image")): for name, module in self.image_unet.named_modules(): if isinstance(module, DualTransformer2DModel): module.mix_ratio = mix_ratio for i, type in enumerate(condition_types): if type == "text": module.condition_lengths[i] = self.text_encoder.config.max_position_embeddings module.transformer_index_for_condition[i] = 1 # use the second (text) transformer else: module.condition_lengths[i] = 257 module.transformer_index_for_condition[i] = 0 # use the first (image) transformer @torch.no_grad() def __call__( self, prompt: Union[PIL.Image.Image, List[PIL.Image.Image]], image: Union[str, List[str]], text_to_image_strength: float = 0.5, height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 50, guidance_scale: float = 7.5, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, callback_steps: int = 1, **kwargs, ): r""" The call function to the pipeline for generation. 
Args: prompt (`str` or `List[str]`): The prompt or prompts to guide image generation. height (`int`, *optional*, defaults to `self.image_unet.config.sample_size * self.vae_scale_factor`): The height in pixels of the generated image. width (`int`, *optional*, defaults to `self.image_unet.config.sample_size * self.vae_scale_factor`): The width in pixels of the generated image. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. guidance_scale (`float`, *optional*, defaults to 7.5): A higher guidance scale value encourages the model to generate images closely linked to the text `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide what to not include in image generation. If not defined, you need to pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) from the [DDIM](https://huggingface.co/papers/2010.02502) paper. Only applies to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.Tensor`, *optional*): Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor is generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generated image. Choose between `PIL.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. callback (`Callable`, *optional*): A function that calls every `callback_steps` steps during inference. The function is called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function is called. If not specified, the callback is called at every step. Examples: ```py >>> from diffusers import VersatileDiffusionDualGuidedPipeline >>> import torch >>> import requests >>> from io import BytesIO >>> from PIL import Image >>> # let's download an initial image >>> url = "https://huggingface.co/datasets/diffusers/images/resolve/main/benz.jpg" >>> response = requests.get(url) >>> image = Image.open(BytesIO(response.content)).convert("RGB") >>> text = "a red car in the sun" >>> pipe = VersatileDiffusionDualGuidedPipeline.from_pretrained( ... "shi-labs/versatile-diffusion", torch_dtype=torch.float16 ... ) >>> pipe.remove_unused_weights() >>> pipe = pipe.to("cuda") >>> generator = torch.Generator(device="cuda").manual_seed(0) >>> text_to_image_strength = 0.75 >>> image = pipe( ... prompt=text, image=image, text_to_image_strength=text_to_image_strength, generator=generator ... 
).images[0] >>> image.save("./car_variation.png") ``` Returns: [`~pipelines.ImagePipelineOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated images. """ # 0. Default height and width to unet height = height or self.image_unet.config.sample_size * self.vae_scale_factor width = width or self.image_unet.config.sample_size * self.vae_scale_factor # 1. Check inputs. Raise error if not correct self.check_inputs(prompt, image, height, width, callback_steps) # 2. Define call parameters prompt = [prompt] if not isinstance(prompt, list) else prompt image = [image] if not isinstance(image, list) else image batch_size = len(prompt) device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1` # corresponds to doing no classifier free guidance. do_classifier_free_guidance = guidance_scale > 1.0 # 3. Encode input prompts prompt_embeds = self._encode_text_prompt(prompt, device, num_images_per_prompt, do_classifier_free_guidance) image_embeddings = self._encode_image_prompt(image, device, num_images_per_prompt, do_classifier_free_guidance) dual_prompt_embeddings = torch.cat([prompt_embeds, image_embeddings], dim=1) prompt_types = ("text", "image") # 4. Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps # 5. Prepare latent variables num_channels_latents = self.image_unet.config.in_channels latents = self.prepare_latents( batch_size * num_images_per_prompt, num_channels_latents, height, width, dual_prompt_embeddings.dtype, device, generator, latents, ) # 6. Prepare extra step kwargs. extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # 7. Combine the attention blocks of the image and text UNets self.set_transformer_params(text_to_image_strength, prompt_types) # 8. Denoising loop for i, t in enumerate(self.progress_bar(timesteps)): # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) # predict the noise residual noise_pred = self.image_unet(latent_model_input, t, encoder_hidden_states=dual_prompt_embeddings).sample # perform guidance if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, "order", 1) callback(step_idx, t, latents) if not output_type == "latent": image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] else: image = latents image = self.image_processor.postprocess(image, output_type=output_type) if not return_dict: return (image,) return ImagePipelineOutput(images=image)
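# --- Illustrative sketch (editor's addition, not part of the pipeline) ----------------------------
# The dual-guided behaviour configured by `set_transformer_params` above boils down to this: each
# `DualTransformer2DModel` splits the concatenated (text, image) embeddings by `condition_lengths`,
# routes each slice through the matching transformer, and blends the two residuals with `mix_ratio`
# (exposed to users as `text_to_image_strength`). The toy function below reproduces only that
# blending arithmetic with plain tensors; the branch outputs are hypothetical stand-ins, and the
# real module runs full transformer blocks rather than precomputed tensors.
import torch


def _toy_dual_blend(
    hidden_states: torch.Tensor,
    text_branch_out: torch.Tensor,
    image_branch_out: torch.Tensor,
    mix_ratio: float = 0.5,
) -> torch.Tensor:
    """Blend two conditional residuals the way the dual transformer combines them."""
    text_residual = text_branch_out - hidden_states
    image_residual = image_branch_out - hidden_states
    return hidden_states + mix_ratio * text_residual + (1.0 - mix_ratio) * image_residual


# Example: mix_ratio=0.75 weights the text branch three times as strongly as the image branch.
# _toy_dual_blend(torch.randn(1, 4, 8), torch.randn(1, 4, 8), torch.randn(1, 4, 8), 0.75)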
diffusers/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_dual_guided.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/deprecated/versatile_diffusion/pipeline_versatile_diffusion_dual_guided.py", "repo_id": "diffusers", "token_count": 11633 }
177
from dataclasses import dataclass
from typing import List, Optional, Union

import numpy as np
import PIL.Image

from ...utils import BaseOutput


@dataclass
class LEditsPPDiffusionPipelineOutput(BaseOutput):
    """
    Output class for LEdits++ Diffusion pipelines.

    Args:
        images (`List[PIL.Image.Image]` or `np.ndarray`)
            List of denoised PIL images of length `batch_size` or NumPy array of shape `(batch_size, height, width,
            num_channels)`.
        nsfw_content_detected (`List[bool]`)
            List indicating whether the corresponding generated image contains “not-safe-for-work” (nsfw) content or
            `None` if safety checking could not be performed.
    """

    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]


@dataclass
class LEditsPPInversionPipelineOutput(BaseOutput):
    """
    Output class for the inversion step of LEdits++ Diffusion pipelines.

    Args:
        images (`List[PIL.Image.Image]` or `np.ndarray`)
            List of the cropped and resized input images as PIL images of length `batch_size` or NumPy array of shape
            `(batch_size, height, width, num_channels)`.
        vae_reconstruction_images (`List[PIL.Image.Image]` or `np.ndarray`)
            List of VAE reconstructions of all input images as PIL images of length `batch_size` or NumPy array of
            shape `(batch_size, height, width, num_channels)`.
    """

    images: Union[List[PIL.Image.Image], np.ndarray]
    vae_reconstruction_images: Union[List[PIL.Image.Image], np.ndarray]
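# --- Illustrative sketch (editor's addition, not part of the module) ------------------------------
# Both classes above are plain `BaseOutput` dataclasses: the LEdits++ pipelines fill them for you,
# and the fields can then be read by attribute (and, like other diffusers outputs, by key). The
# placeholder image below is hypothetical and only makes the snippet runnable on its own.
import numpy as np
import PIL.Image

_placeholder = PIL.Image.fromarray(np.zeros((64, 64, 3), dtype=np.uint8))
_inversion_output = LEditsPPInversionPipelineOutput(
    images=[_placeholder],
    vae_reconstruction_images=[_placeholder],
)
assert _inversion_output.images[0].size == (64, 64)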
diffusers/src/diffusers/pipelines/ledits_pp/pipeline_output.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/ledits_pp/pipeline_output.py", "repo_id": "diffusers", "token_count": 613 }
178
# Copyright 2025 The SkyReels-V2 Team, The Wan Team and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import html import math import re from copy import deepcopy from typing import Any, Callable, Dict, List, Optional, Union import ftfy import torch from transformers import AutoTokenizer, UMT5EncoderModel from ...callbacks import MultiPipelineCallbacks, PipelineCallback from ...loaders import SkyReelsV2LoraLoaderMixin from ...models import AutoencoderKLWan, SkyReelsV2Transformer3DModel from ...schedulers import UniPCMultistepScheduler from ...utils import is_ftfy_available, is_torch_xla_available, logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ...video_processor import VideoProcessor from ..pipeline_utils import DiffusionPipeline from .pipeline_output import SkyReelsV2PipelineOutput if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) # pylint: disable=invalid-name if is_ftfy_available(): import ftfy EXAMPLE_DOC_STRING = """\ Examples: ```py >>> import torch >>> from diffusers import ( ... SkyReelsV2DiffusionForcingPipeline, ... UniPCMultistepScheduler, ... AutoencoderKLWan, ... ) >>> from diffusers.utils import export_to_video >>> # Load the pipeline >>> # Available models: >>> # - Skywork/SkyReels-V2-DF-1.3B-540P-Diffusers >>> # - Skywork/SkyReels-V2-DF-14B-540P-Diffusers >>> # - Skywork/SkyReels-V2-DF-14B-720P-Diffusers >>> vae = AutoencoderKLWan.from_pretrained( ... "Skywork/SkyReels-V2-DF-14B-720P-Diffusers", ... subfolder="vae", ... torch_dtype=torch.float32, ... ) >>> pipe = SkyReelsV2DiffusionForcingPipeline.from_pretrained( ... "Skywork/SkyReels-V2-DF-14B-720P-Diffusers", ... vae=vae, ... torch_dtype=torch.bfloat16, ... ) >>> flow_shift = 8.0 # 8.0 for T2V, 5.0 for I2V >>> pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config, flow_shift=flow_shift) >>> pipe = pipe.to("cuda") >>> prompt = "A cat and a dog baking a cake together in a kitchen. The cat is carefully measuring flour, while the dog is stirring the batter with a wooden spoon. The kitchen is cozy, with sunlight streaming through the window." >>> output = pipe( ... prompt=prompt, ... num_inference_steps=30, ... height=544, ... width=960, ... guidance_scale=6.0, # 6.0 for T2V, 5.0 for I2V ... num_frames=97, ... ar_step=5, # Controls asynchronous inference (0 for synchronous mode) ... causal_block_size=5, # Number of frames processed together in a causal block ... overlap_history=None, # Number of frames to overlap for smooth transitions in long videos ... addnoise_condition=20, # Improves consistency in long video generation ... 
).frames[0] >>> export_to_video(output, "video.mp4", fps=24, quality=8) ``` """ def basic_clean(text): text = ftfy.fix_text(text) text = html.unescape(html.unescape(text)) return text.strip() def whitespace_clean(text): text = re.sub(r"\s+", " ", text) text = text.strip() return text def prompt_clean(text): text = whitespace_clean(basic_clean(text)) return text # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents def retrieve_latents( encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample" ): if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": return encoder_output.latent_dist.sample(generator) elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": return encoder_output.latent_dist.mode() elif hasattr(encoder_output, "latents"): return encoder_output.latents else: raise AttributeError("Could not access latents of provided encoder_output") class SkyReelsV2DiffusionForcingPipeline(DiffusionPipeline, SkyReelsV2LoraLoaderMixin): """ Pipeline for Text-to-Video (t2v) generation using SkyReels-V2 with diffusion forcing. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a specific device, etc.). Args: tokenizer ([`AutoTokenizer`]): Tokenizer from [T5](https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5Tokenizer), specifically the [google/umt5-xxl](https://huggingface.co/google/umt5-xxl) variant. text_encoder ([`UMT5EncoderModel`]): [T5](https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5EncoderModel), specifically the [google/umt5-xxl](https://huggingface.co/google/umt5-xxl) variant. transformer ([`SkyReelsV2Transformer3DModel`]): Conditional Transformer to denoise the encoded image latents. scheduler ([`UniPCMultistepScheduler`]): A scheduler to be used in combination with `transformer` to denoise the encoded image latents. vae ([`AutoencoderKLWan`]): Variational Auto-Encoder (VAE) Model to encode and decode videos to and from latent representations. 
""" model_cpu_offload_seq = "text_encoder->transformer->vae" _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"] def __init__( self, tokenizer: AutoTokenizer, text_encoder: UMT5EncoderModel, transformer: SkyReelsV2Transformer3DModel, vae: AutoencoderKLWan, scheduler: UniPCMultistepScheduler, ): super().__init__() self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, transformer=transformer, scheduler=scheduler, ) self.vae_scale_factor_temporal = 2 ** sum(self.vae.temperal_downsample) if getattr(self, "vae", None) else 4 self.vae_scale_factor_spatial = 2 ** len(self.vae.temperal_downsample) if getattr(self, "vae", None) else 8 self.video_processor = VideoProcessor(vae_scale_factor=self.vae_scale_factor_spatial) # Copied from diffusers.pipelines.wan.pipeline_wan.WanPipeline._get_t5_prompt_embeds def _get_t5_prompt_embeds( self, prompt: Union[str, List[str]] = None, num_videos_per_prompt: int = 1, max_sequence_length: int = 226, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None, ): device = device or self._execution_device dtype = dtype or self.text_encoder.dtype prompt = [prompt] if isinstance(prompt, str) else prompt prompt = [prompt_clean(u) for u in prompt] batch_size = len(prompt) text_inputs = self.tokenizer( prompt, padding="max_length", max_length=max_sequence_length, truncation=True, add_special_tokens=True, return_attention_mask=True, return_tensors="pt", ) text_input_ids, mask = text_inputs.input_ids, text_inputs.attention_mask seq_lens = mask.gt(0).sum(dim=1).long() prompt_embeds = self.text_encoder(text_input_ids.to(device), mask.to(device)).last_hidden_state prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) prompt_embeds = [u[:v] for u, v in zip(prompt_embeds, seq_lens)] prompt_embeds = torch.stack( [torch.cat([u, u.new_zeros(max_sequence_length - u.size(0), u.size(1))]) for u in prompt_embeds], dim=0 ) # duplicate text embeddings for each generation per prompt, using mps friendly method _, seq_len, _ = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_videos_per_prompt, 1) prompt_embeds = prompt_embeds.view(batch_size * num_videos_per_prompt, seq_len, -1) return prompt_embeds # Copied from diffusers.pipelines.wan.pipeline_wan.WanPipeline.encode_prompt def encode_prompt( self, prompt: Union[str, List[str]], negative_prompt: Optional[Union[str, List[str]]] = None, do_classifier_free_guidance: bool = True, num_videos_per_prompt: int = 1, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, max_sequence_length: int = 226, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). do_classifier_free_guidance (`bool`, *optional*, defaults to `True`): Whether to use classifier free guidance or not. num_videos_per_prompt (`int`, *optional*, defaults to 1): Number of videos that should be generated per prompt. torch device to place the resulting embeddings on prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. 
If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. device: (`torch.device`, *optional*): torch device dtype: (`torch.dtype`, *optional*): torch dtype """ device = device or self._execution_device prompt = [prompt] if isinstance(prompt, str) else prompt if prompt is not None: batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: prompt_embeds = self._get_t5_prompt_embeds( prompt=prompt, num_videos_per_prompt=num_videos_per_prompt, max_sequence_length=max_sequence_length, device=device, dtype=dtype, ) if do_classifier_free_guidance and negative_prompt_embeds is None: negative_prompt = negative_prompt or "" negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt if prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) negative_prompt_embeds = self._get_t5_prompt_embeds( prompt=negative_prompt, num_videos_per_prompt=num_videos_per_prompt, max_sequence_length=max_sequence_length, device=device, dtype=dtype, ) return prompt_embeds, negative_prompt_embeds def check_inputs( self, prompt, negative_prompt, height, width, prompt_embeds=None, negative_prompt_embeds=None, callback_on_step_end_tensor_inputs=None, overlap_history=None, num_frames=None, base_num_frames=None, ): if height % 16 != 0 or width % 16 != 0: raise ValueError(f"`height` and `width` have to be divisible by 16 but are {height} and {width}.") if callback_on_step_end_tensor_inputs is not None and not all( k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs ): raise ValueError( f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" ) if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." 
) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") elif negative_prompt is not None and ( not isinstance(negative_prompt, str) and not isinstance(negative_prompt, list) ): raise ValueError(f"`negative_prompt` has to be of type `str` or `list` but is {type(negative_prompt)}") if num_frames > base_num_frames and overlap_history is None: raise ValueError( "`overlap_history` is required when `num_frames` exceeds `base_num_frames` to ensure smooth transitions in long video generation. " "Please specify a value for `overlap_history`. Recommended values are 17 or 37." ) def prepare_latents( self, batch_size: int, num_channels_latents: int = 16, height: int = 480, width: int = 832, num_frames: int = 97, dtype: Optional[torch.dtype] = None, device: Optional[torch.device] = None, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, base_latent_num_frames: Optional[int] = None, video_latents: Optional[torch.Tensor] = None, causal_block_size: Optional[int] = None, overlap_history_latent_frames: Optional[int] = None, long_video_iter: Optional[int] = None, ) -> torch.Tensor: if latents is not None: return latents.to(device=device, dtype=dtype) num_latent_frames = (num_frames - 1) // self.vae_scale_factor_temporal + 1 latent_height = height // self.vae_scale_factor_spatial latent_width = width // self.vae_scale_factor_spatial prefix_video_latents = None prefix_video_latents_frames = 0 if video_latents is not None: # long video generation at the iterations other than the first one prefix_video_latents = video_latents[:, :, -overlap_history_latent_frames:] if prefix_video_latents.shape[2] % causal_block_size != 0: truncate_len_latents = prefix_video_latents.shape[2] % causal_block_size logger.warning( f"The length of prefix video latents is truncated by {truncate_len_latents} frames for the causal block size alignment. " f"This truncation ensures compatibility with the causal block size, which is required for proper processing. " f"However, it may slightly affect the continuity of the generated video at the truncation boundary." ) prefix_video_latents = prefix_video_latents[:, :, :-truncate_len_latents] prefix_video_latents_frames = prefix_video_latents.shape[2] finished_frame_num = ( long_video_iter * (base_latent_num_frames - overlap_history_latent_frames) + overlap_history_latent_frames ) left_frame_num = num_latent_frames - finished_frame_num num_latent_frames = min(left_frame_num + overlap_history_latent_frames, base_latent_num_frames) elif base_latent_num_frames is not None: # long video generation at the first iteration num_latent_frames = base_latent_num_frames else: # short video generation num_latent_frames = (num_frames - 1) // self.vae_scale_factor_temporal + 1 shape = ( batch_size, num_channels_latents, num_latent_frames, latent_height, latent_width, ) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
) latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) return latents, num_latent_frames, prefix_video_latents, prefix_video_latents_frames def generate_timestep_matrix( self, num_latent_frames: int, step_template: torch.Tensor, base_num_latent_frames: int, ar_step: int = 5, num_pre_ready: int = 0, causal_block_size: int = 1, shrink_interval_with_mask: bool = False, ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, list[tuple]]: """ This function implements the core diffusion forcing algorithm that creates a coordinated denoising schedule across temporal frames. It supports both synchronous and asynchronous generation modes: **Synchronous Mode** (ar_step=0, causal_block_size=1): - All frames are denoised simultaneously at each timestep - Each frame follows the same denoising trajectory: [1000, 800, 600, ..., 0] - Simpler but may have less temporal consistency for long videos **Asynchronous Mode** (ar_step>0, causal_block_size>1): - Frames are grouped into causal blocks and processed block/chunk-wise - Each block is denoised in a staggered pattern creating a "denoising wave" - Earlier blocks are more denoised, later blocks lag behind by ar_step timesteps - Creates stronger temporal dependencies and better consistency Args: num_latent_frames (int): Total number of latent frames to generate step_template (torch.Tensor): Base timestep schedule (e.g., [1000, 800, 600, ..., 0]) base_num_latent_frames (int): Maximum frames the model can process in one forward pass ar_step (int, optional): Autoregressive step size for temporal lag. 0 = synchronous, >0 = asynchronous. Defaults to 5. num_pre_ready (int, optional): Number of frames already denoised (e.g., from prefix in a video2video task). Defaults to 0. causal_block_size (int, optional): Number of frames processed as a causal block. Defaults to 1. shrink_interval_with_mask (bool, optional): Whether to optimize processing intervals. Defaults to False. 
Returns: tuple containing: - step_matrix (torch.Tensor): Matrix of timesteps for each frame at each iteration Shape: [num_iterations, num_latent_frames] - step_index (torch.Tensor): Index matrix for timestep lookup Shape: [num_iterations, num_latent_frames] - step_update_mask (torch.Tensor): Boolean mask indicating which frames to update Shape: [num_iterations, num_latent_frames] - valid_interval (list[tuple]): List of (start, end) intervals for each iteration Raises: ValueError: If ar_step is too small for the given configuration """ # Initialize lists to store the scheduling matrices and metadata step_matrix, step_index = [], [] # Will store timestep values and indices for each iteration update_mask, valid_interval = [], [] # Will store update masks and processing intervals # Calculate total number of denoising iterations (add 1 for initial noise state) num_iterations = len(step_template) + 1 # Convert frame counts to block counts for causal processing # Each block contains causal_block_size frames that are processed together # E.g.: 25 frames ÷ 5 = 5 blocks total num_blocks = num_latent_frames // causal_block_size base_num_blocks = base_num_latent_frames // causal_block_size # Validate ar_step is sufficient for the given configuration # In asynchronous mode, we need enough timesteps to create the staggered pattern if base_num_blocks < num_blocks: min_ar_step = len(step_template) / base_num_blocks if ar_step < min_ar_step: raise ValueError(f"`ar_step` should be at least {math.ceil(min_ar_step)} in your setting") # Extend step_template with boundary values for easier indexing # 999: dummy value for counter starting from 1 # 0: final timestep (completely denoised) step_template = torch.cat( [ torch.tensor([999], dtype=torch.int64, device=step_template.device), step_template.long(), torch.tensor([0], dtype=torch.int64, device=step_template.device), ] ) # Initialize the previous row state (tracks denoising progress for each block) # 0 means not started, num_iterations means fully denoised pre_row = torch.zeros(num_blocks, dtype=torch.long) # Mark pre-ready frames (e.g., from prefix video for a video2video task) as already at final denoising state if num_pre_ready > 0: pre_row[: num_pre_ready // causal_block_size] = num_iterations # Main loop: Generate denoising schedule until all frames are fully denoised while not torch.all(pre_row >= (num_iterations - 1)): # Create new row representing the next denoising step new_row = torch.zeros(num_blocks, dtype=torch.long) # Apply diffusion forcing logic for each block for i in range(num_blocks): if i == 0 or pre_row[i - 1] >= ( num_iterations - 1 ): # the first frame or the last frame is completely denoised new_row[i] = pre_row[i] + 1 else: # Asynchronous mode: lag behind previous block by ar_step timesteps # This creates the "diffusion forcing" staggered pattern new_row[i] = new_row[i - 1] - ar_step # Clamp values to valid range [0, num_iterations] new_row = new_row.clamp(0, num_iterations) # Create update mask: True for blocks that need denoising update at this iteration # Exclude blocks that haven't started (new_row != pre_row) or are finished (new_row != num_iterations) # Final state example: [False, ..., False, True, True, True, True, True] # where first 20 frames are done (False) and last 5 frames still need updates (True) update_mask.append((new_row != pre_row) & (new_row != num_iterations)) # Store the iteration state step_index.append(new_row) # Index into step_template step_matrix.append(step_template[new_row]) # Actual timestep values 
pre_row = new_row # Update for next iteration # For videos longer than model capacity, we process in sliding windows terminal_flag = base_num_blocks # Optional optimization: shrink interval based on first update mask if shrink_interval_with_mask: idx_sequence = torch.arange(num_blocks, dtype=torch.int64) update_mask = update_mask[0] update_mask_idx = idx_sequence[update_mask] last_update_idx = update_mask_idx[-1].item() terminal_flag = last_update_idx + 1 # Each interval defines which frames to process in the current forward pass for curr_mask in update_mask: # Extend terminal flag if current mask has updates beyond current terminal if terminal_flag < num_blocks and curr_mask[terminal_flag]: terminal_flag += 1 # Create interval: [start, end) where start ensures we don't exceed model capacity valid_interval.append((max(terminal_flag - base_num_blocks, 0), terminal_flag)) # Convert lists to tensors for efficient processing step_update_mask = torch.stack(update_mask, dim=0) step_index = torch.stack(step_index, dim=0) step_matrix = torch.stack(step_matrix, dim=0) # Each block's schedule is replicated to all frames within that block if causal_block_size > 1: # Expand each block to causal_block_size frames step_update_mask = step_update_mask.unsqueeze(-1).repeat(1, 1, causal_block_size).flatten(1).contiguous() step_index = step_index.unsqueeze(-1).repeat(1, 1, causal_block_size).flatten(1).contiguous() step_matrix = step_matrix.unsqueeze(-1).repeat(1, 1, causal_block_size).flatten(1).contiguous() # Scale intervals from block-level to frame-level valid_interval = [(s * causal_block_size, e * causal_block_size) for s, e in valid_interval] return step_matrix, step_index, step_update_mask, valid_interval @property def guidance_scale(self): return self._guidance_scale @property def do_classifier_free_guidance(self): return self._guidance_scale > 1.0 @property def num_timesteps(self): return self._num_timesteps @property def current_timestep(self): return self._current_timestep @property def interrupt(self): return self._interrupt @property def attention_kwargs(self): return self._attention_kwargs @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, prompt: Union[str, List[str]], negative_prompt: Union[str, List[str]] = None, height: int = 544, width: int = 960, num_frames: int = 97, num_inference_steps: int = 50, guidance_scale: float = 6.0, num_videos_per_prompt: Optional[int] = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, output_type: Optional[str] = "np", return_dict: bool = True, attention_kwargs: Optional[Dict[str, Any]] = None, callback_on_step_end: Optional[ Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks] ] = None, callback_on_step_end_tensor_inputs: List[str] = ["latents"], max_sequence_length: int = 512, overlap_history: Optional[int] = None, addnoise_condition: float = 0, base_num_frames: int = 97, ar_step: int = 0, causal_block_size: Optional[int] = None, fps: int = 24, ): r""" The call function to the pipeline for generation. Args: prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. instead. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. 
Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). height (`int`, defaults to `544`): The height of the generated video. width (`int`, defaults to `960`): The width of the generated video. num_frames (`int`, defaults to `97`): The number of frames in the generated video. num_inference_steps (`int`, defaults to `50`): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. guidance_scale (`float`, defaults to `6.0`): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. (**6.0 for T2V**, **5.0 for I2V**) num_videos_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.Tensor`, *optional*): Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor is generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, text embeddings are generated from the `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, text embeddings are generated from the `negative_prompt` input argument. output_type (`str`, *optional*, defaults to `"np"`): The output format of the generated image. Choose between `PIL.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`SkyReelsV2PipelineOutput`] instead of a plain tuple. attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*): A function or a subclass of `PipelineCallback` or `MultiPipelineCallbacks` that is called at the end of each denoising step during the inference. with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by `callback_on_step_end_tensor_inputs`. callback_on_step_end_tensor_inputs (`List`, *optional*): The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the `._callback_tensor_inputs` attribute of your pipeline class. max_sequence_length (`int`, *optional*, defaults to `512`): The maximum sequence length of the prompt. 
overlap_history (`int`, *optional*, defaults to `None`): Number of frames to overlap for smooth transitions in long videos. If `None`, the pipeline assumes short video generation mode, and no overlap is applied. 17 and 37 are recommended to set. addnoise_condition (`float`, *optional*, defaults to `0`): This is used to help smooth the long video generation by adding some noise to the clean condition. Too large noise can cause the inconsistency as well. 20 is a recommended value, and you may try larger ones, but it is recommended to not exceed 50. base_num_frames (`int`, *optional*, defaults to `97`): 97 or 121 | Base frame count (**97 for 540P**, **121 for 720P**) ar_step (`int`, *optional*, defaults to `0`): Controls asynchronous inference (0 for synchronous mode) You can set `ar_step=5` to enable asynchronous inference. When asynchronous inference, `causal_block_size=5` is recommended while it is not supposed to be set for synchronous generation. Asynchronous inference will take more steps to diffuse the whole sequence which means it will be SLOWER than synchronous mode. In our experiments, asynchronous inference may improve the instruction following and visual consistent performance. causal_block_size (`int`, *optional*, defaults to `None`): The number of frames in each block/chunk. Recommended when using asynchronous inference (when ar_step > 0) fps (`int`, *optional*, defaults to `24`): Frame rate of the generated video Examples: Returns: [`~SkyReelsV2PipelineOutput`] or `tuple`: If `return_dict` is `True`, [`SkyReelsV2PipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated images and the second element is a list of `bool`s indicating whether the corresponding generated image contains "not-safe-for-work" (nsfw) content. """ if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs # 1. Check inputs. Raise error if not correct self.check_inputs( prompt, negative_prompt, height, width, prompt_embeds, negative_prompt_embeds, callback_on_step_end_tensor_inputs, overlap_history, num_frames, base_num_frames, ) if addnoise_condition > 60: logger.warning( f"The value of 'addnoise_condition' is too large ({addnoise_condition}) and may cause inconsistencies in long video generation. A value of 20 is recommended." ) if num_frames % self.vae_scale_factor_temporal != 1: logger.warning( f"`num_frames - 1` has to be divisible by {self.vae_scale_factor_temporal}. Rounding to the nearest number." ) num_frames = num_frames // self.vae_scale_factor_temporal * self.vae_scale_factor_temporal + 1 num_frames = max(num_frames, 1) self._guidance_scale = guidance_scale self._attention_kwargs = attention_kwargs self._current_timestep = None self._interrupt = False device = self._execution_device # 2. Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] # 3. 
Encode input prompt prompt_embeds, negative_prompt_embeds = self.encode_prompt( prompt=prompt, negative_prompt=negative_prompt, do_classifier_free_guidance=self.do_classifier_free_guidance, num_videos_per_prompt=num_videos_per_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, max_sequence_length=max_sequence_length, device=device, ) transformer_dtype = self.transformer.dtype prompt_embeds = prompt_embeds.to(transformer_dtype) if negative_prompt_embeds is not None: negative_prompt_embeds = negative_prompt_embeds.to(transformer_dtype) # 4. Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps if causal_block_size is None: causal_block_size = self.transformer.config.num_frame_per_block else: self.transformer._set_ar_attention(causal_block_size) fps_embeds = [fps] * prompt_embeds.shape[0] fps_embeds = [0 if i == 16 else 1 for i in fps_embeds] # Determine if we're doing long video generation is_long_video = overlap_history is not None and base_num_frames is not None and num_frames > base_num_frames # Initialize accumulated_latents to store all latents in one tensor accumulated_latents = None if is_long_video: # Long video generation setup overlap_history_latent_frames = (overlap_history - 1) // self.vae_scale_factor_temporal + 1 num_latent_frames = (num_frames - 1) // self.vae_scale_factor_temporal + 1 base_latent_num_frames = ( (base_num_frames - 1) // self.vae_scale_factor_temporal + 1 if base_num_frames is not None else num_latent_frames ) n_iter = ( 1 + (num_latent_frames - base_latent_num_frames - 1) // (base_latent_num_frames - overlap_history_latent_frames) + 1 ) else: # Short video generation setup n_iter = 1 base_latent_num_frames = (num_frames - 1) // self.vae_scale_factor_temporal + 1 # Loop through iterations (multiple iterations only for long videos) for iter_idx in range(n_iter): if is_long_video: logger.debug(f"Processing iteration {iter_idx + 1}/{n_iter} for long video generation...") # 5. Prepare latent variables num_channels_latents = self.transformer.config.in_channels latents, current_num_latent_frames, prefix_video_latents, prefix_video_latents_frames = ( self.prepare_latents( batch_size * num_videos_per_prompt, num_channels_latents, height, width, num_frames, torch.float32, device, generator, latents if iter_idx == 0 else None, video_latents=accumulated_latents, # Pass latents directly instead of decoded video base_latent_num_frames=base_latent_num_frames if is_long_video else None, causal_block_size=causal_block_size, overlap_history_latent_frames=overlap_history_latent_frames if is_long_video else None, long_video_iter=iter_idx if is_long_video else None, ) ) if prefix_video_latents_frames > 0: latents[:, :, :prefix_video_latents_frames, :, :] = prefix_video_latents.to(transformer_dtype) # 6. Prepare sample schedulers and timestep matrix sample_schedulers = [] for _ in range(current_num_latent_frames): sample_scheduler = deepcopy(self.scheduler) sample_scheduler.set_timesteps(num_inference_steps, device=device) sample_schedulers.append(sample_scheduler) # Different matrix generation for short vs long video step_matrix, _, step_update_mask, valid_interval = self.generate_timestep_matrix( current_num_latent_frames, timesteps, current_num_latent_frames if is_long_video else base_latent_num_frames, ar_step, prefix_video_latents_frames, causal_block_size, ) # 7. 
Denoising loop num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order self._num_timesteps = len(step_matrix) with self.progress_bar(total=len(step_matrix)) as progress_bar: for i, t in enumerate(step_matrix): if self.interrupt: continue self._current_timestep = t valid_interval_start, valid_interval_end = valid_interval[i] latent_model_input = ( latents[:, :, valid_interval_start:valid_interval_end, :, :].to(transformer_dtype).clone() ) timestep = t.expand(latents.shape[0], -1)[:, valid_interval_start:valid_interval_end].clone() if addnoise_condition > 0 and valid_interval_start < prefix_video_latents_frames: noise_factor = 0.001 * addnoise_condition latent_model_input[:, :, valid_interval_start:prefix_video_latents_frames, :, :] = ( latent_model_input[:, :, valid_interval_start:prefix_video_latents_frames, :, :] * (1.0 - noise_factor) + torch.randn_like( latent_model_input[:, :, valid_interval_start:prefix_video_latents_frames, :, :] ) * noise_factor ) timestep[:, valid_interval_start:prefix_video_latents_frames] = addnoise_condition noise_pred = self.transformer( hidden_states=latent_model_input, timestep=timestep, encoder_hidden_states=prompt_embeds, enable_diffusion_forcing=True, fps=fps_embeds, attention_kwargs=attention_kwargs, return_dict=False, )[0] if self.do_classifier_free_guidance: noise_uncond = self.transformer( hidden_states=latent_model_input, timestep=timestep, encoder_hidden_states=negative_prompt_embeds, enable_diffusion_forcing=True, fps=fps_embeds, attention_kwargs=attention_kwargs, return_dict=False, )[0] noise_pred = noise_uncond + guidance_scale * (noise_pred - noise_uncond) update_mask_i = step_update_mask[i] for idx in range(valid_interval_start, valid_interval_end): if update_mask_i[idx].item(): latents[:, :, idx, :, :] = sample_schedulers[idx].step( noise_pred[:, :, idx - valid_interval_start, :, :], t[idx], latents[:, :, idx, :, :], return_dict=False, )[0] if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop("latents", latents) prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) # call the callback, if provided if i == len(step_matrix) - 1 or ( (i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0 ): progress_bar.update() if XLA_AVAILABLE: xm.mark_step() # Handle latent accumulation for long videos or use the current latents for short videos if is_long_video: if accumulated_latents is None: accumulated_latents = latents else: # Keep overlap frames for conditioning but don't include them in final output accumulated_latents = torch.cat( [accumulated_latents, latents[:, :, overlap_history_latent_frames:]], dim=2 ) if is_long_video: latents = accumulated_latents self._current_timestep = None # Final decoding step - convert latents to pixels if not output_type == "latent": latents = latents.to(self.vae.dtype) latents_mean = ( torch.tensor(self.vae.config.latents_mean) .view(1, self.vae.config.z_dim, 1, 1, 1) .to(latents.device, latents.dtype) ) latents_std = 1.0 / torch.tensor(self.vae.config.latents_std).view(1, self.vae.config.z_dim, 1, 1, 1).to( latents.device, latents.dtype ) latents = latents / latents_std + latents_mean video = self.vae.decode(latents, return_dict=False)[0] video = self.video_processor.postprocess_video(video, 
output_type=output_type) else: video = latents # Offload all models self.maybe_free_model_hooks() if not return_dict: return (video,) return SkyReelsV2PipelineOutput(frames=video)
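# --- Illustrative sketch (editor's addition, not part of the pipeline) ----------------------------
# `generate_timestep_matrix` above coordinates per-frame denoising: with `ar_step > 0`, each causal
# block lags the previous one by `ar_step` scheduler steps, producing the "denoising wave" described
# in its docstring. The toy function below reproduces only that staggering idea on a handful of
# blocks so the pattern can be printed and inspected; it is a simplified sketch (no prefix frames,
# no sliding window, no causal-block expansion), not the production algorithm.
import torch


def _toy_staggered_schedule(num_blocks: int, num_steps: int, ar_step: int) -> torch.Tensor:
    """Return a [num_iterations, num_blocks] matrix of completed-step counts per block."""
    rows = []
    progress = torch.zeros(num_blocks, dtype=torch.long)  # 0 = pure noise, num_steps = fully denoised
    while not torch.all(progress >= num_steps):
        new = progress.clone()
        for i in range(num_blocks):
            if i == 0 or progress[i - 1] >= num_steps:
                new[i] = progress[i] + 1  # leading (or unblocked) block advances one step
            else:
                new[i] = new[i - 1] - ar_step  # later blocks trail the previous block by ar_step
        new = new.clamp(0, num_steps)
        rows.append(new)
        progress = new
    return torch.stack(rows)


# Example: 4 blocks, 10 scheduler steps, ar_step=3 -> earlier blocks finish first while later
# blocks trail behind, mirroring the asynchronous mode documented above.
# print(_toy_staggered_schedule(4, 10, 3))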
diffusers/src/diffusers/pipelines/skyreels_v2/pipeline_skyreels_v2_diffusion_forcing.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/skyreels_v2/pipeline_skyreels_v2_diffusion_forcing.py", "repo_id": "diffusers", "token_count": 21569 }
179
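Illustrative usage sketch for the SkyReels-V2 diffusion-forcing pipeline above. The pipeline class name follows the file, but the checkpoint id and the argument values (frame count, steps, `addnoise_condition`) are assumptions chosen as plausible settings, not values taken from this source.

```py
# Hedged sketch: checkpoint id and argument values are illustrative assumptions.
import torch
from diffusers import SkyReelsV2DiffusionForcingPipeline
from diffusers.utils import export_to_video

pipe = SkyReelsV2DiffusionForcingPipeline.from_pretrained(
    "Skywork/SkyReels-V2-DF-1.3B-540P-Diffusers",  # assumed checkpoint id
    torch_dtype=torch.bfloat16,
).to("cuda")

video = pipe(
    prompt="A cat walking through tall grass at sunset",
    num_frames=97,
    num_inference_steps=30,
    guidance_scale=6.0,
    addnoise_condition=20,  # lightly re-noises the conditioning frames, as in the loop above
).frames[0]
export_to_video(video, "skyreels_v2.mp4", fps=24)
```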
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import warnings from functools import partial from typing import Dict, List, Optional, Union import jax import jax.numpy as jnp import numpy as np from flax.core.frozen_dict import FrozenDict from flax.jax_utils import unreplicate from flax.training.common_utils import shard from PIL import Image from transformers import CLIPImageProcessor, CLIPTokenizer, FlaxCLIPTextModel from ...models import FlaxAutoencoderKL, FlaxUNet2DConditionModel from ...schedulers import ( FlaxDDIMScheduler, FlaxDPMSolverMultistepScheduler, FlaxLMSDiscreteScheduler, FlaxPNDMScheduler, ) from ...utils import PIL_INTERPOLATION, logging, replace_example_docstring from ..pipeline_flax_utils import FlaxDiffusionPipeline from .pipeline_output import FlaxStableDiffusionPipelineOutput from .safety_checker_flax import FlaxStableDiffusionSafetyChecker logger = logging.get_logger(__name__) # pylint: disable=invalid-name # Set to True to use python for loop instead of jax.fori_loop for easier debugging DEBUG = False EXAMPLE_DOC_STRING = """ Examples: ```py >>> import jax >>> import numpy as np >>> import jax.numpy as jnp >>> from flax.jax_utils import replicate >>> from flax.training.common_utils import shard >>> import requests >>> from io import BytesIO >>> from PIL import Image >>> from diffusers import FlaxStableDiffusionImg2ImgPipeline >>> def create_key(seed=0): ... return jax.random.PRNGKey(seed) >>> rng = create_key(0) >>> url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg" >>> response = requests.get(url) >>> init_img = Image.open(BytesIO(response.content)).convert("RGB") >>> init_img = init_img.resize((768, 512)) >>> prompts = "A fantasy landscape, trending on artstation" >>> pipeline, params = FlaxStableDiffusionImg2ImgPipeline.from_pretrained( ... "CompVis/stable-diffusion-v1-4", ... revision="flax", ... dtype=jnp.bfloat16, ... ) >>> num_samples = jax.device_count() >>> rng = jax.random.split(rng, jax.device_count()) >>> prompt_ids, processed_image = pipeline.prepare_inputs( ... prompt=[prompts] * num_samples, image=[init_img] * num_samples ... ) >>> p_params = replicate(params) >>> prompt_ids = shard(prompt_ids) >>> processed_image = shard(processed_image) >>> output = pipeline( ... prompt_ids=prompt_ids, ... image=processed_image, ... params=p_params, ... prng_seed=rng, ... strength=0.75, ... num_inference_steps=50, ... jit=True, ... height=512, ... width=768, ... ).images >>> output_images = pipeline.numpy_to_pil(np.asarray(output.reshape((num_samples,) + output.shape[-3:]))) ``` """ class FlaxStableDiffusionImg2ImgPipeline(FlaxDiffusionPipeline): r""" Flax-based pipeline for text-guided image-to-image generation using Stable Diffusion. This model inherits from [`FlaxDiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). 
Args: vae ([`FlaxAutoencoderKL`]): Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. text_encoder ([`~transformers.FlaxCLIPTextModel`]): Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). tokenizer ([`~transformers.CLIPTokenizer`]): A `CLIPTokenizer` to tokenize text. unet ([`FlaxUNet2DConditionModel`]): A `FlaxUNet2DConditionModel` to denoise the encoded image latents. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`FlaxDDIMScheduler`], [`FlaxLMSDiscreteScheduler`], [`FlaxPNDMScheduler`], or [`FlaxDPMSolverMultistepScheduler`]. safety_checker ([`FlaxStableDiffusionSafetyChecker`]): Classification module that estimates whether generated images could be considered offensive or harmful. Please refer to the [model card](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5) for more details about a model's potential harms. feature_extractor ([`~transformers.CLIPImageProcessor`]): A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. """ def __init__( self, vae: FlaxAutoencoderKL, text_encoder: FlaxCLIPTextModel, tokenizer: CLIPTokenizer, unet: FlaxUNet2DConditionModel, scheduler: Union[ FlaxDDIMScheduler, FlaxPNDMScheduler, FlaxLMSDiscreteScheduler, FlaxDPMSolverMultistepScheduler ], safety_checker: FlaxStableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, dtype: jnp.dtype = jnp.float32, ): super().__init__() self.dtype = dtype if safety_checker is None: logger.warning( f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" " results in services or applications open to the public. Both the diffusers team and Hugging Face" " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" " it only for use-cases that involve analyzing network behavior or auditing its results. For more" " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." 
) self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8 def prepare_inputs(self, prompt: Union[str, List[str]], image: Union[Image.Image, List[Image.Image]]): if not isinstance(prompt, (str, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if not isinstance(image, (Image.Image, list)): raise ValueError(f"image has to be of type `PIL.Image.Image` or list but is {type(image)}") if isinstance(image, Image.Image): image = [image] processed_images = jnp.concatenate([preprocess(img, jnp.float32) for img in image]) text_input = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="np", ) return text_input.input_ids, processed_images def _get_has_nsfw_concepts(self, features, params): has_nsfw_concepts = self.safety_checker(features, params) return has_nsfw_concepts def _run_safety_checker(self, images, safety_model_params, jit=False): # safety_model_params should already be replicated when jit is True pil_images = [Image.fromarray(image) for image in images] features = self.feature_extractor(pil_images, return_tensors="np").pixel_values if jit: features = shard(features) has_nsfw_concepts = _p_get_has_nsfw_concepts(self, features, safety_model_params) has_nsfw_concepts = unshard(has_nsfw_concepts) safety_model_params = unreplicate(safety_model_params) else: has_nsfw_concepts = self._get_has_nsfw_concepts(features, safety_model_params) images_was_copied = False for idx, has_nsfw_concept in enumerate(has_nsfw_concepts): if has_nsfw_concept: if not images_was_copied: images_was_copied = True images = images.copy() images[idx] = np.zeros(images[idx].shape, dtype=np.uint8) # black image if any(has_nsfw_concepts): warnings.warn( "Potential NSFW content was detected in one or more images. A black image will be returned" " instead. Try again with a different prompt and/or seed." 
) return images, has_nsfw_concepts def get_timestep_start(self, num_inference_steps, strength): # get the original timestep using init_timestep init_timestep = min(int(num_inference_steps * strength), num_inference_steps) t_start = max(num_inference_steps - init_timestep, 0) return t_start def _generate( self, prompt_ids: jnp.ndarray, image: jnp.ndarray, params: Union[Dict, FrozenDict], prng_seed: jax.Array, start_timestep: int, num_inference_steps: int, height: int, width: int, guidance_scale: float, noise: Optional[jnp.ndarray] = None, neg_prompt_ids: Optional[jnp.ndarray] = None, ): if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") # get prompt text embeddings prompt_embeds = self.text_encoder(prompt_ids, params=params["text_encoder"])[0] # TODO: currently it is assumed `do_classifier_free_guidance = guidance_scale > 1.0` # implement this conditional `do_classifier_free_guidance = guidance_scale > 1.0` batch_size = prompt_ids.shape[0] max_length = prompt_ids.shape[-1] if neg_prompt_ids is None: uncond_input = self.tokenizer( [""] * batch_size, padding="max_length", max_length=max_length, return_tensors="np" ).input_ids else: uncond_input = neg_prompt_ids negative_prompt_embeds = self.text_encoder(uncond_input, params=params["text_encoder"])[0] context = jnp.concatenate([negative_prompt_embeds, prompt_embeds]) latents_shape = ( batch_size, self.unet.config.in_channels, height // self.vae_scale_factor, width // self.vae_scale_factor, ) if noise is None: noise = jax.random.normal(prng_seed, shape=latents_shape, dtype=jnp.float32) else: if noise.shape != latents_shape: raise ValueError(f"Unexpected latents shape, got {noise.shape}, expected {latents_shape}") # Create init_latents init_latent_dist = self.vae.apply({"params": params["vae"]}, image, method=self.vae.encode).latent_dist init_latents = init_latent_dist.sample(key=prng_seed).transpose((0, 3, 1, 2)) init_latents = self.vae.config.scaling_factor * init_latents def loop_body(step, args): latents, scheduler_state = args # For classifier free guidance, we need to do two forward passes. 
# Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes latents_input = jnp.concatenate([latents] * 2) t = jnp.array(scheduler_state.timesteps, dtype=jnp.int32)[step] timestep = jnp.broadcast_to(t, latents_input.shape[0]) latents_input = self.scheduler.scale_model_input(scheduler_state, latents_input, t) # predict the noise residual noise_pred = self.unet.apply( {"params": params["unet"]}, jnp.array(latents_input), jnp.array(timestep, dtype=jnp.int32), encoder_hidden_states=context, ).sample # perform guidance noise_pred_uncond, noise_prediction_text = jnp.split(noise_pred, 2, axis=0) noise_pred = noise_pred_uncond + guidance_scale * (noise_prediction_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 latents, scheduler_state = self.scheduler.step(scheduler_state, noise_pred, t, latents).to_tuple() return latents, scheduler_state scheduler_state = self.scheduler.set_timesteps( params["scheduler"], num_inference_steps=num_inference_steps, shape=latents_shape ) latent_timestep = scheduler_state.timesteps[start_timestep : start_timestep + 1].repeat(batch_size) latents = self.scheduler.add_noise(params["scheduler"], init_latents, noise, latent_timestep) # scale the initial noise by the standard deviation required by the scheduler latents = latents * params["scheduler"].init_noise_sigma if DEBUG: # run with python for loop for i in range(start_timestep, num_inference_steps): latents, scheduler_state = loop_body(i, (latents, scheduler_state)) else: latents, _ = jax.lax.fori_loop(start_timestep, num_inference_steps, loop_body, (latents, scheduler_state)) # scale and decode the image latents with vae latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.apply({"params": params["vae"]}, latents, method=self.vae.decode).sample image = (image / 2 + 0.5).clip(0, 1).transpose(0, 2, 3, 1) return image @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, prompt_ids: jnp.ndarray, image: jnp.ndarray, params: Union[Dict, FrozenDict], prng_seed: jax.Array, strength: float = 0.8, num_inference_steps: int = 50, height: Optional[int] = None, width: Optional[int] = None, guidance_scale: Union[float, jnp.ndarray] = 7.5, noise: jnp.ndarray = None, neg_prompt_ids: jnp.ndarray = None, return_dict: bool = True, jit: bool = False, ): r""" The call function to the pipeline for generation. Args: prompt_ids (`jnp.ndarray`): The prompt or prompts to guide image generation. image (`jnp.ndarray`): Array representing an image batch to be used as the starting point. params (`Dict` or `FrozenDict`): Dictionary containing the model parameters/weights. prng_seed (`jax.Array` or `jax.Array`): Array containing random number generator key. strength (`float`, *optional*, defaults to 0.8): Indicates extent to transform the reference `image`. Must be between 0 and 1. `image` is used as a starting point and more noise is added the higher the `strength`. The number of denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise is maximum and the denoising process runs for the full number of iterations specified in `num_inference_steps`. A value of 1 essentially ignores `image`. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. This parameter is modulated by `strength`. 
height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): The height in pixels of the generated image. width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): The width in pixels of the generated image. guidance_scale (`float`, *optional*, defaults to 7.5): A higher guidance scale value encourages the model to generate images closely linked to the text `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. noise (`jnp.ndarray`, *optional*): Pre-generated noisy latents sampled from a Gaussian distribution to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. The array is generated by sampling using the supplied random `generator`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.FlaxStableDiffusionPipelineOutput`] instead of a plain tuple. jit (`bool`, defaults to `False`): Whether to run `pmap` versions of the generation and safety scoring functions. <Tip warning={true}> This argument exists because `__call__` is not yet end-to-end pmap-able. It will be removed in a future release. </Tip> Examples: Returns: [`~pipelines.stable_diffusion.FlaxStableDiffusionPipelineOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.stable_diffusion.FlaxStableDiffusionPipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated images and the second element is a list of `bool`s indicating whether the corresponding generated image contains "not-safe-for-work" (nsfw) content. """ # 0. Default height and width to unet height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor if isinstance(guidance_scale, float): # Convert to a tensor so each device gets a copy. Follow the prompt_ids for # shape information, as they may be sharded (when `jit` is `True`), or not. guidance_scale = jnp.array([guidance_scale] * prompt_ids.shape[0]) if len(prompt_ids.shape) > 2: # Assume sharded guidance_scale = guidance_scale[:, None] start_timestep = self.get_timestep_start(num_inference_steps, strength) if jit: images = _p_generate( self, prompt_ids, image, params, prng_seed, start_timestep, num_inference_steps, height, width, guidance_scale, noise, neg_prompt_ids, ) else: images = self._generate( prompt_ids, image, params, prng_seed, start_timestep, num_inference_steps, height, width, guidance_scale, noise, neg_prompt_ids, ) if self.safety_checker is not None: safety_params = params["safety_checker"] images_uint8_casted = (images * 255).round().astype("uint8") num_devices, batch_size = images.shape[:2] images_uint8_casted = np.asarray(images_uint8_casted).reshape(num_devices * batch_size, height, width, 3) images_uint8_casted, has_nsfw_concept = self._run_safety_checker(images_uint8_casted, safety_params, jit) images = np.asarray(images) # block images if any(has_nsfw_concept): for i, is_nsfw in enumerate(has_nsfw_concept): if is_nsfw: images[i] = np.asarray(images_uint8_casted[i]) images = images.reshape(num_devices, batch_size, height, width, 3) else: images = np.asarray(images) has_nsfw_concept = False if not return_dict: return (images, has_nsfw_concept) return FlaxStableDiffusionPipelineOutput(images=images, nsfw_content_detected=has_nsfw_concept) # Static argnums are pipe, start_timestep, num_inference_steps, height, width. 
A change would trigger recompilation. # Non-static args are (sharded) input tensors mapped over their first dimension (hence, `0`). @partial( jax.pmap, in_axes=(None, 0, 0, 0, 0, None, None, None, None, 0, 0, 0), static_broadcasted_argnums=(0, 5, 6, 7, 8), ) def _p_generate( pipe, prompt_ids, image, params, prng_seed, start_timestep, num_inference_steps, height, width, guidance_scale, noise, neg_prompt_ids, ): return pipe._generate( prompt_ids, image, params, prng_seed, start_timestep, num_inference_steps, height, width, guidance_scale, noise, neg_prompt_ids, ) @partial(jax.pmap, static_broadcasted_argnums=(0,)) def _p_get_has_nsfw_concepts(pipe, features, params): return pipe._get_has_nsfw_concepts(features, params) def unshard(x: jnp.ndarray): # einops.rearrange(x, 'd b ... -> (d b) ...') num_devices, batch_size = x.shape[:2] rest = x.shape[2:] return x.reshape(num_devices * batch_size, *rest) def preprocess(image, dtype): w, h = image.size w, h = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]) image = jnp.array(image).astype(dtype) / 255.0 image = image[None].transpose(0, 3, 1, 2) return 2.0 * image - 1.0
diffusers/src/diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion_img2img.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion_img2img.py", "repo_id": "diffusers", "token_count": 9920 }
180
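In the Flax img2img pipeline above, `strength` only determines where in the schedule denoising starts; the mapping is the `get_timestep_start` logic from the file. A small standalone sketch of that relationship (same arithmetic as the source, nothing else assumed):

```py
# Standalone sketch of the strength -> start-step mapping used by
# get_timestep_start() in the pipeline above.
def get_timestep_start(num_inference_steps: int, strength: float) -> int:
    init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
    return max(num_inference_steps - init_timestep, 0)

for strength in (0.25, 0.5, 0.75, 1.0):
    t_start = get_timestep_start(50, strength)
    print(f"strength={strength}: skip {t_start} steps, run {50 - t_start} of 50")
# strength=1.0 runs the full schedule, so the initial image is essentially ignored.
```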
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect from typing import Any, Callable, Dict, List, Optional, Union import PIL.Image import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection from ...image_processor import VaeImageProcessor from ...loaders import StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin from ...models import AutoencoderKL, UNet2DConditionModel from ...models.embeddings import get_timestep_embedding from ...models.lora import adjust_lora_scale_text_encoder from ...schedulers import KarrasDiffusionSchedulers from ...utils import ( USE_PEFT_BACKEND, deprecate, is_torch_xla_available, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers, ) from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput, StableDiffusionMixin from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py >>> import requests >>> import torch >>> from PIL import Image >>> from io import BytesIO >>> from diffusers import StableUnCLIPImg2ImgPipeline >>> pipe = StableUnCLIPImg2ImgPipeline.from_pretrained( ... "stabilityai/stable-diffusion-2-1-unclip-small", torch_dtype=torch.float16 ... ) >>> pipe = pipe.to("cuda") >>> url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg" >>> response = requests.get(url) >>> init_image = Image.open(BytesIO(response.content)).convert("RGB") >>> init_image = init_image.resize((768, 512)) >>> prompt = "A fantasy landscape, trending on artstation" >>> images = pipe(init_image, prompt).images >>> images[0].save("fantasy_landscape.png") ``` """ class StableUnCLIPImg2ImgPipeline( DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, StableDiffusionLoraLoaderMixin ): """ Pipeline for text-guided image-to-image generation using stable unCLIP. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). The pipeline also inherits the following loading methods: - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings - [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for loading LoRA weights - [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for saving LoRA weights Args: feature_extractor ([`CLIPImageProcessor`]): Feature extractor for image pre-processing before being encoded. image_encoder ([`CLIPVisionModelWithProjection`]): CLIP vision model for encoding images. 
image_normalizer ([`StableUnCLIPImageNormalizer`]): Used to normalize the predicted image embeddings before the noise is applied and un-normalize the image embeddings after the noise has been applied. image_noising_scheduler ([`KarrasDiffusionSchedulers`]): Noise schedule for adding noise to the predicted image embeddings. The amount of noise to add is determined by the `noise_level`. tokenizer (`~transformers.CLIPTokenizer`): A [`~transformers.CLIPTokenizer`)]. text_encoder ([`~transformers.CLIPTextModel`]): Frozen [`~transformers.CLIPTextModel`] text-encoder. unet ([`UNet2DConditionModel`]): A [`UNet2DConditionModel`] to denoise the encoded image latents. scheduler ([`KarrasDiffusionSchedulers`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. """ model_cpu_offload_seq = "text_encoder->image_encoder->unet->vae" _exclude_from_cpu_offload = ["image_normalizer"] # image encoding components feature_extractor: CLIPImageProcessor image_encoder: CLIPVisionModelWithProjection # image noising components image_normalizer: StableUnCLIPImageNormalizer image_noising_scheduler: KarrasDiffusionSchedulers # regular denoising components tokenizer: CLIPTokenizer text_encoder: CLIPTextModel unet: UNet2DConditionModel scheduler: KarrasDiffusionSchedulers vae: AutoencoderKL def __init__( self, # image encoding components feature_extractor: CLIPImageProcessor, image_encoder: CLIPVisionModelWithProjection, # image noising components image_normalizer: StableUnCLIPImageNormalizer, image_noising_scheduler: KarrasDiffusionSchedulers, # regular denoising components tokenizer: CLIPTokenizer, text_encoder: CLIPTextModel, unet: UNet2DConditionModel, scheduler: KarrasDiffusionSchedulers, # vae vae: AutoencoderKL, ): super().__init__() self.register_modules( feature_extractor=feature_extractor, image_encoder=image_encoder, image_normalizer=image_normalizer, image_noising_scheduler=image_noising_scheduler, tokenizer=tokenizer, text_encoder=text_encoder, unet=unet, scheduler=scheduler, vae=vae, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8 self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt def _encode_prompt( self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, lora_scale: Optional[float] = None, **kwargs, ): deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple." 
deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False) prompt_embeds_tuple = self.encode_prompt( prompt=prompt, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=do_classifier_free_guidance, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=lora_scale, **kwargs, ) # concatenate for backwards comp prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) return prompt_embeds def _encode_image( self, image, device, batch_size, num_images_per_prompt, do_classifier_free_guidance, noise_level, generator, image_embeds, ): dtype = next(self.image_encoder.parameters()).dtype if isinstance(image, PIL.Image.Image): # the image embedding should repeated so it matches the total batch size of the prompt repeat_by = batch_size else: # assume the image input is already properly batched and just needs to be repeated so # it matches the num_images_per_prompt. # # NOTE(will) this is probably missing a few number of side cases. I.e. batched/non-batched # `image_embeds`. If those happen to be common use cases, let's think harder about # what the expected dimensions of inputs should be and how we handle the encoding. repeat_by = num_images_per_prompt if image_embeds is None: if not isinstance(image, torch.Tensor): image = self.feature_extractor(images=image, return_tensors="pt").pixel_values image = image.to(device=device, dtype=dtype) image_embeds = self.image_encoder(image).image_embeds image_embeds = self.noise_image_embeddings( image_embeds=image_embeds, noise_level=noise_level, generator=generator, ) # duplicate image embeddings for each generation per prompt, using mps friendly method image_embeds = image_embeds.unsqueeze(1) bs_embed, seq_len, _ = image_embeds.shape image_embeds = image_embeds.repeat(1, repeat_by, 1) image_embeds = image_embeds.view(bs_embed * repeat_by, seq_len, -1) image_embeds = image_embeds.squeeze(1) if do_classifier_free_guidance: negative_prompt_embeds = torch.zeros_like(image_embeds) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes image_embeds = torch.cat([negative_prompt_embeds, image_embeds]) return image_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt def encode_prompt( self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, lora_scale: Optional[float] = None, clip_skip: Optional[int] = None, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded device: (`torch.device`): torch device num_images_per_prompt (`int`): number of images that should be generated per prompt do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. 
If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. lora_scale (`float`, *optional*): A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. """ # set lora scale so that monkey patched LoRA # function of text encoder can correctly access it if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): self._lora_scale = lora_scale # dynamically adjust the LoRA scale if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: # textual inversion: process multi-vector tokens if necessary if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, self.tokenizer) text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( text_input_ids, untruncated_ids ): removed_text = self.tokenizer.batch_decode( untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None if clip_skip is None: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) prompt_embeds = prompt_embeds[0] else: prompt_embeds = self.text_encoder( text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True ) # Access the `hidden_states` first, that contains a tuple of # all the hidden states from the encoder layers. Then index into # the tuple to access the hidden states from the desired layer. prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] # We also need to apply the final LayerNorm here to not mess with the # representations. The `last_hidden_states` that we typically use for # obtaining the final prompt representations passes through the LayerNorm # layer. 
prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) if self.text_encoder is not None: prompt_embeds_dtype = self.text_encoder.dtype elif self.unet is not None: prompt_embeds_dtype = self.unet.dtype else: prompt_embeds_dtype = prompt_embeds.dtype prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) bs_embed, seq_len, _ = prompt_embeds.shape # duplicate text embeddings for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = negative_prompt # textual inversion: process multi-vector tokens if necessary if isinstance(self, TextualInversionLoaderMixin): uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder( uncond_input.input_ids.to(device), attention_mask=attention_mask, ) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) if self.text_encoder is not None: if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: # Retrieve the original scale by scaling back the LoRA layers unscale_lora_layers(self.text_encoder, lora_scale) return prompt_embeds, negative_prompt_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents def decode_latents(self, latents): deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) 
instead" deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.decode(latents, return_dict=False)[0] image = (image / 2 + 0.5).clamp(0, 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 image = image.cpu().permute(0, 2, 3, 1).float().numpy() return image # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://huggingface.co/papers/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs def check_inputs( self, prompt, image, height, width, callback_steps, noise_level, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, image_embeds=None, ): if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if (callback_steps is None) or ( callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) if prompt is not None and prompt_embeds is not None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Please make sure to define only one of the two." ) if prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." ) if prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( "Provide either `negative_prompt` or `negative_prompt_embeds`. Cannot leave both `negative_prompt` and `negative_prompt_embeds` undefined." ) if prompt is not None and negative_prompt is not None: if type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." ) if noise_level < 0 or noise_level >= self.image_noising_scheduler.config.num_train_timesteps: raise ValueError( f"`noise_level` must be between 0 and {self.image_noising_scheduler.config.num_train_timesteps - 1}, inclusive." ) if image is not None and image_embeds is not None: raise ValueError( "Provide either `image` or `image_embeds`. Please make sure to define only one of the two." 
) if image is None and image_embeds is None: raise ValueError( "Provide either `image` or `image_embeds`. Cannot leave both `image` and `image_embeds` undefined." ) if image is not None: if ( not isinstance(image, torch.Tensor) and not isinstance(image, PIL.Image.Image) and not isinstance(image, list) ): raise ValueError( "`image` has to be of type `torch.Tensor` or `PIL.Image.Image` or `List[PIL.Image.Image]` but is" f" {type(image)}" ) # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = ( batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor, ) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_unclip.StableUnCLIPPipeline.noise_image_embeddings def noise_image_embeddings( self, image_embeds: torch.Tensor, noise_level: int, noise: Optional[torch.Tensor] = None, generator: Optional[torch.Generator] = None, ): """ Add noise to the image embeddings. The amount of noise is controlled by a `noise_level` input. A higher `noise_level` increases the variance in the final un-noised images. The noise is applied in two ways: 1. A noise schedule is applied directly to the embeddings. 2. A vector of sinusoidal time embeddings are appended to the output. In both cases, the amount of noise is controlled by the same `noise_level`. The embeddings are normalized before the noise is applied and un-normalized after the noise is applied. """ if noise is None: noise = randn_tensor( image_embeds.shape, generator=generator, device=image_embeds.device, dtype=image_embeds.dtype ) noise_level = torch.tensor([noise_level] * image_embeds.shape[0], device=image_embeds.device) self.image_normalizer.to(image_embeds.device) image_embeds = self.image_normalizer.scale(image_embeds) image_embeds = self.image_noising_scheduler.add_noise(image_embeds, timesteps=noise_level, noise=noise) image_embeds = self.image_normalizer.unscale(image_embeds) noise_level = get_timestep_embedding( timesteps=noise_level, embedding_dim=image_embeds.shape[-1], flip_sin_to_cos=True, downscale_freq_shift=0 ) # `get_timestep_embeddings` does not contain any weights and will always return f32 tensors, # but we might actually be running in fp16. so we need to cast here. # there might be better ways to encapsulate this. 
noise_level = noise_level.to(image_embeds.dtype) image_embeds = torch.cat((image_embeds, noise_level), 1) return image_embeds @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, image: Union[torch.Tensor, PIL.Image.Image] = None, prompt: Union[str, List[str]] = None, height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 20, guidance_scale: float = 10, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.Tensor] = None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, callback_steps: int = 1, cross_attention_kwargs: Optional[Dict[str, Any]] = None, noise_level: int = 0, image_embeds: Optional[torch.Tensor] = None, clip_skip: Optional[int] = None, ): r""" The call function to the pipeline for generation. Args: prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide the image generation. If not defined, either `prompt_embeds` will be used or prompt is initialized to `""`. image (`torch.Tensor` or `PIL.Image.Image`): `Image` or tensor representing an image batch. The image is encoded to its CLIP embedding which the `unet` is conditioned on. The image is _not_ encoded by the `vae` and then used as the latents in the denoising process like it is in the standard Stable Diffusion text-guided image variation process. height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): The height in pixels of the generated image. width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): The width in pixels of the generated image. num_inference_steps (`int`, *optional*, defaults to 20): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. guidance_scale (`float`, *optional*, defaults to 10.0): A higher guidance scale value encourages the model to generate images closely linked to the text `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide what to not include in image generation. If not defined, you need to pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) from the [DDIM](https://huggingface.co/papers/2010.02502) paper. Only applies to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.Tensor`, *optional*): Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor is generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. 
Can be used to easily tweak text inputs (prompt weighting). If not provided, text embeddings are generated from the `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generated image. Choose between `PIL.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. callback (`Callable`, *optional*): A function that calls every `callback_steps` steps during inference. The function is called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function is called. If not specified, the callback is called at every step. cross_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). noise_level (`int`, *optional*, defaults to `0`): The amount of noise to add to the image embeddings. A higher `noise_level` increases the variance in the final un-noised images. See [`StableUnCLIPPipeline.noise_image_embeddings`] for more details. image_embeds (`torch.Tensor`, *optional*): Pre-generated CLIP embeddings to condition the `unet` on. These latents are not used in the denoising process. If you want to provide pre-generated latents, pass them to `__call__` as `latents`. clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. Examples: Returns: [`~pipelines.ImagePipelineOutput`] or `tuple`: [`~ pipeline_utils.ImagePipelineOutput`] if `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is a list with the generated images. """ # 0. Default height and width to unet height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor if prompt is None and prompt_embeds is None: prompt = len(image) * [""] if isinstance(image, list) else "" # 1. Check inputs. Raise error if not correct self.check_inputs( prompt=prompt, image=image, height=height, width=width, callback_steps=callback_steps, noise_level=noise_level, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image_embeds=image_embeds, ) # 2. Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] batch_size = batch_size * num_images_per_prompt device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1` # corresponds to doing no classifier free guidance. do_classifier_free_guidance = guidance_scale > 1.0 # 3. 
Encode input prompt text_encoder_lora_scale = ( cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None ) prompt_embeds, negative_prompt_embeds = self.encode_prompt( prompt=prompt, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=do_classifier_free_guidance, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=text_encoder_lora_scale, ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes if do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) # 4. Encoder input image noise_level = torch.tensor([noise_level], device=device) image_embeds = self._encode_image( image=image, device=device, batch_size=batch_size, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=do_classifier_free_guidance, noise_level=noise_level, generator=generator, image_embeds=image_embeds, ) # 5. Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps # 6. Prepare latent variables num_channels_latents = self.unet.config.in_channels if latents is None: latents = self.prepare_latents( batch_size=batch_size, num_channels_latents=num_channels_latents, height=height, width=width, dtype=prompt_embeds.dtype, device=device, generator=generator, latents=latents, ) # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # 8. Denoising loop for i, t in enumerate(self.progress_bar(timesteps)): latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) # predict the noise residual noise_pred = self.unet( latent_model_input, t, encoder_hidden_states=prompt_embeds, class_labels=image_embeds, cross_attention_kwargs=cross_attention_kwargs, return_dict=False, )[0] # perform guidance if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, "order", 1) callback(step_idx, t, latents) if XLA_AVAILABLE: xm.mark_step() # 9. Post-processing if not output_type == "latent": image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] else: image = latents image = self.image_processor.postprocess(image, output_type=output_type) # Offload all models self.maybe_free_model_hooks() if not return_dict: return (image,) return ImagePipelineOutput(images=image)
diffusers/src/diffusers/pipelines/stable_diffusion/pipeline_stable_unclip_img2img.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/stable_diffusion/pipeline_stable_unclip_img2img.py", "repo_id": "diffusers", "token_count": 17506 }
181
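The `noise_level` argument is what distinguishes this pipeline from plain img2img: it controls how much noise is added to the CLIP image embedding via `noise_image_embeddings` before it conditions the UNet. A hedged sketch follows; the checkpoint id and input URL come from the file's own example, while the `noise_level` value is an illustrative choice (it must stay below `image_noising_scheduler.config.num_train_timesteps`, per `check_inputs`).

```py
# Hedged sketch: higher noise_level -> generations vary more from the input image.
import torch
from diffusers import StableUnCLIPImg2ImgPipeline
from diffusers.utils import load_image

pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-1-unclip-small", torch_dtype=torch.float16
).to("cuda")

init_image = load_image(
    "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"
).resize((768, 512))

image = pipe(
    init_image,
    prompt="A fantasy landscape, trending on artstation",
    noise_level=100,  # illustrative value; 0 keeps the image embedding unchanged
).images[0]
image.save("fantasy_landscape_varied.png")
```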
from typing import TYPE_CHECKING from ...utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_k_diffusion_available, is_k_diffusion_version, is_torch_available, is_transformers_available, ) _dummy_objects = {} _import_structure = {} try: if not ( is_transformers_available() and is_torch_available() and is_k_diffusion_available() and is_k_diffusion_version(">=", "0.0.12") ): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_torch_and_transformers_and_k_diffusion_objects # noqa F403 _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_and_k_diffusion_objects)) else: _import_structure["pipeline_stable_diffusion_k_diffusion"] = ["StableDiffusionKDiffusionPipeline"] _import_structure["pipeline_stable_diffusion_xl_k_diffusion"] = ["StableDiffusionXLKDiffusionPipeline"] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not ( is_transformers_available() and is_torch_available() and is_k_diffusion_available() and is_k_diffusion_version(">=", "0.0.12") ): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * else: from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline from .pipeline_stable_diffusion_xl_k_diffusion import StableDiffusionXLKDiffusionPipeline else: import sys sys.modules[__name__] = _LazyModule( __name__, globals()["__file__"], _import_structure, module_spec=__spec__, ) for name, value in _dummy_objects.items(): setattr(sys.modules[__name__], name, value)
diffusers/src/diffusers/pipelines/stable_diffusion_k_diffusion/__init__.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/stable_diffusion_k_diffusion/__init__.py", "repo_id": "diffusers", "token_count": 812 }
182
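The `__init__.py` above registers dummy objects when `k-diffusion` (or torch/transformers) is missing, so the pipeline names stay importable and only fail with a helpful error on use. A simplified sketch of the same guard written from user code; the placeholder branch stands in for diffusers' dummy-object machinery and is not the real implementation.

```py
# Simplified sketch of the optional-dependency guard used by the module above.
from diffusers.utils import is_k_diffusion_available, is_k_diffusion_version

if is_k_diffusion_available() and is_k_diffusion_version(">=", "0.0.12"):
    # Same pipelines the lazy module exposes when the dependency is present.
    from diffusers import StableDiffusionKDiffusionPipeline, StableDiffusionXLKDiffusionPipeline
else:
    # Stand-in for diffusers' dummy objects, which raise a helpful error when used.
    StableDiffusionKDiffusionPipeline = StableDiffusionXLKDiffusionPipeline = None
    print("k-diffusion >= 0.0.12 not installed; `pip install k-diffusion` to enable these pipelines.")
```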
# coding=utf-8 # Copyright 2025 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import contextlib import os import tempfile from typing import TYPE_CHECKING, Dict from huggingface_hub import DDUFEntry from tqdm import tqdm from ..utils import is_safetensors_available, is_transformers_available, is_transformers_version if TYPE_CHECKING: from transformers import PreTrainedModel, PreTrainedTokenizer if is_transformers_available(): from transformers import PreTrainedModel, PreTrainedTokenizer if is_safetensors_available(): import safetensors.torch def _load_tokenizer_from_dduf( cls: "PreTrainedTokenizer", name: str, dduf_entries: Dict[str, DDUFEntry], **kwargs ) -> "PreTrainedTokenizer": """ Load a tokenizer from a DDUF archive. In practice, `transformers` do not provide a way to load a tokenizer from a DDUF archive. This function is a workaround by extracting the tokenizer files from the DDUF archive and loading the tokenizer from the extracted files. There is an extra cost of extracting the files, but of limited impact as the tokenizer files are usually small-ish. """ with tempfile.TemporaryDirectory() as tmp_dir: for entry_name, entry in dduf_entries.items(): if entry_name.startswith(name + "/"): tmp_entry_path = os.path.join(tmp_dir, *entry_name.split("/")) # need to create intermediary directory if they don't exist os.makedirs(os.path.dirname(tmp_entry_path), exist_ok=True) with open(tmp_entry_path, "wb") as f: with entry.as_mmap() as mm: f.write(mm) return cls.from_pretrained(os.path.dirname(tmp_entry_path), **kwargs) def _load_transformers_model_from_dduf( cls: "PreTrainedModel", name: str, dduf_entries: Dict[str, DDUFEntry], **kwargs ) -> "PreTrainedModel": """ Load a transformers model from a DDUF archive. In practice, `transformers` do not provide a way to load a model from a DDUF archive. This function is a workaround by instantiating a model from the config file and loading the weights from the DDUF archive directly. """ config_file = dduf_entries.get(f"{name}/config.json") if config_file is None: raise EnvironmentError( f"Could not find a config.json file for component {name} in DDUF file (contains {dduf_entries.keys()})." ) generation_config = dduf_entries.get(f"{name}/generation_config.json", None) weight_files = [ entry for entry_name, entry in dduf_entries.items() if entry_name.startswith(f"{name}/") and entry_name.endswith(".safetensors") ] if not weight_files: raise EnvironmentError( f"Could not find any weight file for component {name} in DDUF file (contains {dduf_entries.keys()})." ) if not is_safetensors_available(): raise EnvironmentError( "Safetensors is not available, cannot load model from DDUF. Please `pip install safetensors`." ) if is_transformers_version("<", "4.47.0"): raise ImportError( "You need to install `transformers>4.47.0` in order to load a transformers model from a DDUF file. 
" "You can install it with: `pip install --upgrade transformers`" ) with tempfile.TemporaryDirectory() as tmp_dir: from transformers import AutoConfig, GenerationConfig tmp_config_file = os.path.join(tmp_dir, "config.json") with open(tmp_config_file, "w") as f: f.write(config_file.read_text()) config = AutoConfig.from_pretrained(tmp_config_file) if generation_config is not None: tmp_generation_config_file = os.path.join(tmp_dir, "generation_config.json") with open(tmp_generation_config_file, "w") as f: f.write(generation_config.read_text()) generation_config = GenerationConfig.from_pretrained(tmp_generation_config_file) state_dict = {} with contextlib.ExitStack() as stack: for entry in tqdm(weight_files, desc="Loading state_dict"): # Loop over safetensors files # Memory-map the safetensors file mmap = stack.enter_context(entry.as_mmap()) # Load tensors from the memory-mapped file tensors = safetensors.torch.load(mmap) # Update the state dictionary with tensors state_dict.update(tensors) return cls.from_pretrained( pretrained_model_name_or_path=None, config=config, generation_config=generation_config, state_dict=state_dict, **kwargs, )
diffusers/src/diffusers/pipelines/transformers_loading_utils.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/transformers_loading_utils.py", "repo_id": "diffusers", "token_count": 2082 }
183
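The two helpers above are private utilities that the pipeline loader uses when a pipeline is read from a DDUF archive. As a rough illustration only (not part of the file above), here is a minimal sketch of exercising the tokenizer helper directly; the archive path, the "tokenizer" component prefix, and the `read_dduf_file` entry point are stated as assumptions rather than guarantees.
# Hypothetical sketch: load one tokenizer component straight from a DDUF archive.
# The archive path and the "tokenizer" prefix are illustrative assumptions.
from huggingface_hub import read_dduf_file
from transformers import AutoTokenizer

from diffusers.pipelines.transformers_loading_utils import _load_tokenizer_from_dduf

# read_dduf_file is expected to return a Dict[str, DDUFEntry] mapping archive paths to entries.
dduf_entries = read_dduf_file("my_pipeline.dduf")  # placeholder local archive

# Extracts the "tokenizer/*" entries to a temporary directory and calls AutoTokenizer.from_pretrained on it.
tokenizer = _load_tokenizer_from_dduf(AutoTokenizer, "tokenizer", dduf_entries)
In normal use these helpers are reached indirectly through the pipeline's own `from_pretrained` DDUF path rather than being called by hand; the sketch above only makes the extract-then-load workaround visible.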
# Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Adapted from https://github.com/huggingface/transformers/blob/c409cd81777fb27aadc043ed3d8339dbc020fb3b/src/transformers/quantizers/quantizer_bnb_4bit.py """ from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union from ...utils import get_module_from_name from ..base import DiffusersQuantizer if TYPE_CHECKING: from ...models.modeling_utils import ModelMixin from ...utils import ( is_accelerate_available, is_accelerate_version, is_bitsandbytes_available, is_bitsandbytes_version, is_torch_available, logging, ) if is_torch_available(): import torch logger = logging.get_logger(__name__) class BnB4BitDiffusersQuantizer(DiffusersQuantizer): """ 4-bit quantization from bitsandbytes.py quantization method: before loading: converts transformer layers into Linear4bit during loading: load 16bit weight and pass to the layer object after: quantizes individual weights in Linear4bit into 4bit at the first .cuda() call saving: from state dict, as usual; saves weights and `quant_state` components loading: need to locate `quant_state` components and pass to Param4bit constructor """ use_keep_in_fp32_modules = True requires_calibration = False def __init__(self, quantization_config, **kwargs): super().__init__(quantization_config, **kwargs) if self.quantization_config.llm_int8_skip_modules is not None: self.modules_to_not_convert = self.quantization_config.llm_int8_skip_modules def validate_environment(self, *args, **kwargs): if not (torch.cuda.is_available() or torch.xpu.is_available()): raise RuntimeError("No GPU found. A GPU is needed for quantization.") if not is_accelerate_available() or is_accelerate_version("<", "0.26.0"): raise ImportError( "Using `bitsandbytes` 4-bit quantization requires Accelerate: `pip install 'accelerate>=0.26.0'`" ) if not is_bitsandbytes_available() or is_bitsandbytes_version("<", "0.43.3"): raise ImportError( "Using `bitsandbytes` 4-bit quantization requires the latest version of bitsandbytes: `pip install -U bitsandbytes`" ) if kwargs.get("from_flax", False): raise ValueError( "Converting into 4-bit weights from flax weights is currently not supported, please make" " sure the weights are in PyTorch format." ) device_map = kwargs.get("device_map", None) if ( device_map is not None and isinstance(device_map, dict) and not self.quantization_config.llm_int8_enable_fp32_cpu_offload ): device_map_without_no_convert = { key: device_map[key] for key in device_map.keys() if key not in self.modules_to_not_convert } if "cpu" in device_map_without_no_convert.values() or "disk" in device_map_without_no_convert.values(): raise ValueError( "Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit the " "quantized model. If you want to dispatch the model on the CPU or the disk while keeping these modules " "in 32-bit, you need to set `load_in_8bit_fp32_cpu_offload=True` and pass a custom `device_map` to " "`from_pretrained`. 
Check " "https://huggingface.co/docs/transformers/main/en/main_classes/quantization#offload-between-cpu-and-gpu " "for more details. " ) def adjust_target_dtype(self, target_dtype: "torch.dtype") -> "torch.dtype": if target_dtype != torch.int8: from accelerate.utils import CustomDtype logger.info("target_dtype {target_dtype} is replaced by `CustomDtype.INT4` for 4-bit BnB quantization") return CustomDtype.INT4 else: raise ValueError(f"Wrong `target_dtype` ({target_dtype}) provided.") def check_if_quantized_param( self, model: "ModelMixin", param_value: "torch.Tensor", param_name: str, state_dict: Dict[str, Any], **kwargs, ) -> bool: import bitsandbytes as bnb module, tensor_name = get_module_from_name(model, param_name) if isinstance(module._parameters.get(tensor_name, None), bnb.nn.Params4bit): # Add here check for loaded components' dtypes once serialization is implemented return True elif isinstance(module, bnb.nn.Linear4bit) and tensor_name == "bias": # bias could be loaded by regular set_module_tensor_to_device() from accelerate, # but it would wrongly use uninitialized weight there. return True else: return False def create_quantized_param( self, model: "ModelMixin", param_value: "torch.Tensor", param_name: str, target_device: "torch.device", state_dict: Dict[str, Any], unexpected_keys: Optional[List[str]] = None, **kwargs, ): import bitsandbytes as bnb module, tensor_name = get_module_from_name(model, param_name) if tensor_name not in module._parameters: raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.") old_value = getattr(module, tensor_name) if tensor_name == "bias": if param_value is None: new_value = old_value.to(target_device) else: new_value = param_value.to(target_device) new_value = torch.nn.Parameter(new_value, requires_grad=old_value.requires_grad) module._parameters[tensor_name] = new_value return if not isinstance(module._parameters[tensor_name], bnb.nn.Params4bit): raise ValueError("this function only loads `Linear4bit components`") if ( old_value.device == torch.device("meta") and target_device not in ["meta", torch.device("meta")] and param_value is None ): raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {target_device}.") # construct `new_value` for the module._parameters[tensor_name]: if self.pre_quantized: # 4bit loading. Collecting components for restoring quantized weight # This can be expanded to make a universal call for any quantized weight loading if not self.is_serializable: raise ValueError( "Detected int4 weights but the version of bitsandbytes is not compatible with int4 serialization. " "Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`." ) if (param_name + ".quant_state.bitsandbytes__fp4" not in state_dict) and ( param_name + ".quant_state.bitsandbytes__nf4" not in state_dict ): raise ValueError( f"Supplied state dict for {param_name} does not contain `bitsandbytes__*` and possibly other `quantized_stats` components." ) quantized_stats = {} for k, v in state_dict.items(): # `startswith` to counter for edge cases where `param_name` # substring can be present in multiple places in the `state_dict` if param_name + "." 
in k and k.startswith(param_name): quantized_stats[k] = v if unexpected_keys is not None and k in unexpected_keys: unexpected_keys.remove(k) new_value = bnb.nn.Params4bit.from_prequantized( data=param_value, quantized_stats=quantized_stats, requires_grad=False, device=target_device, ) else: new_value = param_value.to("cpu") kwargs = old_value.__dict__ new_value = bnb.nn.Params4bit(new_value, requires_grad=False, **kwargs).to(target_device) module._parameters[tensor_name] = new_value def check_quantized_param_shape(self, param_name, current_param, loaded_param): current_param_shape = current_param.shape loaded_param_shape = loaded_param.shape n = current_param_shape.numel() inferred_shape = (n,) if "bias" in param_name else ((n + 1) // 2, 1) if loaded_param_shape != inferred_shape: raise ValueError( f"Expected the flattened shape of the current param ({param_name}) to be {loaded_param_shape} but is {inferred_shape}." ) else: return True def adjust_max_memory(self, max_memory: Dict[str, Union[int, str]]) -> Dict[str, Union[int, str]]: # need more space for buffers that are created during quantization max_memory = {key: val * 0.90 for key, val in max_memory.items()} return max_memory def update_torch_dtype(self, torch_dtype: "torch.dtype") -> "torch.dtype": if torch_dtype is None: # We force the `dtype` to be float16, this is a requirement from `bitsandbytes` logger.info( "Overriding torch_dtype=%s with `torch_dtype=torch.float16` due to " "requirements of `bitsandbytes` to enable model loading in 8-bit or 4-bit. " "Pass your own torch_dtype to specify the dtype of the remaining non-linear layers or pass" " torch_dtype=torch.float16 to remove this warning.", torch_dtype, ) torch_dtype = torch.float16 return torch_dtype def update_device_map(self, device_map): if device_map is None: if torch.xpu.is_available(): current_device = f"xpu:{torch.xpu.current_device()}" else: current_device = f"cuda:{torch.cuda.current_device()}" device_map = {"": current_device} logger.info( "The device_map was not initialized. " "Setting device_map to {" ": {current_device}}. " "If you want to use the model for inference, please set device_map ='auto' " ) return device_map def _process_model_before_weight_loading( self, model: "ModelMixin", device_map, keep_in_fp32_modules: List[str] = [], **kwargs, ): from .utils import replace_with_bnb_linear load_in_8bit_fp32_cpu_offload = self.quantization_config.llm_int8_enable_fp32_cpu_offload # We may keep some modules such as the `proj_out` in their original dtype for numerical stability reasons self.modules_to_not_convert = self.quantization_config.llm_int8_skip_modules if not isinstance(self.modules_to_not_convert, list): self.modules_to_not_convert = [self.modules_to_not_convert] self.modules_to_not_convert.extend(keep_in_fp32_modules) # Extend `self.modules_to_not_convert` to keys that are supposed to be offloaded to `cpu` or `disk` if isinstance(device_map, dict) and len(device_map.keys()) > 1: keys_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]] if len(keys_on_cpu) > 0 and not load_in_8bit_fp32_cpu_offload: raise ValueError( "If you want to offload some keys to `cpu` or `disk`, you need to set " "`llm_int8_enable_fp32_cpu_offload=True`. Note that these modules will not be " " converted to 8-bit but kept in 32-bit." ) self.modules_to_not_convert.extend(keys_on_cpu) # Purge `None`. # Unlike `transformers`, we don't know if we should always keep certain modules in FP32 # in case of diffusion transformer models. 
For language models and others alike, `lm_head` # and tied modules are usually kept in FP32. self.modules_to_not_convert = [module for module in self.modules_to_not_convert if module is not None] model = replace_with_bnb_linear( model, modules_to_not_convert=self.modules_to_not_convert, quantization_config=self.quantization_config ) model.config.quantization_config = self.quantization_config model.is_loaded_in_4bit = True def _process_model_after_weight_loading(self, model: "ModelMixin", **kwargs): model.is_4bit_serializable = self.is_serializable return model @property def is_serializable(self): # Because we're mandating `bitsandbytes` 0.43.3. return True @property def is_trainable(self) -> bool: # Because we're mandating `bitsandbytes` 0.43.3. return True def _dequantize(self, model): from .utils import dequantize_and_replace is_model_on_cpu = model.device.type == "cpu" if is_model_on_cpu: logger.info( "Model was found to be on CPU (could happen as a result of `enable_model_cpu_offload()`). So, moving it to GPU. After dequantization, will move the model back to CPU again to preserve the previous device." ) if torch.xpu.is_available(): model.to(torch.xpu.current_device()) else: model.to(torch.cuda.current_device()) model = dequantize_and_replace( model, self.modules_to_not_convert, quantization_config=self.quantization_config ) if is_model_on_cpu: model.to("cpu") return model class BnB8BitDiffusersQuantizer(DiffusersQuantizer): """ 8-bit quantization from bitsandbytes quantization method: before loading: converts transformer layers into Linear8bitLt during loading: load 16bit weight and pass to the layer object after: quantizes individual weights in Linear8bitLt into 8bit at fitst .cuda() call saving: from state dict, as usual; saves weights and 'SCB' component loading: need to locate SCB component and pass to the Linear8bitLt object """ use_keep_in_fp32_modules = True requires_calibration = False def __init__(self, quantization_config, **kwargs): super().__init__(quantization_config, **kwargs) if self.quantization_config.llm_int8_skip_modules is not None: self.modules_to_not_convert = self.quantization_config.llm_int8_skip_modules def validate_environment(self, *args, **kwargs): if not (torch.cuda.is_available() or torch.xpu.is_available()): raise RuntimeError("No GPU found. A GPU is needed for quantization.") if not is_accelerate_available() or is_accelerate_version("<", "0.26.0"): raise ImportError( "Using `bitsandbytes` 8-bit quantization requires Accelerate: `pip install 'accelerate>=0.26.0'`" ) if not is_bitsandbytes_available() or is_bitsandbytes_version("<", "0.43.3"): raise ImportError( "Using `bitsandbytes` 8-bit quantization requires the latest version of bitsandbytes: `pip install -U bitsandbytes`" ) if kwargs.get("from_flax", False): raise ValueError( "Converting into 8-bit weights from flax weights is currently not supported, please make" " sure the weights are in PyTorch format." ) device_map = kwargs.get("device_map", None) if ( device_map is not None and isinstance(device_map, dict) and not self.quantization_config.llm_int8_enable_fp32_cpu_offload ): device_map_without_no_convert = { key: device_map[key] for key in device_map.keys() if key not in self.modules_to_not_convert } if "cpu" in device_map_without_no_convert.values() or "disk" in device_map_without_no_convert.values(): raise ValueError( "Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit the " "quantized model. 
If you want to dispatch the model on the CPU or the disk while keeping these modules " "in 32-bit, you need to set `load_in_8bit_fp32_cpu_offload=True` and pass a custom `device_map` to " "`from_pretrained`. Check " "https://huggingface.co/docs/transformers/main/en/main_classes/quantization#offload-between-cpu-and-gpu " "for more details. " ) # Copied from diffusers.quantizers.bitsandbytes.bnb_quantizer.BnB4BitDiffusersQuantizer.adjust_max_memory def adjust_max_memory(self, max_memory: Dict[str, Union[int, str]]) -> Dict[str, Union[int, str]]: # need more space for buffers that are created during quantization max_memory = {key: val * 0.90 for key, val in max_memory.items()} return max_memory # Copied from diffusers.quantizers.bitsandbytes.bnb_quantizer.BnB4BitDiffusersQuantizer.update_torch_dtype def update_torch_dtype(self, torch_dtype: "torch.dtype") -> "torch.dtype": if torch_dtype is None: # We force the `dtype` to be float16, this is a requirement from `bitsandbytes` logger.info( "Overriding torch_dtype=%s with `torch_dtype=torch.float16` due to " "requirements of `bitsandbytes` to enable model loading in 8-bit or 4-bit. " "Pass your own torch_dtype to specify the dtype of the remaining non-linear layers or pass" " torch_dtype=torch.float16 to remove this warning.", torch_dtype, ) torch_dtype = torch.float16 return torch_dtype # Copied from diffusers.quantizers.bitsandbytes.bnb_quantizer.BnB4BitDiffusersQuantizer.update_device_map def update_device_map(self, device_map): if device_map is None: if torch.xpu.is_available(): current_device = f"xpu:{torch.xpu.current_device()}" else: current_device = f"cuda:{torch.cuda.current_device()}" device_map = {"": current_device} logger.info( "The device_map was not initialized. " "Setting device_map to {" ": {current_device}}. " "If you want to use the model for inference, please set device_map ='auto' " ) return device_map def adjust_target_dtype(self, target_dtype: "torch.dtype") -> "torch.dtype": if target_dtype != torch.int8: logger.info(f"target_dtype {target_dtype} is replaced by `torch.int8` for 8-bit BnB quantization") return torch.int8 def check_if_quantized_param( self, model: "ModelMixin", param_value: "torch.Tensor", param_name: str, state_dict: Dict[str, Any], **kwargs, ): import bitsandbytes as bnb module, tensor_name = get_module_from_name(model, param_name) if isinstance(module._parameters.get(tensor_name, None), bnb.nn.Int8Params): if self.pre_quantized: if param_name.replace("weight", "SCB") not in state_dict.keys(): raise ValueError("Missing quantization component `SCB`") if param_value.dtype != torch.int8: raise ValueError( f"Incompatible dtype `{param_value.dtype}` when loading 8-bit prequantized weight. Expected `torch.int8`."
) return True return False def create_quantized_param( self, model: "ModelMixin", param_value: "torch.Tensor", param_name: str, target_device: "torch.device", state_dict: Dict[str, Any], unexpected_keys: Optional[List[str]] = None, **kwargs, ): import bitsandbytes as bnb fp16_statistics_key = param_name.replace("weight", "SCB") fp16_weights_format_key = param_name.replace("weight", "weight_format") fp16_statistics = state_dict.get(fp16_statistics_key, None) fp16_weights_format = state_dict.get(fp16_weights_format_key, None) module, tensor_name = get_module_from_name(model, param_name) if tensor_name not in module._parameters: raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.") old_value = getattr(module, tensor_name) if not isinstance(module._parameters[tensor_name], bnb.nn.Int8Params): raise ValueError(f"Parameter `{tensor_name}` should only be a `bnb.nn.Int8Params` instance.") if ( old_value.device == torch.device("meta") and target_device not in ["meta", torch.device("meta")] and param_value is None ): raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {target_device}.") new_value = param_value.to("cpu") if self.pre_quantized and not self.is_serializable: raise ValueError( "Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. " "Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`." ) kwargs = old_value.__dict__ new_value = bnb.nn.Int8Params(new_value, requires_grad=False, **kwargs).to(target_device) module._parameters[tensor_name] = new_value if fp16_statistics is not None: setattr(module.weight, "SCB", fp16_statistics.to(target_device)) if unexpected_keys is not None: unexpected_keys.remove(fp16_statistics_key) # We just need to pop the `weight_format` keys from the state dict to remove unneeded # messages. The correct format is correctly retrieved during the first forward pass. 
if fp16_weights_format is not None and unexpected_keys is not None: unexpected_keys.remove(fp16_weights_format_key) # Copied from diffusers.quantizers.bitsandbytes.bnb_quantizer.BnB4BitDiffusersQuantizer._process_model_after_weight_loading with 4bit->8bit def _process_model_after_weight_loading(self, model: "ModelMixin", **kwargs): model.is_8bit_serializable = self.is_serializable return model # Copied from diffusers.quantizers.bitsandbytes.bnb_quantizer.BnB4BitDiffusersQuantizer._process_model_before_weight_loading with 4bit->8bit def _process_model_before_weight_loading( self, model: "ModelMixin", device_map, keep_in_fp32_modules: List[str] = [], **kwargs, ): from .utils import replace_with_bnb_linear load_in_8bit_fp32_cpu_offload = self.quantization_config.llm_int8_enable_fp32_cpu_offload # We may keep some modules such as the `proj_out` in their original dtype for numerical stability reasons self.modules_to_not_convert = self.quantization_config.llm_int8_skip_modules if not isinstance(self.modules_to_not_convert, list): self.modules_to_not_convert = [self.modules_to_not_convert] self.modules_to_not_convert.extend(keep_in_fp32_modules) # Extend `self.modules_to_not_convert` to keys that are supposed to be offloaded to `cpu` or `disk` if isinstance(device_map, dict) and len(device_map.keys()) > 1: keys_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]] if len(keys_on_cpu) > 0 and not load_in_8bit_fp32_cpu_offload: raise ValueError( "If you want to offload some keys to `cpu` or `disk`, you need to set " "`llm_int8_enable_fp32_cpu_offload=True`. Note that these modules will not be " " converted to 8-bit but kept in 32-bit." ) self.modules_to_not_convert.extend(keys_on_cpu) # Purge `None`. # Unlike `transformers`, we don't know if we should always keep certain modules in FP32 # in case of diffusion transformer models. For language models and others alike, `lm_head` # and tied modules are usually kept in FP32. self.modules_to_not_convert = [module for module in self.modules_to_not_convert if module is not None] model = replace_with_bnb_linear( model, modules_to_not_convert=self.modules_to_not_convert, quantization_config=self.quantization_config ) model.config.quantization_config = self.quantization_config model.is_loaded_in_8bit = True @property # Copied from diffusers.quantizers.bitsandbytes.bnb_quantizer.BnB4BitDiffusersQuantizer.is_serializable def is_serializable(self): # Because we're mandating `bitsandbytes` 0.43.3. return True @property # Copied from diffusers.quantizers.bitsandbytes.bnb_quantizer.BnB4BitDiffusersQuantizer.is_serializable def is_trainable(self) -> bool: # Because we're mandating `bitsandbytes` 0.43.3. return True @property def is_compileable(self) -> bool: return True def _dequantize(self, model): from .utils import dequantize_and_replace model = dequantize_and_replace( model, self.modules_to_not_convert, quantization_config=self.quantization_config ) return model
diffusers/src/diffusers/quantizers/bitsandbytes/bnb_quantizer.py/0
{ "file_path": "diffusers/src/diffusers/quantizers/bitsandbytes/bnb_quantizer.py", "repo_id": "diffusers", "token_count": 11346 }
184
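The quantizers above are not instantiated directly; they are selected by `from_pretrained` when a `BitsAndBytesConfig` is passed as `quantization_config`. The following is a minimal, illustrative sketch of the 4-bit path; the checkpoint id and subfolder are placeholders, and a CUDA or XPU device plus `bitsandbytes>=0.43.3` and `accelerate>=0.26.0` are assumed, matching `validate_environment` above.
# Illustrative sketch only: the repo id and subfolder below are placeholders.
import torch
from diffusers import BitsAndBytesConfig, SD3Transformer2DModel

quant_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)

# from_pretrained routes through BnB4BitDiffusersQuantizer: Linear layers are swapped for
# bnb.nn.Linear4bit before loading, and the 16-bit weights are quantized to 4-bit as they
# are placed on the accelerator device.
transformer = SD3Transformer2DModel.from_pretrained(
    "stabilityai/stable-diffusion-3-medium-diffusers",  # placeholder checkpoint
    subfolder="transformer",
    quantization_config=quant_config,
    torch_dtype=torch.bfloat16,
)
Passing `llm_int8_skip_modules` in the config populates `modules_to_not_convert`, which is how numerically sensitive layers (or layers offloaded to CPU/disk) are kept out of the quantized path, as the `_process_model_before_weight_loading` methods above show.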
# Copyright 2025 Google Brain and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch import math from typing import Union import torch from ...configuration_utils import ConfigMixin, register_to_config from ...utils.torch_utils import randn_tensor from ..scheduling_utils import SchedulerMixin class ScoreSdeVpScheduler(SchedulerMixin, ConfigMixin): """ `ScoreSdeVpScheduler` is a variance preserving stochastic differential equation (SDE) scheduler. This scheduler inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic methods the library implements for all schedulers such as loading and saving. Args: num_train_timesteps (`int`, defaults to 2000): The number of diffusion steps to train the model. beta_min (`float`, defaults to 0.1): The minimum noise rate of the beta schedule, reached at t=0. beta_max (`float`, defaults to 20): The maximum noise rate of the beta schedule, reached at t=1. sampling_eps (`float`, defaults to 1e-3): The end value of sampling where timesteps decrease progressively from 1 to epsilon. """ order = 1 @register_to_config def __init__(self, num_train_timesteps=2000, beta_min=0.1, beta_max=20, sampling_eps=1e-3): self.sigmas = None self.discrete_sigmas = None self.timesteps = None def set_timesteps(self, num_inference_steps, device: Union[str, torch.device] = None): """ Sets the continuous timesteps used for the diffusion chain (to be run before inference). Args: num_inference_steps (`int`): The number of diffusion steps used when generating samples with a pre-trained model. device (`str` or `torch.device`, *optional*): The device to which the timesteps should be moved. If `None`, the timesteps are not moved. """ self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device) def step_pred(self, score, x, t, generator=None): """ Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion process from the learned model outputs (most often the predicted noise). Args: score (`torch.Tensor`): The output of the learned score model evaluated at the current sample and timestep. x (`torch.Tensor`): The current instance of the sample being denoised. t (`torch.Tensor`): The current continuous timestep. generator (`torch.Generator`, *optional*): A random number generator.
""" if self.timesteps is None: raise ValueError( "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" ) # TODO(Patrick) better comments + non-PyTorch # postprocess model score log_mean_coeff = -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff)) std = std.flatten() while len(std.shape) < len(score.shape): std = std.unsqueeze(-1) score = -score / std # compute dt = -1.0 / len(self.timesteps) beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min) beta_t = beta_t.flatten() while len(beta_t.shape) < len(x.shape): beta_t = beta_t.unsqueeze(-1) drift = -0.5 * beta_t * x diffusion = torch.sqrt(beta_t) drift = drift - diffusion**2 * score x_mean = x + drift * dt # add noise noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype) x = x_mean + diffusion * math.sqrt(-dt) * noise return x, x_mean def __len__(self): return self.config.num_train_timesteps
diffusers/src/diffusers/schedulers/deprecated/scheduling_sde_vp.py/0
{ "file_path": "diffusers/src/diffusers/schedulers/deprecated/scheduling_sde_vp.py", "repo_id": "diffusers", "token_count": 1693 }
185
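To make the reverse-SDE loop above concrete, here is a small sketch of how `step_pred` is driven during sampling. The dummy score (`-x`) stands in for a learned score model, the tensor shape and step count are arbitrary, and the import path (mirroring the file location above, inside the deprecated schedulers subpackage) is an assumption.
# Minimal sketch of the reverse VP-SDE sampling loop; `-x` is a stand-in for a
# learned score model and the tensor shape is arbitrary.
import torch
from diffusers.schedulers.deprecated import ScoreSdeVpScheduler  # assumed import path

scheduler = ScoreSdeVpScheduler(num_train_timesteps=2000)
scheduler.set_timesteps(num_inference_steps=100)

x = torch.randn(1, 3, 32, 32)  # start from pure Gaussian noise
for t in scheduler.timesteps:
    score = -x  # placeholder: a real model would predict the score at (x, t)
    x, x_mean = scheduler.step_pred(score, x, t)

# x_mean, the final drift-updated sample without the added noise, is typically taken as the output.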
# Copyright 2025 TSAIL Team and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # DISCLAIMER: This file is strongly influenced by https://github.com/LuChengTHU/dpm-solver import math from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import deprecate, is_scipy_available from ..utils.torch_utils import randn_tensor from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput if is_scipy_available(): import scipy.stats # Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar def betas_for_alpha_bar( num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine", ): """ Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of (1-beta) over time from t = [0,1]. Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up to that part of the diffusion process. Args: num_diffusion_timesteps (`int`): the number of betas to produce. max_beta (`float`): the maximum beta to use; use values lower than 1 to prevent singularities. alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar. Choose from `cosine` or `exp` Returns: betas (`np.ndarray`): the betas used by the scheduler to step the model outputs """ if alpha_transform_type == "cosine": def alpha_bar_fn(t): return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(t): return math.exp(t * -12.0) else: raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}") betas = [] for i in range(num_diffusion_timesteps): t1 = i / num_diffusion_timesteps t2 = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) return torch.tensor(betas, dtype=torch.float32) # Copied from diffusers.schedulers.scheduling_ddim.rescale_zero_terminal_snr def rescale_zero_terminal_snr(betas): """ Rescales betas to have zero terminal SNR Based on https://huggingface.co/papers/2305.08891 (Algorithm 1) Args: betas (`torch.Tensor`): the betas that the scheduler is being initialized with. Returns: `torch.Tensor`: rescaled betas with zero terminal SNR """ # Convert betas to alphas_bar_sqrt alphas = 1.0 - betas alphas_cumprod = torch.cumprod(alphas, dim=0) alphas_bar_sqrt = alphas_cumprod.sqrt() # Store old values. alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone() alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone() # Shift so the last timestep is zero. alphas_bar_sqrt -= alphas_bar_sqrt_T # Scale so the first timestep is back to the old value. 
alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T) # Convert alphas_bar_sqrt to betas alphas_bar = alphas_bar_sqrt**2 # Revert sqrt alphas = alphas_bar[1:] / alphas_bar[:-1] # Revert cumprod alphas = torch.cat([alphas_bar[0:1], alphas]) betas = 1 - alphas return betas class DPMSolverMultistepScheduler(SchedulerMixin, ConfigMixin): """ `DPMSolverMultistepScheduler` is a fast dedicated high-order solver for diffusion ODEs. This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic methods the library implements for all schedulers such as loading and saving. Args: num_train_timesteps (`int`, defaults to 1000): The number of diffusion steps to train the model. beta_start (`float`, defaults to 0.0001): The starting `beta` value of inference. beta_end (`float`, defaults to 0.02): The final `beta` value. beta_schedule (`str`, defaults to `"linear"`): The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from `linear`, `scaled_linear`, or `squaredcos_cap_v2`. trained_betas (`np.ndarray`, *optional*): Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`. solver_order (`int`, defaults to 2): The DPMSolver order which can be `1` or `2` or `3`. It is recommended to use `solver_order=2` for guided sampling, and `solver_order=3` for unconditional sampling. prediction_type (`str`, defaults to `epsilon`, *optional*): Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process), `sample` (directly predicts the noisy sample), `v_prediction` (see section 2.4 of [Imagen Video](https://imagen.research.google/video/paper.pdf) paper), or `flow_prediction`. thresholding (`bool`, defaults to `False`): Whether to use the "dynamic thresholding" method. This is unsuitable for latent-space diffusion models such as Stable Diffusion. dynamic_thresholding_ratio (`float`, defaults to 0.995): The ratio for the dynamic thresholding method. Valid only when `thresholding=True`. sample_max_value (`float`, defaults to 1.0): The threshold value for dynamic thresholding. Valid only when `thresholding=True` and `algorithm_type="dpmsolver++"`. algorithm_type (`str`, defaults to `dpmsolver++`): Algorithm type for the solver; can be `dpmsolver`, `dpmsolver++`, `sde-dpmsolver` or `sde-dpmsolver++`. The `dpmsolver` type implements the algorithms in the [DPMSolver](https://huggingface.co/papers/2206.00927) paper, and the `dpmsolver++` type implements the algorithms in the [DPMSolver++](https://huggingface.co/papers/2211.01095) paper. It is recommended to use `dpmsolver++` or `sde-dpmsolver++` with `solver_order=2` for guided sampling like in Stable Diffusion. solver_type (`str`, defaults to `midpoint`): Solver type for the second-order solver; can be `midpoint` or `heun`. The solver type slightly affects the sample quality, especially for a small number of steps. It is recommended to use `midpoint` solvers. lower_order_final (`bool`, defaults to `True`): Whether to use lower-order solvers in the final steps. Only valid for < 15 inference steps. This can stabilize the sampling of DPMSolver for steps < 15, especially for steps <= 10. euler_at_final (`bool`, defaults to `False`): Whether to use Euler's method in the final step. It is a trade-off between numerical stability and detail richness. This can stabilize the sampling of the SDE variant of DPMSolver for small number of inference steps, but sometimes may result in blurring. 
use_karras_sigmas (`bool`, *optional*, defaults to `False`): Whether to use Karras sigmas for step sizes in the noise schedule during the sampling process. If `True`, the sigmas are determined according to a sequence of noise levels {σi}. use_exponential_sigmas (`bool`, *optional*, defaults to `False`): Whether to use exponential sigmas for step sizes in the noise schedule during the sampling process. use_beta_sigmas (`bool`, *optional*, defaults to `False`): Whether to use beta sigmas for step sizes in the noise schedule during the sampling process. Refer to [Beta Sampling is All You Need](https://huggingface.co/papers/2407.12173) for more information. use_lu_lambdas (`bool`, *optional*, defaults to `False`): Whether to use the uniform-logSNR for step sizes proposed by Lu's DPM-Solver in the noise schedule during the sampling process. If `True`, the sigmas and time steps are determined according to a sequence of `lambda(t)`. use_flow_sigmas (`bool`, *optional*, defaults to `False`): Whether to use flow sigmas for step sizes in the noise schedule during the sampling process. flow_shift (`float`, *optional*, defaults to 1.0): The shift value for the timestep schedule for flow matching. final_sigmas_type (`str`, defaults to `"zero"`): The final `sigma` value for the noise schedule during the sampling process. If `"sigma_min"`, the final sigma is the same as the last sigma in the training schedule. If `zero`, the final sigma is set to 0. lambda_min_clipped (`float`, defaults to `-inf`): Clipping threshold for the minimum value of `lambda(t)` for numerical stability. This is critical for the cosine (`squaredcos_cap_v2`) noise schedule. variance_type (`str`, *optional*): Set to "learned" or "learned_range" for diffusion models that predict variance. If set, the model's output contains the predicted Gaussian variance. timestep_spacing (`str`, defaults to `"linspace"`): The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information. steps_offset (`int`, defaults to 0): An offset added to the inference steps, as required by some model families. rescale_betas_zero_snr (`bool`, defaults to `False`): Whether to rescale the betas to have zero terminal SNR. This enables the model to generate very bright and dark samples instead of limiting it to samples with medium brightness. Loosely related to [`--offset_noise`](https://github.com/huggingface/diffusers/blob/74fd735eb073eb1d774b1ab4154a0876eb82f055/examples/dreambooth/train_dreambooth.py#L506). 
""" _compatibles = [e.name for e in KarrasDiffusionSchedulers] order = 1 @register_to_config def __init__( self, num_train_timesteps: int = 1000, beta_start: float = 0.0001, beta_end: float = 0.02, beta_schedule: str = "linear", trained_betas: Optional[Union[np.ndarray, List[float]]] = None, solver_order: int = 2, prediction_type: str = "epsilon", thresholding: bool = False, dynamic_thresholding_ratio: float = 0.995, sample_max_value: float = 1.0, algorithm_type: str = "dpmsolver++", solver_type: str = "midpoint", lower_order_final: bool = True, euler_at_final: bool = False, use_karras_sigmas: Optional[bool] = False, use_exponential_sigmas: Optional[bool] = False, use_beta_sigmas: Optional[bool] = False, use_lu_lambdas: Optional[bool] = False, use_flow_sigmas: Optional[bool] = False, flow_shift: Optional[float] = 1.0, final_sigmas_type: Optional[str] = "zero", # "zero", "sigma_min" lambda_min_clipped: float = -float("inf"), variance_type: Optional[str] = None, timestep_spacing: str = "linspace", steps_offset: int = 0, rescale_betas_zero_snr: bool = False, use_dynamic_shifting: bool = False, time_shift_type: str = "exponential", ): if self.config.use_beta_sigmas and not is_scipy_available(): raise ImportError("Make sure to install scipy if you want to use beta sigmas.") if sum([self.config.use_beta_sigmas, self.config.use_exponential_sigmas, self.config.use_karras_sigmas]) > 1: raise ValueError( "Only one of `config.use_beta_sigmas`, `config.use_exponential_sigmas`, `config.use_karras_sigmas` can be used." ) if algorithm_type in ["dpmsolver", "sde-dpmsolver"]: deprecation_message = f"algorithm_type {algorithm_type} is deprecated and will be removed in a future version. Choose from `dpmsolver++` or `sde-dpmsolver++` instead" deprecate("algorithm_types dpmsolver and sde-dpmsolver", "1.0.0", deprecation_message) if trained_betas is not None: self.betas = torch.tensor(trained_betas, dtype=torch.float32) elif beta_schedule == "linear": self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. 
self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2 elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule self.betas = betas_for_alpha_bar(num_train_timesteps) else: raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}") if rescale_betas_zero_snr: self.betas = rescale_zero_terminal_snr(self.betas) self.alphas = 1.0 - self.betas self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) if rescale_betas_zero_snr: # Close to 0 without being 0 so first sigma is not inf # FP16 smallest positive subnormal works well here self.alphas_cumprod[-1] = 2**-24 # Currently we only support VP-type noise schedule self.alpha_t = torch.sqrt(self.alphas_cumprod) self.sigma_t = torch.sqrt(1 - self.alphas_cumprod) self.lambda_t = torch.log(self.alpha_t) - torch.log(self.sigma_t) self.sigmas = ((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 # standard deviation of the initial noise distribution self.init_noise_sigma = 1.0 # settings for DPM-Solver if algorithm_type not in ["dpmsolver", "dpmsolver++", "sde-dpmsolver", "sde-dpmsolver++"]: if algorithm_type == "deis": self.register_to_config(algorithm_type="dpmsolver++") else: raise NotImplementedError(f"{algorithm_type} is not implemented for {self.__class__}") if solver_type not in ["midpoint", "heun"]: if solver_type in ["logrho", "bh1", "bh2"]: self.register_to_config(solver_type="midpoint") else: raise NotImplementedError(f"{solver_type} is not implemented for {self.__class__}") if algorithm_type not in ["dpmsolver++", "sde-dpmsolver++"] and final_sigmas_type == "zero": raise ValueError( f"`final_sigmas_type` {final_sigmas_type} is not supported for `algorithm_type` {algorithm_type}. Please choose `sigma_min` instead." ) # setable values self.num_inference_steps = None timesteps = np.linspace(0, num_train_timesteps - 1, num_train_timesteps, dtype=np.float32)[::-1].copy() self.timesteps = torch.from_numpy(timesteps) self.model_outputs = [None] * solver_order self.lower_order_nums = 0 self._step_index = None self._begin_index = None self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication @property def step_index(self): """ The index counter for current timestep. It will increase 1 after each scheduler step. """ return self._step_index @property def begin_index(self): """ The index for the first timestep. It should be set from pipeline with `set_begin_index` method. """ return self._begin_index def set_begin_index(self, begin_index: int = 0): """ Sets the begin index for the scheduler. This function should be run from pipeline before the inference. Args: begin_index (`int`): The begin index for the scheduler. """ self._begin_index = begin_index def set_timesteps( self, num_inference_steps: int = None, device: Union[str, torch.device] = None, mu: Optional[float] = None, timesteps: Optional[List[int]] = None, ): """ Sets the discrete timesteps used for the diffusion chain (to be run before inference). Args: num_inference_steps (`int`): The number of diffusion steps used when generating samples with a pre-trained model. device (`str` or `torch.device`, *optional*): The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. timesteps (`List[int]`, *optional*): Custom timesteps used to support arbitrary timesteps schedule. If `None`, timesteps will be generated based on the `timestep_spacing` attribute. 
If `timesteps` is passed, `num_inference_steps` and `sigmas` must be `None`, and `timestep_spacing` attribute will be ignored. """ if mu is not None: assert self.config.use_dynamic_shifting and self.config.time_shift_type == "exponential" self.config.flow_shift = np.exp(mu) if num_inference_steps is None and timesteps is None: raise ValueError("Must pass exactly one of `num_inference_steps` or `timesteps`.") if num_inference_steps is not None and timesteps is not None: raise ValueError("Can only pass one of `num_inference_steps` or `custom_timesteps`.") if timesteps is not None and self.config.use_karras_sigmas: raise ValueError("Cannot use `timesteps` with `config.use_karras_sigmas = True`") if timesteps is not None and self.config.use_lu_lambdas: raise ValueError("Cannot use `timesteps` with `config.use_lu_lambdas = True`") if timesteps is not None and self.config.use_exponential_sigmas: raise ValueError("Cannot set `timesteps` with `config.use_exponential_sigmas = True`.") if timesteps is not None and self.config.use_beta_sigmas: raise ValueError("Cannot set `timesteps` with `config.use_beta_sigmas = True`.") if timesteps is not None: timesteps = np.array(timesteps).astype(np.int64) else: # Clipping the minimum of all lambda(t) for numerical stability. # This is critical for cosine (squaredcos_cap_v2) noise schedule. clipped_idx = torch.searchsorted(torch.flip(self.lambda_t, [0]), self.config.lambda_min_clipped) last_timestep = ((self.config.num_train_timesteps - clipped_idx).numpy()).item() # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://huggingface.co/papers/2305.08891 if self.config.timestep_spacing == "linspace": timesteps = ( np.linspace(0, last_timestep - 1, num_inference_steps + 1) .round()[::-1][:-1] .copy() .astype(np.int64) ) elif self.config.timestep_spacing == "leading": step_ratio = last_timestep // (num_inference_steps + 1) # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 timesteps = ( (np.arange(0, num_inference_steps + 1) * step_ratio).round()[::-1][:-1].copy().astype(np.int64) ) timesteps += self.config.steps_offset elif self.config.timestep_spacing == "trailing": step_ratio = self.config.num_train_timesteps / num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 timesteps = np.arange(last_timestep, 0, -step_ratio).round().copy().astype(np.int64) timesteps -= 1 else: raise ValueError( f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'." 
) sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5) log_sigmas = np.log(sigmas) if self.config.use_karras_sigmas: sigmas = np.flip(sigmas).copy() sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=num_inference_steps) timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas]) if self.config.beta_schedule != "squaredcos_cap_v2": timesteps = timesteps.round() elif self.config.use_lu_lambdas: lambdas = np.flip(log_sigmas.copy()) lambdas = self._convert_to_lu(in_lambdas=lambdas, num_inference_steps=num_inference_steps) sigmas = np.exp(lambdas) timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas]) if self.config.beta_schedule != "squaredcos_cap_v2": timesteps = timesteps.round() elif self.config.use_exponential_sigmas: sigmas = np.flip(sigmas).copy() sigmas = self._convert_to_exponential(in_sigmas=sigmas, num_inference_steps=num_inference_steps) timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas]) elif self.config.use_beta_sigmas: sigmas = np.flip(sigmas).copy() sigmas = self._convert_to_beta(in_sigmas=sigmas, num_inference_steps=num_inference_steps) timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas]) elif self.config.use_flow_sigmas: alphas = np.linspace(1, 1 / self.config.num_train_timesteps, num_inference_steps + 1) sigmas = 1.0 - alphas sigmas = np.flip(self.config.flow_shift * sigmas / (1 + (self.config.flow_shift - 1) * sigmas))[:-1].copy() timesteps = (sigmas * self.config.num_train_timesteps).copy() else: sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas) if self.config.final_sigmas_type == "sigma_min": sigma_last = ((1 - self.alphas_cumprod[0]) / self.alphas_cumprod[0]) ** 0.5 elif self.config.final_sigmas_type == "zero": sigma_last = 0 else: raise ValueError( f"`final_sigmas_type` must be one of 'zero', or 'sigma_min', but got {self.config.final_sigmas_type}" ) sigmas = np.concatenate([sigmas, [sigma_last]]).astype(np.float32) self.sigmas = torch.from_numpy(sigmas) self.timesteps = torch.from_numpy(timesteps).to(device=device, dtype=torch.int64) self.num_inference_steps = len(timesteps) self.model_outputs = [ None, ] * self.config.solver_order self.lower_order_nums = 0 # add an index counter for schedulers that allow duplicated timesteps self._step_index = None self._begin_index = None self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler._threshold_sample def _threshold_sample(self, sample: torch.Tensor) -> torch.Tensor: """ "Dynamic thresholding: At each sampling step we set s to a certain percentile absolute pixel value in xt0 (the prediction of x_0 at timestep t), and if s > 1, then we threshold xt0 to the range [-s, s] and then divide by s. Dynamic thresholding pushes saturated pixels (those near -1 and 1) inwards, thereby actively preventing pixels from saturation at each step. We find that dynamic thresholding results in significantly better photorealism as well as better image-text alignment, especially when using very large guidance weights." 
https://huggingface.co/papers/2205.11487 """ dtype = sample.dtype batch_size, channels, *remaining_dims = sample.shape if dtype not in (torch.float32, torch.float64): sample = sample.float() # upcast for quantile calculation, and clamp not implemented for cpu half # Flatten sample for doing quantile calculation along each image sample = sample.reshape(batch_size, channels * np.prod(remaining_dims)) abs_sample = sample.abs() # "a certain percentile absolute pixel value" s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1) s = torch.clamp( s, min=1, max=self.config.sample_max_value ) # When clamped to min=1, equivalent to standard clipping to [-1, 1] s = s.unsqueeze(1) # (batch_size, 1) because clamp will broadcast along dim=0 sample = torch.clamp(sample, -s, s) / s # "we threshold xt0 to the range [-s, s] and then divide by s" sample = sample.reshape(batch_size, channels, *remaining_dims) sample = sample.to(dtype) return sample # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._sigma_to_t def _sigma_to_t(self, sigma, log_sigmas): # get log sigma log_sigma = np.log(np.maximum(sigma, 1e-10)) # get distribution dists = log_sigma - log_sigmas[:, np.newaxis] # get sigmas range low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2) high_idx = low_idx + 1 low = log_sigmas[low_idx] high = log_sigmas[high_idx] # interpolate sigmas w = (low - log_sigma) / (low - high) w = np.clip(w, 0, 1) # transform interpolation to time range t = (1 - w) * low_idx + w * high_idx t = t.reshape(sigma.shape) return t def _sigma_to_alpha_sigma_t(self, sigma): if self.config.use_flow_sigmas: alpha_t = 1 - sigma sigma_t = sigma else: alpha_t = 1 / ((sigma**2 + 1) ** 0.5) sigma_t = sigma * alpha_t return alpha_t, sigma_t # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._convert_to_karras def _convert_to_karras(self, in_sigmas: torch.Tensor, num_inference_steps) -> torch.Tensor: """Constructs the noise schedule of Karras et al. (2022).""" # Hack to make sure that other schedulers which copy this function don't break # TODO: Add this logic to the other schedulers if hasattr(self.config, "sigma_min"): sigma_min = self.config.sigma_min else: sigma_min = None if hasattr(self.config, "sigma_max"): sigma_max = self.config.sigma_max else: sigma_max = None sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item() sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item() rho = 7.0 # 7.0 is the value used in the paper ramp = np.linspace(0, 1, num_inference_steps) min_inv_rho = sigma_min ** (1 / rho) max_inv_rho = sigma_max ** (1 / rho) sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho return sigmas def _convert_to_lu(self, in_lambdas: torch.Tensor, num_inference_steps) -> torch.Tensor: """Constructs the noise schedule of Lu et al. 
(2022).""" lambda_min: float = in_lambdas[-1].item() lambda_max: float = in_lambdas[0].item() rho = 1.0 # 1.0 is the value used in the paper ramp = np.linspace(0, 1, num_inference_steps) min_inv_rho = lambda_min ** (1 / rho) max_inv_rho = lambda_max ** (1 / rho) lambdas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho return lambdas # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._convert_to_exponential def _convert_to_exponential(self, in_sigmas: torch.Tensor, num_inference_steps: int) -> torch.Tensor: """Constructs an exponential noise schedule.""" # Hack to make sure that other schedulers which copy this function don't break # TODO: Add this logic to the other schedulers if hasattr(self.config, "sigma_min"): sigma_min = self.config.sigma_min else: sigma_min = None if hasattr(self.config, "sigma_max"): sigma_max = self.config.sigma_max else: sigma_max = None sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item() sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item() sigmas = np.exp(np.linspace(math.log(sigma_max), math.log(sigma_min), num_inference_steps)) return sigmas # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._convert_to_beta def _convert_to_beta( self, in_sigmas: torch.Tensor, num_inference_steps: int, alpha: float = 0.6, beta: float = 0.6 ) -> torch.Tensor: """From "Beta Sampling is All You Need" [arXiv:2407.12173] (Lee et. al, 2024)""" # Hack to make sure that other schedulers which copy this function don't break # TODO: Add this logic to the other schedulers if hasattr(self.config, "sigma_min"): sigma_min = self.config.sigma_min else: sigma_min = None if hasattr(self.config, "sigma_max"): sigma_max = self.config.sigma_max else: sigma_max = None sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item() sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item() sigmas = np.array( [ sigma_min + (ppf * (sigma_max - sigma_min)) for ppf in [ scipy.stats.beta.ppf(timestep, alpha, beta) for timestep in 1 - np.linspace(0, 1, num_inference_steps) ] ] ) return sigmas def convert_model_output( self, model_output: torch.Tensor, *args, sample: torch.Tensor = None, **kwargs, ) -> torch.Tensor: """ Convert the model output to the corresponding type the DPMSolver/DPMSolver++ algorithm needs. DPM-Solver is designed to discretize an integral of the noise prediction model, and DPM-Solver++ is designed to discretize an integral of the data prediction model. <Tip> The algorithm and model type are decoupled. You can use either DPMSolver or DPMSolver++ for both noise prediction and data prediction models. </Tip> Args: model_output (`torch.Tensor`): The direct output from the learned diffusion model. sample (`torch.Tensor`): A current instance of a sample created by the diffusion process. Returns: `torch.Tensor`: The converted model output. """ timestep = args[0] if len(args) > 0 else kwargs.pop("timestep", None) if sample is None: if len(args) > 1: sample = args[1] else: raise ValueError("missing `sample` as a required keyword argument") if timestep is not None: deprecate( "timesteps", "1.0.0", "Passing `timesteps` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", ) # DPM-Solver++ needs to solve an integral of the data prediction model. 
if self.config.algorithm_type in ["dpmsolver++", "sde-dpmsolver++"]: if self.config.prediction_type == "epsilon": # DPM-Solver and DPM-Solver++ only need the "mean" output. if self.config.variance_type in ["learned", "learned_range"]: model_output = model_output[:, :3] sigma = self.sigmas[self.step_index] alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma) x0_pred = (sample - sigma_t * model_output) / alpha_t elif self.config.prediction_type == "sample": x0_pred = model_output elif self.config.prediction_type == "v_prediction": sigma = self.sigmas[self.step_index] alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma) x0_pred = alpha_t * sample - sigma_t * model_output elif self.config.prediction_type == "flow_prediction": sigma_t = self.sigmas[self.step_index] x0_pred = sample - sigma_t * model_output else: raise ValueError( f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, " "`v_prediction`, or `flow_prediction` for the DPMSolverMultistepScheduler." ) if self.config.thresholding: x0_pred = self._threshold_sample(x0_pred) return x0_pred # DPM-Solver needs to solve an integral of the noise prediction model. elif self.config.algorithm_type in ["dpmsolver", "sde-dpmsolver"]: if self.config.prediction_type == "epsilon": # DPM-Solver and DPM-Solver++ only need the "mean" output. if self.config.variance_type in ["learned", "learned_range"]: epsilon = model_output[:, :3] else: epsilon = model_output elif self.config.prediction_type == "sample": sigma = self.sigmas[self.step_index] alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma) epsilon = (sample - alpha_t * model_output) / sigma_t elif self.config.prediction_type == "v_prediction": sigma = self.sigmas[self.step_index] alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma) epsilon = alpha_t * model_output + sigma_t * sample else: raise ValueError( f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or" " `v_prediction` for the DPMSolverMultistepScheduler." ) if self.config.thresholding: sigma = self.sigmas[self.step_index] alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma) x0_pred = (sample - sigma_t * epsilon) / alpha_t x0_pred = self._threshold_sample(x0_pred) epsilon = (sample - alpha_t * x0_pred) / sigma_t return epsilon def dpm_solver_first_order_update( self, model_output: torch.Tensor, *args, sample: torch.Tensor = None, noise: Optional[torch.Tensor] = None, **kwargs, ) -> torch.Tensor: """ One step for the first-order DPMSolver (equivalent to DDIM). Args: model_output (`torch.Tensor`): The direct output from the learned diffusion model. sample (`torch.Tensor`): A current instance of a sample created by the diffusion process. Returns: `torch.Tensor`: The sample tensor at the previous timestep. 
""" timestep = args[0] if len(args) > 0 else kwargs.pop("timestep", None) prev_timestep = args[1] if len(args) > 1 else kwargs.pop("prev_timestep", None) if sample is None: if len(args) > 2: sample = args[2] else: raise ValueError("missing `sample` as a required keyword argument") if timestep is not None: deprecate( "timesteps", "1.0.0", "Passing `timesteps` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", ) if prev_timestep is not None: deprecate( "prev_timestep", "1.0.0", "Passing `prev_timestep` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", ) sigma_t, sigma_s = self.sigmas[self.step_index + 1], self.sigmas[self.step_index] alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma_t) alpha_s, sigma_s = self._sigma_to_alpha_sigma_t(sigma_s) lambda_t = torch.log(alpha_t) - torch.log(sigma_t) lambda_s = torch.log(alpha_s) - torch.log(sigma_s) h = lambda_t - lambda_s if self.config.algorithm_type == "dpmsolver++": x_t = (sigma_t / sigma_s) * sample - (alpha_t * (torch.exp(-h) - 1.0)) * model_output elif self.config.algorithm_type == "dpmsolver": x_t = (alpha_t / alpha_s) * sample - (sigma_t * (torch.exp(h) - 1.0)) * model_output elif self.config.algorithm_type == "sde-dpmsolver++": assert noise is not None x_t = ( (sigma_t / sigma_s * torch.exp(-h)) * sample + (alpha_t * (1 - torch.exp(-2.0 * h))) * model_output + sigma_t * torch.sqrt(1.0 - torch.exp(-2 * h)) * noise ) elif self.config.algorithm_type == "sde-dpmsolver": assert noise is not None x_t = ( (alpha_t / alpha_s) * sample - 2.0 * (sigma_t * (torch.exp(h) - 1.0)) * model_output + sigma_t * torch.sqrt(torch.exp(2 * h) - 1.0) * noise ) return x_t def multistep_dpm_solver_second_order_update( self, model_output_list: List[torch.Tensor], *args, sample: torch.Tensor = None, noise: Optional[torch.Tensor] = None, **kwargs, ) -> torch.Tensor: """ One step for the second-order multistep DPMSolver. Args: model_output_list (`List[torch.Tensor]`): The direct outputs from learned diffusion model at current and latter timesteps. sample (`torch.Tensor`): A current instance of a sample created by the diffusion process. Returns: `torch.Tensor`: The sample tensor at the previous timestep. 
""" timestep_list = args[0] if len(args) > 0 else kwargs.pop("timestep_list", None) prev_timestep = args[1] if len(args) > 1 else kwargs.pop("prev_timestep", None) if sample is None: if len(args) > 2: sample = args[2] else: raise ValueError("missing `sample` as a required keyword argument") if timestep_list is not None: deprecate( "timestep_list", "1.0.0", "Passing `timestep_list` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", ) if prev_timestep is not None: deprecate( "prev_timestep", "1.0.0", "Passing `prev_timestep` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", ) sigma_t, sigma_s0, sigma_s1 = ( self.sigmas[self.step_index + 1], self.sigmas[self.step_index], self.sigmas[self.step_index - 1], ) alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma_t) alpha_s0, sigma_s0 = self._sigma_to_alpha_sigma_t(sigma_s0) alpha_s1, sigma_s1 = self._sigma_to_alpha_sigma_t(sigma_s1) lambda_t = torch.log(alpha_t) - torch.log(sigma_t) lambda_s0 = torch.log(alpha_s0) - torch.log(sigma_s0) lambda_s1 = torch.log(alpha_s1) - torch.log(sigma_s1) m0, m1 = model_output_list[-1], model_output_list[-2] h, h_0 = lambda_t - lambda_s0, lambda_s0 - lambda_s1 r0 = h_0 / h D0, D1 = m0, (1.0 / r0) * (m0 - m1) if self.config.algorithm_type == "dpmsolver++": # See https://huggingface.co/papers/2211.01095 for detailed derivations if self.config.solver_type == "midpoint": x_t = ( (sigma_t / sigma_s0) * sample - (alpha_t * (torch.exp(-h) - 1.0)) * D0 - 0.5 * (alpha_t * (torch.exp(-h) - 1.0)) * D1 ) elif self.config.solver_type == "heun": x_t = ( (sigma_t / sigma_s0) * sample - (alpha_t * (torch.exp(-h) - 1.0)) * D0 + (alpha_t * ((torch.exp(-h) - 1.0) / h + 1.0)) * D1 ) elif self.config.algorithm_type == "dpmsolver": # See https://huggingface.co/papers/2206.00927 for detailed derivations if self.config.solver_type == "midpoint": x_t = ( (alpha_t / alpha_s0) * sample - (sigma_t * (torch.exp(h) - 1.0)) * D0 - 0.5 * (sigma_t * (torch.exp(h) - 1.0)) * D1 ) elif self.config.solver_type == "heun": x_t = ( (alpha_t / alpha_s0) * sample - (sigma_t * (torch.exp(h) - 1.0)) * D0 - (sigma_t * ((torch.exp(h) - 1.0) / h - 1.0)) * D1 ) elif self.config.algorithm_type == "sde-dpmsolver++": assert noise is not None if self.config.solver_type == "midpoint": x_t = ( (sigma_t / sigma_s0 * torch.exp(-h)) * sample + (alpha_t * (1 - torch.exp(-2.0 * h))) * D0 + 0.5 * (alpha_t * (1 - torch.exp(-2.0 * h))) * D1 + sigma_t * torch.sqrt(1.0 - torch.exp(-2 * h)) * noise ) elif self.config.solver_type == "heun": x_t = ( (sigma_t / sigma_s0 * torch.exp(-h)) * sample + (alpha_t * (1 - torch.exp(-2.0 * h))) * D0 + (alpha_t * ((1.0 - torch.exp(-2.0 * h)) / (-2.0 * h) + 1.0)) * D1 + sigma_t * torch.sqrt(1.0 - torch.exp(-2 * h)) * noise ) elif self.config.algorithm_type == "sde-dpmsolver": assert noise is not None if self.config.solver_type == "midpoint": x_t = ( (alpha_t / alpha_s0) * sample - 2.0 * (sigma_t * (torch.exp(h) - 1.0)) * D0 - (sigma_t * (torch.exp(h) - 1.0)) * D1 + sigma_t * torch.sqrt(torch.exp(2 * h) - 1.0) * noise ) elif self.config.solver_type == "heun": x_t = ( (alpha_t / alpha_s0) * sample - 2.0 * (sigma_t * (torch.exp(h) - 1.0)) * D0 - 2.0 * (sigma_t * ((torch.exp(h) - 1.0) / h - 1.0)) * D1 + sigma_t * torch.sqrt(torch.exp(2 * h) - 1.0) * noise ) return x_t def multistep_dpm_solver_third_order_update( self, model_output_list: List[torch.Tensor], *args, sample: torch.Tensor = None, noise: 
Optional[torch.Tensor] = None, **kwargs, ) -> torch.Tensor: """ One step for the third-order multistep DPMSolver. Args: model_output_list (`List[torch.Tensor]`): The direct outputs from learned diffusion model at current and latter timesteps. sample (`torch.Tensor`): A current instance of a sample created by diffusion process. Returns: `torch.Tensor`: The sample tensor at the previous timestep. """ timestep_list = args[0] if len(args) > 0 else kwargs.pop("timestep_list", None) prev_timestep = args[1] if len(args) > 1 else kwargs.pop("prev_timestep", None) if sample is None: if len(args) > 2: sample = args[2] else: raise ValueError("missing `sample` as a required keyword argument") if timestep_list is not None: deprecate( "timestep_list", "1.0.0", "Passing `timestep_list` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", ) if prev_timestep is not None: deprecate( "prev_timestep", "1.0.0", "Passing `prev_timestep` is deprecated and has no effect as model output conversion is now handled via an internal counter `self.step_index`", ) sigma_t, sigma_s0, sigma_s1, sigma_s2 = ( self.sigmas[self.step_index + 1], self.sigmas[self.step_index], self.sigmas[self.step_index - 1], self.sigmas[self.step_index - 2], ) alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma_t) alpha_s0, sigma_s0 = self._sigma_to_alpha_sigma_t(sigma_s0) alpha_s1, sigma_s1 = self._sigma_to_alpha_sigma_t(sigma_s1) alpha_s2, sigma_s2 = self._sigma_to_alpha_sigma_t(sigma_s2) lambda_t = torch.log(alpha_t) - torch.log(sigma_t) lambda_s0 = torch.log(alpha_s0) - torch.log(sigma_s0) lambda_s1 = torch.log(alpha_s1) - torch.log(sigma_s1) lambda_s2 = torch.log(alpha_s2) - torch.log(sigma_s2) m0, m1, m2 = model_output_list[-1], model_output_list[-2], model_output_list[-3] h, h_0, h_1 = lambda_t - lambda_s0, lambda_s0 - lambda_s1, lambda_s1 - lambda_s2 r0, r1 = h_0 / h, h_1 / h D0 = m0 D1_0, D1_1 = (1.0 / r0) * (m0 - m1), (1.0 / r1) * (m1 - m2) D1 = D1_0 + (r0 / (r0 + r1)) * (D1_0 - D1_1) D2 = (1.0 / (r0 + r1)) * (D1_0 - D1_1) if self.config.algorithm_type == "dpmsolver++": # See https://huggingface.co/papers/2206.00927 for detailed derivations x_t = ( (sigma_t / sigma_s0) * sample - (alpha_t * (torch.exp(-h) - 1.0)) * D0 + (alpha_t * ((torch.exp(-h) - 1.0) / h + 1.0)) * D1 - (alpha_t * ((torch.exp(-h) - 1.0 + h) / h**2 - 0.5)) * D2 ) elif self.config.algorithm_type == "dpmsolver": # See https://huggingface.co/papers/2206.00927 for detailed derivations x_t = ( (alpha_t / alpha_s0) * sample - (sigma_t * (torch.exp(h) - 1.0)) * D0 - (sigma_t * ((torch.exp(h) - 1.0) / h - 1.0)) * D1 - (sigma_t * ((torch.exp(h) - 1.0 - h) / h**2 - 0.5)) * D2 ) elif self.config.algorithm_type == "sde-dpmsolver++": assert noise is not None x_t = ( (sigma_t / sigma_s0 * torch.exp(-h)) * sample + (alpha_t * (1.0 - torch.exp(-2.0 * h))) * D0 + (alpha_t * ((1.0 - torch.exp(-2.0 * h)) / (-2.0 * h) + 1.0)) * D1 + (alpha_t * ((1.0 - torch.exp(-2.0 * h) - 2.0 * h) / (2.0 * h) ** 2 - 0.5)) * D2 + sigma_t * torch.sqrt(1.0 - torch.exp(-2 * h)) * noise ) return x_t def index_for_timestep(self, timestep, schedule_timesteps=None): if schedule_timesteps is None: schedule_timesteps = self.timesteps index_candidates = (schedule_timesteps == timestep).nonzero() if len(index_candidates) == 0: step_index = len(self.timesteps) - 1 # The sigma index that is taken for the **very** first `step` # is always the second index (or the last index if there is only 1) # This way we can ensure we don't accidentally skip a 
sigma in # case we start in the middle of the denoising schedule (e.g. for image-to-image) elif len(index_candidates) > 1: step_index = index_candidates[1].item() else: step_index = index_candidates[0].item() return step_index def _init_step_index(self, timestep): """ Initialize the step_index counter for the scheduler. """ if self.begin_index is None: if isinstance(timestep, torch.Tensor): timestep = timestep.to(self.timesteps.device) self._step_index = self.index_for_timestep(timestep) else: self._step_index = self._begin_index def step( self, model_output: torch.Tensor, timestep: Union[int, torch.Tensor], sample: torch.Tensor, generator=None, variance_noise: Optional[torch.Tensor] = None, return_dict: bool = True, ) -> Union[SchedulerOutput, Tuple]: """ Predict the sample from the previous timestep by reversing the SDE. This function propagates the sample with the multistep DPMSolver. Args: model_output (`torch.Tensor`): The direct output from learned diffusion model. timestep (`int`): The current discrete timestep in the diffusion chain. sample (`torch.Tensor`): A current instance of a sample created by the diffusion process. generator (`torch.Generator`, *optional*): A random number generator. variance_noise (`torch.Tensor`): Alternative to generating noise with `generator` by directly providing the noise for the variance itself. Useful for methods such as [`LEdits++`]. return_dict (`bool`): Whether or not to return a [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`. Returns: [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`: If return_dict is `True`, [`~schedulers.scheduling_utils.SchedulerOutput`] is returned, otherwise a tuple is returned where the first element is the sample tensor. """ if self.num_inference_steps is None: raise ValueError( "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" ) if self.step_index is None: self._init_step_index(timestep) # Improve numerical stability for small number of steps lower_order_final = (self.step_index == len(self.timesteps) - 1) and ( self.config.euler_at_final or (self.config.lower_order_final and len(self.timesteps) < 15) or self.config.final_sigmas_type == "zero" ) lower_order_second = ( (self.step_index == len(self.timesteps) - 2) and self.config.lower_order_final and len(self.timesteps) < 15 ) model_output = self.convert_model_output(model_output, sample=sample) for i in range(self.config.solver_order - 1): self.model_outputs[i] = self.model_outputs[i + 1] self.model_outputs[-1] = model_output # Upcast to avoid precision issues when computing prev_sample sample = sample.to(torch.float32) if self.config.algorithm_type in ["sde-dpmsolver", "sde-dpmsolver++"] and variance_noise is None: noise = randn_tensor( model_output.shape, generator=generator, device=model_output.device, dtype=torch.float32 ) elif self.config.algorithm_type in ["sde-dpmsolver", "sde-dpmsolver++"]: noise = variance_noise.to(device=model_output.device, dtype=torch.float32) else: noise = None if self.config.solver_order == 1 or self.lower_order_nums < 1 or lower_order_final: prev_sample = self.dpm_solver_first_order_update(model_output, sample=sample, noise=noise) elif self.config.solver_order == 2 or self.lower_order_nums < 2 or lower_order_second: prev_sample = self.multistep_dpm_solver_second_order_update(self.model_outputs, sample=sample, noise=noise) else: prev_sample = self.multistep_dpm_solver_third_order_update(self.model_outputs, sample=sample, noise=noise) if self.lower_order_nums < 
self.config.solver_order: self.lower_order_nums += 1 # Cast sample back to expected dtype prev_sample = prev_sample.to(model_output.dtype) # upon completion increase step index by one self._step_index += 1 if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=prev_sample) def scale_model_input(self, sample: torch.Tensor, *args, **kwargs) -> torch.Tensor: """ Ensures interchangeability with schedulers that need to scale the denoising model input depending on the current timestep. Args: sample (`torch.Tensor`): The input sample. Returns: `torch.Tensor`: A scaled input sample. """ return sample def add_noise( self, original_samples: torch.Tensor, noise: torch.Tensor, timesteps: torch.IntTensor, ) -> torch.Tensor: # Make sure sigmas and timesteps have the same device and dtype as original_samples sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype) if original_samples.device.type == "mps" and torch.is_floating_point(timesteps): # mps does not support float64 schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32) timesteps = timesteps.to(original_samples.device, dtype=torch.float32) else: schedule_timesteps = self.timesteps.to(original_samples.device) timesteps = timesteps.to(original_samples.device) # begin_index is None when the scheduler is used for training or pipeline does not implement set_begin_index if self.begin_index is None: step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps] elif self.step_index is not None: # add_noise is called after first denoising step (for inpainting) step_indices = [self.step_index] * timesteps.shape[0] else: # add noise is called before first denoising step to create initial latent(img2img) step_indices = [self.begin_index] * timesteps.shape[0] sigma = sigmas[step_indices].flatten() while len(sigma.shape) < len(original_samples.shape): sigma = sigma.unsqueeze(-1) alpha_t, sigma_t = self._sigma_to_alpha_sigma_t(sigma) noisy_samples = alpha_t * original_samples + sigma_t * noise return noisy_samples def __len__(self): return self.config.num_train_timesteps
diffusers/src/diffusers/schedulers/scheduling_dpmsolver_multistep.py/0
{ "file_path": "diffusers/src/diffusers/schedulers/scheduling_dpmsolver_multistep.py", "repo_id": "diffusers", "token_count": 25871 }
186
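The file above implements the DPM-Solver / DPM-Solver++ multistep updates. Below is a minimal, illustrative sketch of driving the scheduler by hand; the random `model_output` is a stand-in for a real denoising network, so the resulting samples are meaningless and the snippet only demonstrates the `set_timesteps`/`step` flow:

```python
import torch

from diffusers import DPMSolverMultistepScheduler

scheduler = DPMSolverMultistepScheduler(
    num_train_timesteps=1000,
    algorithm_type="dpmsolver++",
    solver_order=2,
)
scheduler.set_timesteps(num_inference_steps=10)

sample = torch.randn(1, 3, 8, 8)  # toy "latent"
for t in scheduler.timesteps:
    # A real pipeline would call its denoising UNet here; we fake an epsilon prediction.
    model_output = torch.randn_like(sample)
    # step() converts the model output, updates the multistep history, and applies
    # the first-/second-/third-order update implemented in the file above.
    sample = scheduler.step(model_output, t, sample).prev_sample
print(sample.shape)
```

In a real pipeline the UNet's prediction replaces the random tensor, and `convert_model_output` turns it into whichever parameterization the configured `algorithm_type` and `prediction_type` expect.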
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import importlib import math import os from dataclasses import dataclass from enum import Enum from typing import Optional, Tuple, Union import flax import jax.numpy as jnp from huggingface_hub.utils import validate_hf_hub_args from ..utils import BaseOutput, PushToHubMixin SCHEDULER_CONFIG_NAME = "scheduler_config.json" # NOTE: We make this type an enum because it simplifies usage in docs and prevents # circular imports when used for `_compatibles` within the schedulers module. # When it's used as a type in pipelines, it really is a Union because the actual # scheduler instance is passed in. class FlaxKarrasDiffusionSchedulers(Enum): FlaxDDIMScheduler = 1 FlaxDDPMScheduler = 2 FlaxPNDMScheduler = 3 FlaxLMSDiscreteScheduler = 4 FlaxDPMSolverMultistepScheduler = 5 FlaxEulerDiscreteScheduler = 6 @dataclass class FlaxSchedulerOutput(BaseOutput): """ Base class for the scheduler's step function output. Args: prev_sample (`jnp.ndarray` of shape `(batch_size, num_channels, height, width)` for images): Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the denoising loop. """ prev_sample: jnp.ndarray class FlaxSchedulerMixin(PushToHubMixin): """ Mixin containing common functions for the schedulers. Class attributes: - **_compatibles** (`List[str]`) -- A list of classes that are compatible with the parent class, so that `from_config` can be used from a class different than the one used to save the config (should be overridden by parent class). """ config_name = SCHEDULER_CONFIG_NAME ignore_for_config = ["dtype"] _compatibles = [] has_compatibles = True @classmethod @validate_hf_hub_args def from_pretrained( cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]] = None, subfolder: Optional[str] = None, return_unused_kwargs=False, **kwargs, ): r""" Instantiate a Scheduler class from a pre-defined JSON-file. Parameters: pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*): Can be either: - A string, the *model id* of a model repo on huggingface.co. Valid model ids should have an organization name, like `google/ddpm-celebahq-256`. - A path to a *directory* containing model weights saved using [`~SchedulerMixin.save_pretrained`], e.g., `./my_model_directory/`. subfolder (`str`, *optional*): In case the relevant files are located inside a subfolder of the model repo (either remote in huggingface.co or downloaded locally), you can specify the folder name here. return_unused_kwargs (`bool`, *optional*, defaults to `False`): Whether kwargs that are not consumed by the Python class should be returned or not. cache_dir (`Union[str, os.PathLike]`, *optional*): Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used. 
force_download (`bool`, *optional*, defaults to `False`): Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. output_loading_info(`bool`, *optional*, defaults to `False`): Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages. local_files_only(`bool`, *optional*, defaults to `False`): Whether or not to only look at local files (i.e., do not try to download the model). token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated when running `transformers-cli login` (stored in `~/.huggingface`). revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any identifier allowed by git. <Tip> It is required to be logged in (`hf auth login`) when you want to use private or [gated models](https://huggingface.co/docs/hub/models-gated#gated-models). </Tip> <Tip> Activate the special ["offline-mode"](https://huggingface.co/transformers/installation.html#offline-mode) to use this method in a firewalled environment. </Tip> """ config, kwargs = cls.load_config( pretrained_model_name_or_path=pretrained_model_name_or_path, subfolder=subfolder, return_unused_kwargs=True, **kwargs, ) scheduler, unused_kwargs = cls.from_config(config, return_unused_kwargs=True, **kwargs) if hasattr(scheduler, "create_state") and getattr(scheduler, "has_state", False): state = scheduler.create_state() if return_unused_kwargs: return scheduler, state, unused_kwargs return scheduler, state def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs): """ Save a scheduler configuration object to the directory `save_directory`, so that it can be re-loaded using the [`~FlaxSchedulerMixin.from_pretrained`] class method. Args: save_directory (`str` or `os.PathLike`): Directory where the configuration JSON file will be saved (will be created if it does not exist). push_to_hub (`bool`, *optional*, defaults to `False`): Whether or not to push your model to the Hugging Face Hub after saving it. You can specify the repository you want to push to with `repo_id` (will default to the name of `save_directory` in your namespace). kwargs (`Dict[str, Any]`, *optional*): Additional keyword arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method. 
""" self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs) @property def compatibles(self): """ Returns all schedulers that are compatible with this scheduler Returns: `List[SchedulerMixin]`: List of compatible schedulers """ return self._get_compatibles() @classmethod def _get_compatibles(cls): compatible_classes_str = list(set([cls.__name__] + cls._compatibles)) diffusers_library = importlib.import_module(__name__.split(".")[0]) compatible_classes = [ getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c) ] return compatible_classes def broadcast_to_shape_from_left(x: jnp.ndarray, shape: Tuple[int]) -> jnp.ndarray: assert len(shape) >= x.ndim return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape) def betas_for_alpha_bar(num_diffusion_timesteps: int, max_beta=0.999, dtype=jnp.float32) -> jnp.ndarray: """ Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of (1-beta) over time from t = [0,1]. Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up to that part of the diffusion process. Args: num_diffusion_timesteps (`int`): the number of betas to produce. max_beta (`float`): the maximum beta to use; use values lower than 1 to prevent singularities. Returns: betas (`jnp.ndarray`): the betas used by the scheduler to step the model outputs """ def alpha_bar(time_step): return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2 betas = [] for i in range(num_diffusion_timesteps): t1 = i / num_diffusion_timesteps t2 = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta)) return jnp.array(betas, dtype=dtype) @flax.struct.dataclass class CommonSchedulerState: alphas: jnp.ndarray betas: jnp.ndarray alphas_cumprod: jnp.ndarray @classmethod def create(cls, scheduler): config = scheduler.config if config.trained_betas is not None: betas = jnp.asarray(config.trained_betas, dtype=scheduler.dtype) elif config.beta_schedule == "linear": betas = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype) elif config.beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. 
betas = ( jnp.linspace( config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype ) ** 2 ) elif config.beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule betas = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype) else: raise NotImplementedError( f"beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}" ) alphas = 1.0 - betas alphas_cumprod = jnp.cumprod(alphas, axis=0) return cls( alphas=alphas, betas=betas, alphas_cumprod=alphas_cumprod, ) def get_sqrt_alpha_prod( state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray ): alphas_cumprod = state.alphas_cumprod sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5 sqrt_alpha_prod = sqrt_alpha_prod.flatten() sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape) sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5 sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten() sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape) return sqrt_alpha_prod, sqrt_one_minus_alpha_prod def add_noise_common( state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray ): sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps) noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise return noisy_samples def get_velocity_common(state: CommonSchedulerState, sample: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray): sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps) velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample return velocity
diffusers/src/diffusers/schedulers/scheduling_utils_flax.py/0
{ "file_path": "diffusers/src/diffusers/schedulers/scheduling_utils_flax.py", "repo_id": "diffusers", "token_count": 4945 }
187
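The Flax scheduler utilities above expose a few module-level helpers. The sketch below is a toy example under stated assumptions (it requires `jax` and `flax` to be installed, and the shapes and timesteps are arbitrary); it spells out the forward-diffusion math that `add_noise_common` wraps via `get_sqrt_alpha_prod`:

```python
import jax.numpy as jnp

from diffusers.schedulers.scheduling_utils_flax import (
    betas_for_alpha_bar,
    broadcast_to_shape_from_left,
)

# Cosine ("squaredcos_cap_v2") schedule and its cumulative alpha product.
betas = betas_for_alpha_bar(num_diffusion_timesteps=1000)
alphas_cumprod = jnp.cumprod(1.0 - betas, axis=0)

# Forward diffusion q(x_t | x_0) for a toy batch of two samples.
x0 = jnp.zeros((2, 3, 8, 8))
noise = jnp.ones_like(x0)
timesteps = jnp.array([10, 500])
sqrt_alpha_prod = broadcast_to_shape_from_left(alphas_cumprod[timesteps] ** 0.5, x0.shape)
sqrt_one_minus = broadcast_to_shape_from_left((1 - alphas_cumprod[timesteps]) ** 0.5, x0.shape)
noisy = sqrt_alpha_prod * x0 + sqrt_one_minus * noise
print(noisy.shape)  # (2, 3, 8, 8)
```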
# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


class AudioDiffusionPipeline(metaclass=DummyObject):
    _backends = ["torch", "librosa"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "librosa"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "librosa"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "librosa"])


class Mel(metaclass=DummyObject):
    _backends = ["torch", "librosa"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "librosa"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "librosa"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "librosa"])
diffusers/src/diffusers/utils/dummy_torch_and_librosa_objects.py/0
{ "file_path": "diffusers/src/diffusers/utils/dummy_torch_and_librosa_objects.py", "repo_id": "diffusers", "token_count": 397 }
188
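The dummy objects above exist so that importing `diffusers` never fails when an optional backend is missing. Purely as an illustration, `FakeLibrosaPipeline` below is a made-up class (not a real diffusers object) using the same pattern; instantiating it raises a readable `ImportError` only when `torch` or `librosa` is absent:

```python
from diffusers.utils import DummyObject, requires_backends


class FakeLibrosaPipeline(metaclass=DummyObject):
    _backends = ["torch", "librosa"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "librosa"])


try:
    FakeLibrosaPipeline()  # raises only if one of the listed backends is not installed
except ImportError as err:
    print(err)
```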
---
{{ card_data }}
---

<!-- This model card has been generated automatically according to the information the training script had
access to. You should probably proofread and complete it, then remove this comment. -->

{{ model_description }}

## Intended uses & limitations

#### How to use

```python
# TODO: add an example code snippet for running this diffusion pipeline
```

#### Limitations and bias

[TODO: provide examples of latent issues and potential remediations]

## Training details

[TODO: describe the data used to train the model]

diffusers/src/diffusers/utils/model_card_template.md/0
{ "file_path": "diffusers/src/diffusers/utils/model_card_template.md", "repo_id": "diffusers", "token_count": 138 }
189
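The Jinja template above is rendered through `ModelCard.from_template`. A hedged sketch of how the two placeholders get filled in (`template_path` points at a hypothetical local copy of the template, and `jinja2` must be installed):

```python
from huggingface_hub import ModelCard, ModelCardData

card = ModelCard.from_template(
    card_data=ModelCardData(library_name="diffusers", license="apache-2.0"),
    template_path="model_card_template.md",  # hypothetical local copy of the template shown above
    model_description="A hypothetical fine-tuned diffusion model.",
)
print(card.data.library_name)  # "diffusers", parsed back from the rendered front matter
```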
# Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys import unittest import torch from transformers import AutoTokenizer, T5EncoderModel from diffusers import ( AutoencoderKLWan, FlowMatchEulerDiscreteScheduler, WanPipeline, WanTransformer3DModel, ) from diffusers.utils.testing_utils import ( floats_tensor, require_peft_backend, skip_mps, ) sys.path.append(".") from utils import PeftLoraLoaderMixinTests # noqa: E402 @require_peft_backend @skip_mps class WanLoRATests(unittest.TestCase, PeftLoraLoaderMixinTests): pipeline_class = WanPipeline scheduler_cls = FlowMatchEulerDiscreteScheduler scheduler_classes = [FlowMatchEulerDiscreteScheduler] scheduler_kwargs = {} transformer_kwargs = { "patch_size": (1, 2, 2), "num_attention_heads": 2, "attention_head_dim": 12, "in_channels": 16, "out_channels": 16, "text_dim": 32, "freq_dim": 256, "ffn_dim": 32, "num_layers": 2, "cross_attn_norm": True, "qk_norm": "rms_norm_across_heads", "rope_max_seq_len": 32, } transformer_cls = WanTransformer3DModel vae_kwargs = { "base_dim": 3, "z_dim": 16, "dim_mult": [1, 1, 1, 1], "num_res_blocks": 1, "temperal_downsample": [False, True, True], } vae_cls = AutoencoderKLWan has_two_text_encoders = True tokenizer_cls, tokenizer_id = AutoTokenizer, "hf-internal-testing/tiny-random-t5" text_encoder_cls, text_encoder_id = T5EncoderModel, "hf-internal-testing/tiny-random-t5" text_encoder_target_modules = ["q", "k", "v", "o"] @property def output_shape(self): return (1, 9, 32, 32, 3) def get_dummy_inputs(self, with_generator=True): batch_size = 1 sequence_length = 16 num_channels = 4 num_frames = 9 num_latent_frames = 3 # (num_frames - 1) // temporal_compression_ratio + 1 sizes = (4, 4) generator = torch.manual_seed(0) noise = floats_tensor((batch_size, num_latent_frames, num_channels) + sizes) input_ids = torch.randint(1, sequence_length, size=(batch_size, sequence_length), generator=generator) pipeline_inputs = { "prompt": "", "num_frames": num_frames, "num_inference_steps": 1, "guidance_scale": 6.0, "height": 32, "width": 32, "max_sequence_length": sequence_length, "output_type": "np", } if with_generator: pipeline_inputs.update({"generator": generator}) return noise, input_ids, pipeline_inputs def test_simple_inference_with_text_lora_denoiser_fused_multi(self): super().test_simple_inference_with_text_lora_denoiser_fused_multi(expected_atol=9e-3) def test_simple_inference_with_text_denoiser_lora_unfused(self): super().test_simple_inference_with_text_denoiser_lora_unfused(expected_atol=9e-3) @unittest.skip("Not supported in Wan.") def test_simple_inference_with_text_denoiser_block_scale(self): pass @unittest.skip("Not supported in Wan.") def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self): pass @unittest.skip("Not supported in Wan.") def test_modify_padding_mode(self): pass @unittest.skip("Text encoder LoRA is not supported in Wan.") def test_simple_inference_with_partial_text_lora(self): pass @unittest.skip("Text encoder LoRA is not supported in Wan.") def 
test_simple_inference_with_text_lora(self): pass @unittest.skip("Text encoder LoRA is not supported in Wan.") def test_simple_inference_with_text_lora_and_scale(self): pass @unittest.skip("Text encoder LoRA is not supported in Wan.") def test_simple_inference_with_text_lora_fused(self): pass @unittest.skip("Text encoder LoRA is not supported in Wan.") def test_simple_inference_with_text_lora_save_load(self): pass
diffusers/tests/lora/test_lora_layers_wan.py/0
{ "file_path": "diffusers/tests/lora/test_lora_layers_wan.py", "repo_id": "diffusers", "token_count": 1957 }
190
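The Wan LoRA test above hard-codes `num_latent_frames = 3`; the arithmetic behind that comment, assuming the two temporal downsample stages of the tiny VAE give a temporal compression ratio of 4, is simply:

```python
num_frames = 9
temporal_compression_ratio = 4  # assumed: two temporal downsample stages -> 2 ** 2
num_latent_frames = (num_frames - 1) // temporal_compression_ratio + 1
assert num_latent_frames == 3
```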
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import gc import unittest import torch from parameterized import parameterized from diffusers import AutoencoderTiny from diffusers.utils.testing_utils import ( backend_empty_cache, enable_full_determinism, floats_tensor, load_hf_numpy, slow, torch_all_close, torch_device, ) from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class AutoencoderTinyTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase): model_class = AutoencoderTiny main_input_name = "sample" base_precision = 1e-2 def get_autoencoder_tiny_config(self, block_out_channels=None): block_out_channels = (len(block_out_channels) * [32]) if block_out_channels is not None else [32, 32] init_dict = { "in_channels": 3, "out_channels": 3, "encoder_block_out_channels": block_out_channels, "decoder_block_out_channels": block_out_channels, "num_encoder_blocks": [b // min(block_out_channels) for b in block_out_channels], "num_decoder_blocks": [b // min(block_out_channels) for b in reversed(block_out_channels)], } return init_dict @property def dummy_input(self): batch_size = 4 num_channels = 3 sizes = (32, 32) image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device) return {"sample": image} @property def input_shape(self): return (3, 32, 32) @property def output_shape(self): return (3, 32, 32) def prepare_init_args_and_inputs_for_common(self): init_dict = self.get_autoencoder_tiny_config() inputs_dict = self.dummy_input return init_dict, inputs_dict @unittest.skip("Model doesn't yet support smaller resolution.") def test_enable_disable_tiling(self): pass def test_enable_disable_slicing(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() torch.manual_seed(0) model = self.model_class(**init_dict).to(torch_device) inputs_dict.update({"return_dict": False}) torch.manual_seed(0) output_without_slicing = model(**inputs_dict)[0] torch.manual_seed(0) model.enable_slicing() output_with_slicing = model(**inputs_dict)[0] self.assertLess( (output_without_slicing.detach().cpu().numpy() - output_with_slicing.detach().cpu().numpy()).max(), 0.5, "VAE slicing should not affect the inference results", ) torch.manual_seed(0) model.disable_slicing() output_without_slicing_2 = model(**inputs_dict)[0] self.assertEqual( output_without_slicing.detach().cpu().numpy().all(), output_without_slicing_2.detach().cpu().numpy().all(), "Without slicing outputs should match with the outputs when slicing is manually disabled.", ) @unittest.skip("Test not supported.") def test_outputs_equivalence(self): pass @unittest.skip("Test not supported.") def test_forward_with_norm_groups(self): pass def test_gradient_checkpointing_is_applied(self): expected_set = {"DecoderTiny", "EncoderTiny"} super().test_gradient_checkpointing_is_applied(expected_set=expected_set) def test_effective_gradient_checkpointing(self): if not self.model_class._supports_gradient_checkpointing: return # Skip test if model 
does not support gradient checkpointing # enable deterministic behavior for gradient checkpointing init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() inputs_dict_copy = copy.deepcopy(inputs_dict) torch.manual_seed(0) model = self.model_class(**init_dict) model.to(torch_device) assert not model.is_gradient_checkpointing and model.training out = model(**inputs_dict).sample # run the backwards pass on the model. For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model.zero_grad() labels = torch.randn_like(out) loss = (out - labels).mean() loss.backward() # re-instantiate the model now enabling gradient checkpointing torch.manual_seed(0) model_2 = self.model_class(**init_dict) # clone model model_2.load_state_dict(model.state_dict()) model_2.to(torch_device) model_2.enable_gradient_checkpointing() assert model_2.is_gradient_checkpointing and model_2.training out_2 = model_2(**inputs_dict_copy).sample # run the backwards pass on the model. For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model_2.zero_grad() loss_2 = (out_2 - labels).mean() loss_2.backward() # compare the output and parameters gradients self.assertTrue((loss - loss_2).abs() < 1e-3) named_params = dict(model.named_parameters()) named_params_2 = dict(model_2.named_parameters()) for name, param in named_params.items(): if "encoder.layers" in name: continue self.assertTrue(torch_all_close(param.grad.data, named_params_2[name].grad.data, atol=3e-2)) @unittest.skip( "The forward pass of AutoencoderTiny creates a torch.float32 tensor. This causes inference in compute_dtype=torch.bfloat16 to fail. To fix:\n" "1. Change the forward pass to be dtype agnostic.\n" "2. Unskip this test." ) def test_layerwise_casting_inference(self): pass @unittest.skip( "The forward pass of AutoencoderTiny creates a torch.float32 tensor. This causes inference in compute_dtype=torch.bfloat16 to fail. To fix:\n" "1. Change the forward pass to be dtype agnostic.\n" "2. Unskip this test." 
) def test_layerwise_casting_memory(self): pass @slow class AutoencoderTinyIntegrationTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() backend_empty_cache(torch_device) def get_file_format(self, seed, shape): return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy" def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fp16=False): dtype = torch.float16 if fp16 else torch.float32 image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype) return image def get_sd_vae_model(self, model_id="hf-internal-testing/taesd-diffusers", fp16=False): torch_dtype = torch.float16 if fp16 else torch.float32 model = AutoencoderTiny.from_pretrained(model_id, torch_dtype=torch_dtype) model.to(torch_device).eval() return model @parameterized.expand( [ [(1, 4, 73, 97), (1, 3, 584, 776)], [(1, 4, 97, 73), (1, 3, 776, 584)], [(1, 4, 49, 65), (1, 3, 392, 520)], [(1, 4, 65, 49), (1, 3, 520, 392)], [(1, 4, 49, 49), (1, 3, 392, 392)], ] ) def test_tae_tiling(self, in_shape, out_shape): model = self.get_sd_vae_model() model.enable_tiling() with torch.no_grad(): zeros = torch.zeros(in_shape).to(torch_device) dec = model.decode(zeros).sample assert dec.shape == out_shape def test_stable_diffusion(self): model = self.get_sd_vae_model() image = self.get_sd_image(seed=33) with torch.no_grad(): sample = model(image).sample assert sample.shape == image.shape output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu() expected_output_slice = torch.tensor([0.0093, 0.6385, -0.1274, 0.1631, -0.1762, 0.5232, -0.3108, -0.0382]) assert torch_all_close(output_slice, expected_output_slice, atol=3e-3) @parameterized.expand([(True,), (False,)]) def test_tae_roundtrip(self, enable_tiling): # load the autoencoder model = self.get_sd_vae_model() if enable_tiling: model.enable_tiling() # make a black image with a white square in the middle, # which is large enough to split across multiple tiles image = -torch.ones(1, 3, 1024, 1024, device=torch_device) image[..., 256:768, 256:768] = 1.0 # round-trip the image through the autoencoder with torch.no_grad(): sample = model(image).sample # the autoencoder reconstruction should match original image, sorta def downscale(x): return torch.nn.functional.avg_pool2d(x, model.spatial_scale_factor) assert torch_all_close(downscale(sample), downscale(image), atol=0.125)
diffusers/tests/models/autoencoders/test_models_autoencoder_tiny.py/0
{ "file_path": "diffusers/tests/models/autoencoders/test_models_autoencoder_tiny.py", "repo_id": "diffusers", "token_count": 4073 }
191
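The integration tests above exercise the tiny autoencoder end to end. A minimal round-trip sketch with the same test checkpoint (the input is random, so only the shapes are meaningful):

```python
import torch

from diffusers import AutoencoderTiny

vae = AutoencoderTiny.from_pretrained("hf-internal-testing/taesd-diffusers").eval()

image = torch.rand(1, 3, 256, 256) * 2.0 - 1.0  # random image in [-1, 1]
with torch.no_grad():
    latents = vae.encode(image).latents
    reconstruction = vae.decode(latents).sample
print(latents.shape, reconstruction.shape)  # (1, 4, 32, 32) and (1, 3, 256, 256)
```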
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import torch

from diffusers import AllegroTransformer3DModel
from diffusers.utils.testing_utils import (
    enable_full_determinism,
    torch_device,
)

from ..test_modeling_common import ModelTesterMixin


enable_full_determinism()


class AllegroTransformerTests(ModelTesterMixin, unittest.TestCase):
    model_class = AllegroTransformer3DModel
    main_input_name = "hidden_states"
    uses_custom_attn_processor = True

    @property
    def dummy_input(self):
        batch_size = 2
        num_channels = 4
        num_frames = 2
        height = 8
        width = 8
        embedding_dim = 16
        sequence_length = 16

        hidden_states = torch.randn((batch_size, num_channels, num_frames, height, width)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim // 2)).to(torch_device)
        timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "encoder_hidden_states": encoder_hidden_states,
            "timestep": timestep,
        }

    @property
    def input_shape(self):
        return (4, 2, 8, 8)

    @property
    def output_shape(self):
        return (4, 2, 8, 8)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            # Product of num_attention_heads * attention_head_dim must be divisible by 16 for 3D positional embeddings.
            "num_attention_heads": 2,
            "attention_head_dim": 8,
            "in_channels": 4,
            "out_channels": 4,
            "num_layers": 1,
            "cross_attention_dim": 16,
            "sample_width": 8,
            "sample_height": 8,
            "sample_frames": 8,
            "caption_channels": 8,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_gradient_checkpointing_is_applied(self):
        expected_set = {"AllegroTransformer3DModel"}
        super().test_gradient_checkpointing_is_applied(expected_set=expected_set)
diffusers/tests/models/transformers/test_models_transformer_allegro.py/0
{ "file_path": "diffusers/tests/models/transformers/test_models_transformer_allegro.py", "repo_id": "diffusers", "token_count": 1079 }
192
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import torch

from diffusers import LTXVideoTransformer3DModel
from diffusers.utils.testing_utils import enable_full_determinism, torch_device

from ..test_modeling_common import ModelTesterMixin, TorchCompileTesterMixin


enable_full_determinism()


class LTXTransformerTests(ModelTesterMixin, unittest.TestCase):
    model_class = LTXVideoTransformer3DModel
    main_input_name = "hidden_states"
    uses_custom_attn_processor = True

    @property
    def dummy_input(self):
        batch_size = 2
        num_channels = 4
        num_frames = 2
        height = 16
        width = 16
        embedding_dim = 16
        sequence_length = 16

        hidden_states = torch.randn((batch_size, num_frames * height * width, num_channels)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device)
        encoder_attention_mask = torch.ones((batch_size, sequence_length)).bool().to(torch_device)
        timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "encoder_hidden_states": encoder_hidden_states,
            "timestep": timestep,
            "encoder_attention_mask": encoder_attention_mask,
            "num_frames": num_frames,
            "height": height,
            "width": width,
        }

    @property
    def input_shape(self):
        return (512, 4)

    @property
    def output_shape(self):
        return (512, 4)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 4,
            "out_channels": 4,
            "num_attention_heads": 2,
            "attention_head_dim": 8,
            "cross_attention_dim": 16,
            "num_layers": 1,
            "qk_norm": "rms_norm_across_heads",
            "caption_channels": 16,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_gradient_checkpointing_is_applied(self):
        expected_set = {"LTXVideoTransformer3DModel"}
        super().test_gradient_checkpointing_is_applied(expected_set=expected_set)


class LTXTransformerCompileTests(TorchCompileTesterMixin, unittest.TestCase):
    model_class = LTXVideoTransformer3DModel

    def prepare_init_args_and_inputs_for_common(self):
        return LTXTransformerTests().prepare_init_args_and_inputs_for_common()
diffusers/tests/models/transformers/test_models_transformer_ltx.py/0
{ "file_path": "diffusers/tests/models/transformers/test_models_transformer_ltx.py", "repo_id": "diffusers", "token_count": 1224 }
193
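The LTX dummy inputs above flatten the video latent into a packed token sequence; the `input_shape` of `(512, 4)` follows directly from the dummy dimensions:

```python
batch_size, num_frames, height, width, num_channels = 2, 2, 16, 16, 4
sequence_length = num_frames * height * width  # 512 packed video tokens per sample
assert (sequence_length, num_channels) == (512, 4)  # matches input_shape / output_shape above
```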
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np import torch from diffusers.models import ModelMixin, UNet3DConditionModel from diffusers.utils import logging from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, skip_mps, torch_device from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() logger = logging.get_logger(__name__) @skip_mps class UNet3DConditionModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase): model_class = UNet3DConditionModel main_input_name = "sample" @property def dummy_input(self): batch_size = 4 num_channels = 4 num_frames = 4 sizes = (16, 16) noise = floats_tensor((batch_size, num_channels, num_frames) + sizes).to(torch_device) time_step = torch.tensor([10]).to(torch_device) encoder_hidden_states = floats_tensor((batch_size, 4, 8)).to(torch_device) return {"sample": noise, "timestep": time_step, "encoder_hidden_states": encoder_hidden_states} @property def input_shape(self): return (4, 4, 16, 16) @property def output_shape(self): return (4, 4, 16, 16) def prepare_init_args_and_inputs_for_common(self): init_dict = { "block_out_channels": (4, 8), "norm_num_groups": 4, "down_block_types": ( "CrossAttnDownBlock3D", "DownBlock3D", ), "up_block_types": ("UpBlock3D", "CrossAttnUpBlock3D"), "cross_attention_dim": 8, "attention_head_dim": 2, "out_channels": 4, "in_channels": 4, "layers_per_block": 1, "sample_size": 16, } inputs_dict = self.dummy_input return init_dict, inputs_dict @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_enable_works(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict) model.enable_xformers_memory_efficient_attention() assert ( model.mid_block.attentions[0].transformer_blocks[0].attn1.processor.__class__.__name__ == "XFormersAttnProcessor" ), "xformers is not enabled" # Overriding to set `norm_num_groups` needs to be different for this model. def test_forward_with_norm_groups(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() init_dict["block_out_channels"] = (32, 64) init_dict["norm_num_groups"] = 32 model = self.model_class(**init_dict) model.to(torch_device) model.eval() with torch.no_grad(): output = model(**inputs_dict) if isinstance(output, dict): output = output.sample self.assertIsNotNone(output) expected_shape = inputs_dict["sample"].shape self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match") # Overriding since the UNet3D outputs a different structure. 
def test_determinism(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict) model.to(torch_device) model.eval() with torch.no_grad(): # Warmup pass when using mps (see #372) if torch_device == "mps" and isinstance(model, ModelMixin): model(**self.dummy_input) first = model(**inputs_dict) if isinstance(first, dict): first = first.sample second = model(**inputs_dict) if isinstance(second, dict): second = second.sample out_1 = first.cpu().numpy() out_2 = second.cpu().numpy() out_1 = out_1[~np.isnan(out_1)] out_2 = out_2[~np.isnan(out_2)] max_diff = np.amax(np.abs(out_1 - out_2)) self.assertLessEqual(max_diff, 1e-5) def test_model_attention_slicing(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() init_dict["block_out_channels"] = (16, 32) init_dict["attention_head_dim"] = 8 model = self.model_class(**init_dict) model.to(torch_device) model.eval() model.set_attention_slice("auto") with torch.no_grad(): output = model(**inputs_dict) assert output is not None model.set_attention_slice("max") with torch.no_grad(): output = model(**inputs_dict) assert output is not None model.set_attention_slice(2) with torch.no_grad(): output = model(**inputs_dict) assert output is not None def test_feed_forward_chunking(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() init_dict["block_out_channels"] = (32, 64) init_dict["norm_num_groups"] = 32 model = self.model_class(**init_dict) model.to(torch_device) model.eval() with torch.no_grad(): output = model(**inputs_dict)[0] model.enable_forward_chunking() with torch.no_grad(): output_2 = model(**inputs_dict)[0] self.assertEqual(output.shape, output_2.shape, "Shape doesn't match") assert np.abs(output.cpu() - output_2.cpu()).max() < 1e-2
diffusers/tests/models/unets/test_models_unet_3d_condition.py/0
{ "file_path": "diffusers/tests/models/unets/test_models_unet_3d_condition.py", "repo_id": "diffusers", "token_count": 2728 }
194
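The 3D UNet tests above poke at attention slicing through `set_attention_slice`. The sketch below reuses the same tiny configuration as `test_model_attention_slicing` outside the test harness; it is illustrative only and runs on CPU:

```python
import torch

from diffusers import UNet3DConditionModel

model = UNet3DConditionModel(
    block_out_channels=(16, 32),
    norm_num_groups=4,
    down_block_types=("CrossAttnDownBlock3D", "DownBlock3D"),
    up_block_types=("UpBlock3D", "CrossAttnUpBlock3D"),
    cross_attention_dim=8,
    attention_head_dim=8,
    in_channels=4,
    out_channels=4,
    layers_per_block=1,
    sample_size=16,
).eval()
model.set_attention_slice("auto")  # also accepts "max" or an integer slice size

sample = torch.randn(1, 4, 4, 16, 16)  # (batch, channels, frames, height, width)
encoder_hidden_states = torch.randn(1, 4, 8)
with torch.no_grad():
    out = model(sample, timestep=torch.tensor([10]), encoder_hidden_states=encoder_hidden_states).sample
print(out.shape)  # same shape as the input sample
```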
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
from pathlib import Path
from tempfile import TemporaryDirectory

from diffusers.utils.hub_utils import load_or_create_model_card, populate_model_card


class CreateModelCardTest(unittest.TestCase):
    def test_generate_model_card_with_library_name(self):
        with TemporaryDirectory() as tmpdir:
            file_path = Path(tmpdir) / "README.md"
            file_path.write_text("---\nlibrary_name: foo\n---\nContent\n")
            model_card = load_or_create_model_card(file_path)
            populate_model_card(model_card)

        assert model_card.data.library_name == "foo"
diffusers/tests/others/test_hub_utils.py/0
{ "file_path": "diffusers/tests/others/test_hub_utils.py", "repo_id": "diffusers", "token_count": 399 }
195
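The hub-utils test above covers the local-README path. A slightly fuller sketch of the same two helpers, again against a throwaway local card (the front-matter values and tags are arbitrary placeholders):

```python
from pathlib import Path
from tempfile import TemporaryDirectory

from diffusers.utils.hub_utils import load_or_create_model_card, populate_model_card

with TemporaryDirectory() as tmpdir:
    readme = Path(tmpdir) / "README.md"
    readme.write_text("---\nlibrary_name: my-lib\n---\n# A hypothetical model\n")
    card = load_or_create_model_card(readme)
    card = populate_model_card(card, tags=["demo"])
print(card.data.library_name, card.data.tags)  # my-lib ['demo']
```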
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np import torch from diffusers import DDPMPipeline, DDPMScheduler, UNet2DModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch_accelerator, slow, torch_device enable_full_determinism() class DDPMPipelineFastTests(unittest.TestCase): @property def dummy_uncond_unet(self): torch.manual_seed(0) model = UNet2DModel( block_out_channels=(4, 8), layers_per_block=1, norm_num_groups=4, sample_size=8, in_channels=3, out_channels=3, down_block_types=("DownBlock2D", "AttnDownBlock2D"), up_block_types=("AttnUpBlock2D", "UpBlock2D"), ) return model def test_fast_inference(self): device = "cpu" unet = self.dummy_uncond_unet scheduler = DDPMScheduler() ddpm = DDPMPipeline(unet=unet, scheduler=scheduler) ddpm.to(device) ddpm.set_progress_bar_config(disable=None) generator = torch.Generator(device=device).manual_seed(0) image = ddpm(generator=generator, num_inference_steps=2, output_type="np").images generator = torch.Generator(device=device).manual_seed(0) image_from_tuple = ddpm(generator=generator, num_inference_steps=2, output_type="np", return_dict=False)[0] image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 8, 8, 3) expected_slice = np.array([0.0, 0.9996672, 0.00329116, 1.0, 0.9995991, 1.0, 0.0060907, 0.00115037, 0.0]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 def test_inference_predict_sample(self): unet = self.dummy_uncond_unet scheduler = DDPMScheduler(prediction_type="sample") ddpm = DDPMPipeline(unet=unet, scheduler=scheduler) ddpm.to(torch_device) ddpm.set_progress_bar_config(disable=None) generator = torch.manual_seed(0) image = ddpm(generator=generator, num_inference_steps=2, output_type="np").images generator = torch.manual_seed(0) image_eps = ddpm(generator=generator, num_inference_steps=2, output_type="np")[0] image_slice = image[0, -3:, -3:, -1] image_eps_slice = image_eps[0, -3:, -3:, -1] assert image.shape == (1, 8, 8, 3) tolerance = 1e-2 if torch_device != "mps" else 3e-2 assert np.abs(image_slice.flatten() - image_eps_slice.flatten()).max() < tolerance @slow @require_torch_accelerator class DDPMPipelineIntegrationTests(unittest.TestCase): def test_inference_cifar10(self): model_id = "google/ddpm-cifar10-32" unet = UNet2DModel.from_pretrained(model_id) scheduler = DDPMScheduler.from_pretrained(model_id) ddpm = DDPMPipeline(unet=unet, scheduler=scheduler) ddpm.to(torch_device) ddpm.set_progress_bar_config(disable=None) generator = torch.manual_seed(0) image = ddpm(generator=generator, output_type="np").images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) expected_slice = np.array([0.4200, 0.3588, 0.1939, 0.3847, 0.3382, 0.2647, 0.4155, 0.3582, 0.3385]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
diffusers/tests/pipelines/ddpm/test_ddpm.py/0
{ "file_path": "diffusers/tests/pipelines/ddpm/test_ddpm.py", "repo_id": "diffusers", "token_count": 1764 }
196
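Mirroring the integration test above, a minimal DDPM sampling sketch with the same public checkpoint (far fewer steps than the scheduler's default of 1000, purely to keep it quick; sample quality suffers accordingly):

```python
import torch

from diffusers import DDPMPipeline, DDPMScheduler, UNet2DModel

model_id = "google/ddpm-cifar10-32"
unet = UNet2DModel.from_pretrained(model_id)
scheduler = DDPMScheduler.from_pretrained(model_id)
pipe = DDPMPipeline(unet=unet, scheduler=scheduler)

generator = torch.manual_seed(0)
image = pipe(generator=generator, num_inference_steps=25, output_type="np").images[0]
print(image.shape)  # (32, 32, 3)
```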
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest import numpy as np import torch from transformers import ( CLIPImageProcessor, CLIPVisionModelWithProjection, ) from diffusers import ( StableDiffusionImg2ImgPipeline, StableDiffusionInpaintPipeline, StableDiffusionPipeline, StableDiffusionXLImg2ImgPipeline, StableDiffusionXLInpaintPipeline, StableDiffusionXLPipeline, ) from diffusers.image_processor import IPAdapterMaskProcessor from diffusers.utils import load_image from diffusers.utils.testing_utils import ( Expectations, backend_empty_cache, enable_full_determinism, is_flaky, load_pt, numpy_cosine_similarity_distance, require_torch_accelerator, slow, torch_device, ) enable_full_determinism() class IPAdapterNightlyTestsMixin(unittest.TestCase): dtype = torch.float16 def setUp(self): # clean up the VRAM before each test super().setUp() gc.collect() backend_empty_cache(torch_device) def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() backend_empty_cache(torch_device) def get_image_encoder(self, repo_id, subfolder): image_encoder = CLIPVisionModelWithProjection.from_pretrained( repo_id, subfolder=subfolder, torch_dtype=self.dtype ).to(torch_device) return image_encoder def get_image_processor(self, repo_id): image_processor = CLIPImageProcessor.from_pretrained(repo_id) return image_processor def get_dummy_inputs( self, for_image_to_image=False, for_inpainting=False, for_sdxl=False, for_masks=False, for_instant_style=False ): image = load_image( "https://user-images.githubusercontent.com/24734142/266492875-2d50d223-8475-44f0-a7c6-08b51cb53572.png" ) if for_sdxl: image = image.resize((1024, 1024)) input_kwargs = { "prompt": "best quality, high quality", "negative_prompt": "monochrome, lowres, bad anatomy, worst quality, low quality", "num_inference_steps": 5, "generator": torch.Generator(device="cpu").manual_seed(33), "ip_adapter_image": image, "output_type": "np", } if for_image_to_image: image = load_image("https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/vermeer.jpg") ip_image = load_image("https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/river.png") if for_sdxl: image = image.resize((1024, 1024)) ip_image = ip_image.resize((1024, 1024)) input_kwargs.update({"image": image, "ip_adapter_image": ip_image}) elif for_inpainting: image = load_image("https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/inpaint_image.png") mask = load_image("https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/mask.png") ip_image = load_image("https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/girl.png") if for_sdxl: image = image.resize((1024, 1024)) mask = mask.resize((1024, 1024)) ip_image = ip_image.resize((1024, 1024)) input_kwargs.update({"image": image, "mask_image": mask, "ip_adapter_image": ip_image}) elif for_masks: face_image1 = load_image( "https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/ip_mask_girl1.png" ) 
face_image2 = load_image( "https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/ip_mask_girl2.png" ) mask1 = load_image("https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/ip_mask_mask1.png") mask2 = load_image("https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/ip_mask_mask2.png") input_kwargs.update( { "ip_adapter_image": [[face_image1], [face_image2]], "cross_attention_kwargs": {"ip_adapter_masks": [mask1, mask2]}, } ) elif for_instant_style: composition_mask = load_image( "https://huggingface.co/datasets/OzzyGT/testing-resources/resolve/main/1024_whole_mask.png" ) female_mask = load_image( "https://huggingface.co/datasets/OzzyGT/testing-resources/resolve/main/ip_adapter_None_20240321125641_mask.png" ) male_mask = load_image( "https://huggingface.co/datasets/OzzyGT/testing-resources/resolve/main/ip_adapter_None_20240321125344_mask.png" ) background_mask = load_image( "https://huggingface.co/datasets/OzzyGT/testing-resources/resolve/main/ip_adapter_6_20240321130722_mask.png" ) ip_composition_image = load_image( "https://huggingface.co/datasets/OzzyGT/testing-resources/resolve/main/ip_adapter__20240321125152.png" ) ip_female_style = load_image( "https://huggingface.co/datasets/OzzyGT/testing-resources/resolve/main/ip_adapter__20240321125625.png" ) ip_male_style = load_image( "https://huggingface.co/datasets/OzzyGT/testing-resources/resolve/main/ip_adapter__20240321125329.png" ) ip_background = load_image( "https://huggingface.co/datasets/OzzyGT/testing-resources/resolve/main/ip_adapter__20240321130643.png" ) input_kwargs.update( { "ip_adapter_image": [ip_composition_image, [ip_female_style, ip_male_style, ip_background]], "cross_attention_kwargs": { "ip_adapter_masks": [[composition_mask], [female_mask, male_mask, background_mask]] }, } ) return input_kwargs @slow @require_torch_accelerator class IPAdapterSDIntegrationTests(IPAdapterNightlyTestsMixin): def test_text_to_image(self): image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="models/image_encoder") pipeline = StableDiffusionPipeline.from_pretrained( "stable-diffusion-v1-5/stable-diffusion-v1-5", image_encoder=image_encoder, safety_checker=None, torch_dtype=self.dtype, ) pipeline.to(torch_device) pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin") inputs = self.get_dummy_inputs() images = pipeline(**inputs).images image_slice = images[0, :3, :3, -1].flatten() expected_slice = np.array([0.80810547, 0.88183594, 0.9296875, 0.9189453, 0.9848633, 1.0, 0.97021484, 1.0, 1.0]) max_diff = numpy_cosine_similarity_distance(image_slice, expected_slice) assert max_diff < 5e-4 pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter-plus_sd15.bin") inputs = self.get_dummy_inputs() images = pipeline(**inputs).images image_slice = images[0, :3, :3, -1].flatten() expected_slice = np.array( [0.30444336, 0.26513672, 0.22436523, 0.2758789, 0.25585938, 0.20751953, 0.25390625, 0.24633789, 0.21923828] ) max_diff = numpy_cosine_similarity_distance(image_slice, expected_slice) assert max_diff < 5e-4 def test_image_to_image(self): image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="models/image_encoder") pipeline = StableDiffusionImg2ImgPipeline.from_pretrained( "stable-diffusion-v1-5/stable-diffusion-v1-5", image_encoder=image_encoder, safety_checker=None, torch_dtype=self.dtype, ) pipeline.to(torch_device) pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="models", 
weight_name="ip-adapter_sd15.bin") inputs = self.get_dummy_inputs(for_image_to_image=True) images = pipeline(**inputs).images image_slice = images[0, :3, :3, -1].flatten() expected_slice = np.array( [0.22167969, 0.21875, 0.21728516, 0.22607422, 0.21948242, 0.23925781, 0.22387695, 0.25268555, 0.2722168] ) max_diff = numpy_cosine_similarity_distance(image_slice, expected_slice) assert max_diff < 5e-4 pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter-plus_sd15.bin") inputs = self.get_dummy_inputs(for_image_to_image=True) images = pipeline(**inputs).images image_slice = images[0, :3, :3, -1].flatten() expected_slice = np.array( [0.35913086, 0.265625, 0.26367188, 0.24658203, 0.19750977, 0.39990234, 0.15258789, 0.20336914, 0.5517578] ) max_diff = numpy_cosine_similarity_distance(image_slice, expected_slice) assert max_diff < 5e-4 def test_inpainting(self): image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="models/image_encoder") pipeline = StableDiffusionInpaintPipeline.from_pretrained( "stable-diffusion-v1-5/stable-diffusion-v1-5", image_encoder=image_encoder, safety_checker=None, torch_dtype=self.dtype, ) pipeline.to(torch_device) pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin") inputs = self.get_dummy_inputs(for_inpainting=True) images = pipeline(**inputs).images image_slice = images[0, :3, :3, -1].flatten() expected_slice = np.array( [0.27148438, 0.24047852, 0.22167969, 0.23217773, 0.21118164, 0.21142578, 0.21875, 0.20751953, 0.20019531] ) max_diff = numpy_cosine_similarity_distance(image_slice, expected_slice) assert max_diff < 5e-4 pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter-plus_sd15.bin") inputs = self.get_dummy_inputs(for_inpainting=True) images = pipeline(**inputs).images image_slice = images[0, :3, :3, -1].flatten() max_diff = numpy_cosine_similarity_distance(image_slice, expected_slice) assert max_diff < 5e-4 def test_text_to_image_model_cpu_offload(self): image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="models/image_encoder") pipeline = StableDiffusionPipeline.from_pretrained( "stable-diffusion-v1-5/stable-diffusion-v1-5", image_encoder=image_encoder, safety_checker=None, torch_dtype=self.dtype, ) pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin") pipeline.to(torch_device) inputs = self.get_dummy_inputs() output_without_offload = pipeline(**inputs).images pipeline.enable_model_cpu_offload(device=torch_device) inputs = self.get_dummy_inputs() output_with_offload = pipeline(**inputs).images max_diff = np.abs(output_with_offload - output_without_offload).max() self.assertLess(max_diff, 1e-3, "CPU offloading should not affect the inference results") offloaded_modules = [ v for k, v in pipeline.components.items() if isinstance(v, torch.nn.Module) and k not in pipeline._exclude_from_cpu_offload ] ( self.assertTrue(all(v.device.type == "cpu" for v in offloaded_modules)), f"Not offloaded: {[v for v in offloaded_modules if v.device.type != 'cpu']}", ) def test_text_to_image_full_face(self): image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="models/image_encoder") pipeline = StableDiffusionPipeline.from_pretrained( "stable-diffusion-v1-5/stable-diffusion-v1-5", image_encoder=image_encoder, safety_checker=None, torch_dtype=self.dtype, ) pipeline.to(torch_device) pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="models", 
weight_name="ip-adapter-full-face_sd15.bin") pipeline.set_ip_adapter_scale(0.7) inputs = self.get_dummy_inputs() images = pipeline(**inputs).images image_slice = images[0, :3, :3, -1].flatten() expected_slice = np.array([0.1704, 0.1296, 0.1272, 0.2212, 0.1514, 0.1479, 0.4172, 0.4263, 0.4360]) max_diff = numpy_cosine_similarity_distance(image_slice, expected_slice) assert max_diff < 5e-4 def test_unload(self): image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="models/image_encoder") pipeline = StableDiffusionPipeline.from_pretrained( "stable-diffusion-v1-5/stable-diffusion-v1-5", image_encoder=image_encoder, safety_checker=None, torch_dtype=self.dtype, ) before_processors = [attn_proc.__class__ for attn_proc in pipeline.unet.attn_processors.values()] pipeline.to(torch_device) pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin") pipeline.set_ip_adapter_scale(0.7) pipeline.unload_ip_adapter() assert getattr(pipeline, "image_encoder") is None assert getattr(pipeline, "feature_extractor") is not None after_processors = [attn_proc.__class__ for attn_proc in pipeline.unet.attn_processors.values()] assert before_processors == after_processors @is_flaky def test_multi(self): image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="models/image_encoder") pipeline = StableDiffusionPipeline.from_pretrained( "stable-diffusion-v1-5/stable-diffusion-v1-5", image_encoder=image_encoder, safety_checker=None, torch_dtype=self.dtype, ) pipeline.to(torch_device) pipeline.load_ip_adapter( "h94/IP-Adapter", subfolder="models", weight_name=["ip-adapter_sd15.bin", "ip-adapter-plus_sd15.bin"] ) pipeline.set_ip_adapter_scale([0.7, 0.3]) inputs = self.get_dummy_inputs() ip_adapter_image = inputs["ip_adapter_image"] inputs["ip_adapter_image"] = [ip_adapter_image, [ip_adapter_image] * 2] images = pipeline(**inputs).images image_slice = images[0, :3, :3, -1].flatten() expected_slice = np.array([0.5234, 0.5352, 0.5625, 0.5713, 0.5947, 0.6206, 0.5786, 0.6187, 0.6494]) max_diff = numpy_cosine_similarity_distance(image_slice, expected_slice) assert max_diff < 5e-4 def test_text_to_image_face_id(self): pipeline = StableDiffusionPipeline.from_pretrained( "stable-diffusion-v1-5/stable-diffusion-v1-5", safety_checker=None, torch_dtype=self.dtype ) pipeline.to(torch_device) pipeline.load_ip_adapter( "h94/IP-Adapter-FaceID", subfolder=None, weight_name="ip-adapter-faceid_sd15.bin", image_encoder_folder=None, ) pipeline.set_ip_adapter_scale(0.7) inputs = self.get_dummy_inputs() id_embeds = load_pt( "https://huggingface.co/datasets/fabiorigano/testing-images/resolve/main/ai_face2.ipadpt", map_location=torch_device, )[0] id_embeds = id_embeds.reshape((2, 1, 1, 512)) inputs["ip_adapter_image_embeds"] = [id_embeds] inputs["ip_adapter_image"] = None images = pipeline(**inputs).images image_slice = images[0, :3, :3, -1].flatten() expected_slice = np.array([0.3237, 0.3186, 0.3406, 0.3154, 0.2942, 0.3220, 0.3188, 0.3528, 0.3242]) max_diff = numpy_cosine_similarity_distance(image_slice, expected_slice) assert max_diff < 5e-4 @slow @require_torch_accelerator class IPAdapterSDXLIntegrationTests(IPAdapterNightlyTestsMixin): def test_text_to_image_sdxl(self): image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="sdxl_models/image_encoder") feature_extractor = self.get_image_processor("laion/CLIP-ViT-bigG-14-laion2B-39B-b160k") pipeline = StableDiffusionXLPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", 
image_encoder=image_encoder, feature_extractor=feature_extractor, torch_dtype=self.dtype, ) pipeline.enable_model_cpu_offload(device=torch_device) pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="sdxl_models", weight_name="ip-adapter_sdxl.bin") inputs = self.get_dummy_inputs() images = pipeline(**inputs).images image_slice = images[0, :3, :3, -1].flatten() expected_slice = np.array( [ 0.09630299, 0.09551358, 0.08480701, 0.09070173, 0.09437338, 0.09264627, 0.08883232, 0.09287417, 0.09197289, ] ) max_diff = numpy_cosine_similarity_distance(image_slice, expected_slice) assert max_diff < 5e-4 image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="models/image_encoder") pipeline = StableDiffusionXLPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", image_encoder=image_encoder, feature_extractor=feature_extractor, torch_dtype=self.dtype, ) pipeline.to(torch_device) pipeline.load_ip_adapter( "h94/IP-Adapter", subfolder="sdxl_models", weight_name="ip-adapter-plus_sdxl_vit-h.bin", ) inputs = self.get_dummy_inputs() images = pipeline(**inputs).images image_slice = images[0, :3, :3, -1].flatten() expected_slice = np.array([0.0596, 0.0539, 0.0459, 0.0580, 0.0560, 0.0548, 0.0501, 0.0563, 0.0500]) max_diff = numpy_cosine_similarity_distance(image_slice, expected_slice) assert max_diff < 5e-4 def test_image_to_image_sdxl(self): image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="sdxl_models/image_encoder") feature_extractor = self.get_image_processor("laion/CLIP-ViT-bigG-14-laion2B-39B-b160k") pipeline = StableDiffusionXLImg2ImgPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", image_encoder=image_encoder, feature_extractor=feature_extractor, torch_dtype=self.dtype, ) pipeline.enable_model_cpu_offload(device=torch_device) pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="sdxl_models", weight_name="ip-adapter_sdxl.bin") inputs = self.get_dummy_inputs(for_image_to_image=True) images = pipeline(**inputs).images image_slice = images[0, :3, :3, -1].flatten() expected_slice = np.array( [ 0.06513795, 0.07009393, 0.07234055, 0.07426041, 0.07002589, 0.06415862, 0.07827643, 0.07962808, 0.07411247, ] ) assert np.allclose(image_slice, expected_slice, atol=1e-3) image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="models/image_encoder") feature_extractor = self.get_image_processor("laion/CLIP-ViT-bigG-14-laion2B-39B-b160k") pipeline = StableDiffusionXLImg2ImgPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", image_encoder=image_encoder, feature_extractor=feature_extractor, torch_dtype=self.dtype, ) pipeline.to(torch_device) pipeline.load_ip_adapter( "h94/IP-Adapter", subfolder="sdxl_models", weight_name="ip-adapter-plus_sdxl_vit-h.bin", ) inputs = self.get_dummy_inputs(for_image_to_image=True) images = pipeline(**inputs).images image_slice = images[0, :3, :3, -1].flatten() expected_slice = np.array( [ 0.07126552, 0.07025367, 0.07348302, 0.07580167, 0.07467338, 0.06918576, 0.07480252, 0.08279955, 0.08547315, ] ) assert np.allclose(image_slice, expected_slice, atol=1e-3) def test_inpainting_sdxl(self): image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="sdxl_models/image_encoder") feature_extractor = self.get_image_processor("laion/CLIP-ViT-bigG-14-laion2B-39B-b160k") pipeline = StableDiffusionXLInpaintPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", image_encoder=image_encoder, feature_extractor=feature_extractor, torch_dtype=self.dtype, ) 
pipeline.enable_model_cpu_offload(device=torch_device) pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="sdxl_models", weight_name="ip-adapter_sdxl.bin") inputs = self.get_dummy_inputs(for_inpainting=True) images = pipeline(**inputs).images image_slice = images[0, :3, :3, -1].flatten() image_slice.tolist() expected_slice = np.array( [0.14181179, 0.1493012, 0.14283323, 0.14602411, 0.14915377, 0.15015268, 0.14725655, 0.15009224, 0.15164584] ) max_diff = numpy_cosine_similarity_distance(image_slice, expected_slice) assert max_diff < 5e-4 image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="models/image_encoder") feature_extractor = self.get_image_processor("laion/CLIP-ViT-bigG-14-laion2B-39B-b160k") pipeline = StableDiffusionXLInpaintPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", image_encoder=image_encoder, feature_extractor=feature_extractor, torch_dtype=self.dtype, ) pipeline.to(torch_device) pipeline.load_ip_adapter( "h94/IP-Adapter", subfolder="sdxl_models", weight_name="ip-adapter-plus_sdxl_vit-h.bin", ) inputs = self.get_dummy_inputs(for_inpainting=True) images = pipeline(**inputs).images image_slice = images[0, :3, :3, -1].flatten() image_slice.tolist() expected_slice = np.array([0.1398, 0.1476, 0.1407, 0.1442, 0.1470, 0.1480, 0.1449, 0.1481, 0.1494]) max_diff = numpy_cosine_similarity_distance(image_slice, expected_slice) assert max_diff < 5e-4 def test_ip_adapter_mask(self): image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="models/image_encoder") pipeline = StableDiffusionXLPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", image_encoder=image_encoder, torch_dtype=self.dtype, ) pipeline.enable_model_cpu_offload(device=torch_device) pipeline.load_ip_adapter( "h94/IP-Adapter", subfolder="sdxl_models", weight_name="ip-adapter-plus-face_sdxl_vit-h.safetensors" ) pipeline.set_ip_adapter_scale(0.7) inputs = self.get_dummy_inputs(for_masks=True) mask = inputs["cross_attention_kwargs"]["ip_adapter_masks"][0] processor = IPAdapterMaskProcessor() mask = processor.preprocess(mask) inputs["cross_attention_kwargs"]["ip_adapter_masks"] = mask inputs["ip_adapter_image"] = inputs["ip_adapter_image"][0] images = pipeline(**inputs).images image_slice = images[0, :3, :3, -1].flatten() expected_slice = np.array( [0.7307304, 0.73450166, 0.73731124, 0.7377061, 0.7318013, 0.73720926, 0.74746597, 0.7409929, 0.74074936] ) max_diff = numpy_cosine_similarity_distance(image_slice, expected_slice) assert max_diff < 5e-4 def test_ip_adapter_multiple_masks(self): image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="models/image_encoder") pipeline = StableDiffusionXLPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", image_encoder=image_encoder, torch_dtype=self.dtype, ) pipeline.enable_model_cpu_offload(device=torch_device) pipeline.load_ip_adapter( "h94/IP-Adapter", subfolder="sdxl_models", weight_name=["ip-adapter-plus-face_sdxl_vit-h.safetensors"] * 2 ) pipeline.set_ip_adapter_scale([0.7] * 2) inputs = self.get_dummy_inputs(for_masks=True) masks = inputs["cross_attention_kwargs"]["ip_adapter_masks"] processor = IPAdapterMaskProcessor() masks = processor.preprocess(masks) inputs["cross_attention_kwargs"]["ip_adapter_masks"] = masks images = pipeline(**inputs).images image_slice = images[0, :3, :3, -1].flatten() expected_slice = np.array( [0.79474676, 0.7977683, 0.8013954, 0.7988008, 0.7970615, 0.8029355, 0.80614823, 0.8050743, 0.80627424] ) max_diff = 
numpy_cosine_similarity_distance(image_slice, expected_slice) assert max_diff < 5e-4 def test_instant_style_multiple_masks(self): image_encoder = CLIPVisionModelWithProjection.from_pretrained( "h94/IP-Adapter", subfolder="models/image_encoder", torch_dtype=torch.float16 ) pipeline = StableDiffusionXLPipeline.from_pretrained( "RunDiffusion/Juggernaut-XL-v9", torch_dtype=torch.float16, image_encoder=image_encoder, variant="fp16" ) pipeline.enable_model_cpu_offload(device=torch_device) pipeline.load_ip_adapter( ["ostris/ip-composition-adapter", "h94/IP-Adapter"], subfolder=["", "sdxl_models"], weight_name=[ "ip_plus_composition_sdxl.safetensors", "ip-adapter_sdxl_vit-h.safetensors", ], image_encoder_folder=None, ) scale_1 = { "down": [[0.0, 0.0, 1.0]], "mid": [[0.0, 0.0, 1.0]], "up": {"block_0": [[0.0, 0.0, 1.0], [1.0, 1.0, 1.0], [0.0, 0.0, 1.0]], "block_1": [[0.0, 0.0, 1.0]]}, } pipeline.set_ip_adapter_scale([1.0, scale_1]) inputs = self.get_dummy_inputs(for_instant_style=True) processor = IPAdapterMaskProcessor() masks1 = inputs["cross_attention_kwargs"]["ip_adapter_masks"][0] masks2 = inputs["cross_attention_kwargs"]["ip_adapter_masks"][1] masks1 = processor.preprocess(masks1, height=1024, width=1024) masks2 = processor.preprocess(masks2, height=1024, width=1024) masks2 = masks2.reshape(1, masks2.shape[0], masks2.shape[2], masks2.shape[3]) inputs["cross_attention_kwargs"]["ip_adapter_masks"] = [masks1, masks2] images = pipeline(**inputs).images image_slice = images[0, :3, :3, -1].flatten() expected_slices = Expectations( { ("xpu", 3): np.array( [ 0.2520, 0.1050, 0.1510, 0.0997, 0.0893, 0.0019, 0.0000, 0.0000, 0.0210, ] ), ("cuda", 7): np.array( [ 0.2323, 0.1026, 0.1338, 0.0638, 0.0662, 0.0000, 0.0000, 0.0000, 0.0199, ] ), ("cuda", 8): np.array( [ 0.2518, 0.1059, 0.1553, 0.0977, 0.0852, 0.0000, 0.0000, 0.0000, 0.0220, ] ), } ) expected_slice = expected_slices.get_expectation() max_diff = numpy_cosine_similarity_distance(image_slice, expected_slice) assert max_diff < 5e-4 def test_ip_adapter_multiple_masks_one_adapter(self): image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="models/image_encoder") pipeline = StableDiffusionXLPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", image_encoder=image_encoder, torch_dtype=self.dtype, ) pipeline.enable_model_cpu_offload(device=torch_device) pipeline.load_ip_adapter( "h94/IP-Adapter", subfolder="sdxl_models", weight_name=["ip-adapter-plus-face_sdxl_vit-h.safetensors"] ) pipeline.set_ip_adapter_scale([[0.7, 0.7]]) inputs = self.get_dummy_inputs(for_masks=True) masks = inputs["cross_attention_kwargs"]["ip_adapter_masks"] processor = IPAdapterMaskProcessor() masks = processor.preprocess(masks) masks = masks.reshape(1, masks.shape[0], masks.shape[2], masks.shape[3]) inputs["cross_attention_kwargs"]["ip_adapter_masks"] = [masks] ip_images = inputs["ip_adapter_image"] inputs["ip_adapter_image"] = [[image[0] for image in ip_images]] images = pipeline(**inputs).images image_slice = images[0, :3, :3, -1].flatten() expected_slice = np.array( [0.79474676, 0.7977683, 0.8013954, 0.7988008, 0.7970615, 0.8029355, 0.80614823, 0.8050743, 0.80627424] ) max_diff = numpy_cosine_similarity_distance(image_slice, expected_slice) assert max_diff < 5e-4
diffusers/tests/pipelines/ip_adapters/test_ip_adapter_stable_diffusion.py/0
{ "file_path": "diffusers/tests/pipelines/ip_adapters/test_ip_adapter_stable_diffusion.py", "repo_id": "diffusers", "token_count": 15564 }
197
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import random import unittest import numpy as np import torch from PIL import Image from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import ( AutoencoderKL, DPMSolverMultistepScheduler, LEditsPPPipelineStableDiffusionXL, UNet2DConditionModel, ) # from diffusers.image_processor import VaeImageProcessor from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, require_torch_accelerator, skip_mps, slow, torch_device, ) enable_full_determinism() @skip_mps class LEditsPPPipelineStableDiffusionXLFastTests(unittest.TestCase): pipeline_class = LEditsPPPipelineStableDiffusionXL def get_dummy_components(self, skip_first_text_encoder=False, time_cond_proj_dim=None): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, time_cond_proj_dim=time_cond_proj_dim, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), # SD2-specific config below attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, # 6 * 8 + 32 cross_attention_dim=64 if not skip_first_text_encoder else 32, ) scheduler = DPMSolverMultistepScheduler(algorithm_type="sde-dpmsolver++", solver_order=2) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, ) torch.manual_seed(0) image_encoder_config = CLIPVisionConfig( hidden_size=32, image_size=224, projection_dim=32, intermediate_size=37, num_attention_heads=4, num_channels=3, num_hidden_layers=5, patch_size=14, ) image_encoder = CLIPVisionModelWithProjection(image_encoder_config) feature_extractor = CLIPImageProcessor( crop_size=224, do_center_crop=True, do_normalize=True, do_resize=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], resample=3, size=224, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, # SD2-specific config below hidden_act="gelu", projection_dim=32, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config) tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": 
text_encoder if not skip_first_text_encoder else None, "tokenizer": tokenizer if not skip_first_text_encoder else None, "text_encoder_2": text_encoder_2, "tokenizer_2": tokenizer_2, "image_encoder": image_encoder, "feature_extractor": feature_extractor, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "generator": generator, "editing_prompt": ["wearing glasses", "sunshine"], "reverse_editing_direction": [False, True], "edit_guidance_scale": [10.0, 5.0], } return inputs def get_dummy_inversion_inputs(self, device, seed=0): images = floats_tensor((2, 3, 32, 32), rng=random.Random(0)).cpu().permute(0, 2, 3, 1) images = 255 * images image_1 = Image.fromarray(np.uint8(images[0])).convert("RGB") image_2 = Image.fromarray(np.uint8(images[1])).convert("RGB") if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "image": [image_1, image_2], "source_prompt": "", "source_guidance_scale": 3.5, "num_inversion_steps": 20, "skip": 0.15, "generator": generator, } return inputs def test_ledits_pp_inversion(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = LEditsPPPipelineStableDiffusionXL(**components) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inversion_inputs(device) inputs["image"] = inputs["image"][0] sd_pipe.invert(**inputs) assert sd_pipe.init_latents.shape == ( 1, 4, int(32 / sd_pipe.vae_scale_factor), int(32 / sd_pipe.vae_scale_factor), ) latent_slice = sd_pipe.init_latents[0, -1, -3:, -3:].to(device) expected_slice = np.array([-0.9084, -0.0367, 0.2940, 0.0839, 0.6890, 0.2651, -0.7103, 2.1090, -0.7821]) assert np.abs(latent_slice.flatten() - expected_slice).max() < 1e-3 def test_ledits_pp_inversion_batch(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = LEditsPPPipelineStableDiffusionXL(**components) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inversion_inputs(device) sd_pipe.invert(**inputs) assert sd_pipe.init_latents.shape == ( 2, 4, int(32 / sd_pipe.vae_scale_factor), int(32 / sd_pipe.vae_scale_factor), ) latent_slice = sd_pipe.init_latents[0, -1, -3:, -3:].to(device) expected_slice = np.array([0.2528, 0.1458, -0.2166, 0.4565, -0.5656, -1.0286, -0.9961, 0.5933, 1.1172]) assert np.abs(latent_slice.flatten() - expected_slice).max() < 1e-3 latent_slice = sd_pipe.init_latents[1, -1, -3:, -3:].to(device) expected_slice = np.array([-0.0796, 2.0583, 0.5500, 0.5358, 0.0282, -0.2803, -1.0470, 0.7024, -0.0072]) assert np.abs(latent_slice.flatten() - expected_slice).max() < 1e-3 def test_ledits_pp_warmup_steps(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() pipe = LEditsPPPipelineStableDiffusionXL(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inversion_inputs = self.get_dummy_inversion_inputs(device) inversion_inputs["image"] = inversion_inputs["image"][0] pipe.invert(**inversion_inputs) inputs = self.get_dummy_inputs(device) inputs["edit_warmup_steps"] = [0, 5] pipe(**inputs).images inputs["edit_warmup_steps"] = [5, 0] pipe(**inputs).images 
inputs["edit_warmup_steps"] = [5, 10] pipe(**inputs).images inputs["edit_warmup_steps"] = [10, 5] pipe(**inputs).images @slow @require_torch_accelerator class LEditsPPPipelineStableDiffusionXLSlowTests(unittest.TestCase): @classmethod def setUpClass(cls): raw_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/pix2pix/cat_6.png" ) raw_image = raw_image.convert("RGB").resize((512, 512)) cls.raw_image = raw_image def test_ledits_pp_edit(self): pipe = LEditsPPPipelineStableDiffusionXL.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", safety_checker=None, add_watermarker=None ) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) generator = torch.manual_seed(0) _ = pipe.invert(image=self.raw_image, generator=generator, num_zero_noise_steps=0) inputs = { "generator": generator, "editing_prompt": ["cat", "dog"], "reverse_editing_direction": [True, False], "edit_guidance_scale": [2.0, 4.0], "edit_threshold": [0.8, 0.8], } reconstruction = pipe(**inputs, output_type="np").images[0] output_slice = reconstruction[150:153, 140:143, -1] output_slice = output_slice.flatten() expected_slice = np.array( [0.56419, 0.44121838, 0.2765603, 0.5708484, 0.42763475, 0.30945742, 0.5387106, 0.4735807, 0.3547244] ) assert np.abs(output_slice - expected_slice).max() < 1e-3
diffusers/tests/pipelines/ledits_pp/test_ledits_pp_stable_diffusion_xl.py/0
{ "file_path": "diffusers/tests/pipelines/ledits_pp/test_ledits_pp_stable_diffusion_xl.py", "repo_id": "diffusers", "token_count": 5041 }
198
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, AutoPipelineForInpainting, PNDMScheduler, StableDiffusionPAGInpaintPipeline, UNet2DConditionModel, ) from diffusers.utils.testing_utils import ( backend_empty_cache, enable_full_determinism, floats_tensor, load_image, require_torch_accelerator, slow, torch_device, ) from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS, ) from ..test_pipelines_common import ( IPAdapterTesterMixin, PipelineFromPipeTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, ) enable_full_determinism() class StableDiffusionPAGInpaintPipelineFastTests( PipelineTesterMixin, IPAdapterTesterMixin, PipelineLatentTesterMixin, PipelineFromPipeTesterMixin, unittest.TestCase, ): pipeline_class = StableDiffusionPAGInpaintPipeline params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS.union({"pag_scale", "pag_adaptive_scale"}) batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS image_params = frozenset([]) image_latents_params = frozenset([]) callback_cfg_params = TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS.union( {"add_text_embeds", "add_time_ids", "mask", "masked_image_latents"} ) def get_dummy_components(self, time_cond_proj_dim=None): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), time_cond_proj_dim=time_cond_proj_dim, layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, ) scheduler = PNDMScheduler(skip_prk_steps=True) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, "image_encoder": None, } return components def get_dummy_inputs(self, device, seed=0): # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) image = image.cpu().permute(0, 2, 3, 1)[0] init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64)) # create mask image[8:, 8:, 
:] = 255 mask_image = Image.fromarray(np.uint8(image)).convert("L").resize((64, 64)) if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "image": init_image, "mask_image": mask_image, "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "strength": 1.0, "pag_scale": 0.9, "output_type": "np", } return inputs def test_pag_applied_layers(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() # base pipeline pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) # pag_applied_layers = ["mid","up","down"] should apply to all self-attention layers all_self_attn_layers = [k for k in pipe.unet.attn_processors.keys() if "attn1" in k] original_attn_procs = pipe.unet.attn_processors pag_layers = [ "down", "mid", "up", ] pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False) assert set(pipe.pag_attn_processors) == set(all_self_attn_layers) # pag_applied_layers = ["mid"], or ["mid.block_0"] or ["mid.block_0.attentions_0"] should apply to all self-attention layers in mid_block, i.e. # mid_block.attentions.0.transformer_blocks.0.attn1.processor # mid_block.attentions.0.transformer_blocks.1.attn1.processor all_self_attn_mid_layers = [ "mid_block.attentions.0.transformer_blocks.0.attn1.processor", # "mid_block.attentions.0.transformer_blocks.1.attn1.processor", ] pipe.unet.set_attn_processor(original_attn_procs.copy()) pag_layers = ["mid"] pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False) assert set(pipe.pag_attn_processors) == set(all_self_attn_mid_layers) pipe.unet.set_attn_processor(original_attn_procs.copy()) pag_layers = ["mid_block"] pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False) assert set(pipe.pag_attn_processors) == set(all_self_attn_mid_layers) pipe.unet.set_attn_processor(original_attn_procs.copy()) pag_layers = ["mid_block.attentions.0"] pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False) assert set(pipe.pag_attn_processors) == set(all_self_attn_mid_layers) # pag_applied_layers = ["mid.block_0.attentions_1"] does not exist in the model pipe.unet.set_attn_processor(original_attn_procs.copy()) pag_layers = ["mid_block.attentions.1"] with self.assertRaises(ValueError): pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False) # pag_applied_layers = "down" should apply to all self-attention layers in down_blocks # down_blocks.1.attentions.0.transformer_blocks.0.attn1.processor # down_blocks.1.attentions.0.transformer_blocks.1.attn1.processor # down_blocks.1.attentions.0.transformer_blocks.0.attn1.processor pipe.unet.set_attn_processor(original_attn_procs.copy()) pag_layers = ["down"] pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False) assert len(pipe.pag_attn_processors) == 2 pipe.unet.set_attn_processor(original_attn_procs.copy()) pag_layers = ["down_blocks.0"] with self.assertRaises(ValueError): pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False) pipe.unet.set_attn_processor(original_attn_procs.copy()) pag_layers = ["down_blocks.1"] pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, 
do_classifier_free_guidance=False) assert len(pipe.pag_attn_processors) == 2 pipe.unet.set_attn_processor(original_attn_procs.copy()) pag_layers = ["down_blocks.1.attentions.1"] pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False) assert len(pipe.pag_attn_processors) == 1 def test_pag_inference(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() pipe_pag = self.pipeline_class(**components, pag_applied_layers=["mid", "up", "down"]) pipe_pag = pipe_pag.to(device) pipe_pag.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = pipe_pag(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == ( 1, 64, 64, 3, ), f"the shape of the output image should be (1, 64, 64, 3) but got {image.shape}" expected_slice = np.array([0.7190, 0.5807, 0.6007, 0.5600, 0.6350, 0.6639, 0.5680, 0.5664, 0.5230]) max_diff = np.abs(image_slice.flatten() - expected_slice).max() assert max_diff < 1e-3, f"output is different from expected, {image_slice.flatten()}" def test_encode_prompt_works_in_isolation(self): extra_required_param_value_dict = { "device": torch.device(torch_device).type, "do_classifier_free_guidance": self.get_dummy_inputs(device=torch_device).get("guidance_scale", 1.0) > 1.0, } return super().test_encode_prompt_works_in_isolation(extra_required_param_value_dict, atol=1e-3, rtol=1e-3) @slow @require_torch_accelerator class StableDiffusionPAGPipelineIntegrationTests(unittest.TestCase): pipeline_class = StableDiffusionPAGInpaintPipeline repo_id = "runwayml/stable-diffusion-v1-5" def setUp(self): super().setUp() gc.collect() backend_empty_cache(torch_device) def tearDown(self): super().tearDown() gc.collect() backend_empty_cache(torch_device) def get_inputs(self, device, generator_device="cpu", seed=0, guidance_scale=7.0): img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png" mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png" init_image = load_image(img_url).convert("RGB") mask_image = load_image(mask_url).convert("RGB") generator = torch.Generator(device=generator_device).manual_seed(seed) inputs = { "prompt": "A majestic tiger sitting on a bench", "generator": generator, "image": init_image, "mask_image": mask_image, "strength": 0.8, "num_inference_steps": 3, "guidance_scale": guidance_scale, "pag_scale": 3.0, "output_type": "np", } return inputs def test_pag_cfg(self): pipeline = AutoPipelineForInpainting.from_pretrained(self.repo_id, enable_pag=True, torch_dtype=torch.float16) pipeline.enable_model_cpu_offload(device=torch_device) pipeline.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = pipeline(**inputs).images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array( [0.38793945, 0.4111328, 0.47924805, 0.39208984, 0.4165039, 0.41674805, 0.37060547, 0.36791992, 0.40625] ) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3, ( f"output is different from expected, {image_slice.flatten()}" ) def test_pag_uncond(self): pipeline = AutoPipelineForInpainting.from_pretrained(self.repo_id, enable_pag=True, torch_dtype=torch.float16) pipeline.enable_model_cpu_offload(device=torch_device) pipeline.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device, 
guidance_scale=0.0) image = pipeline(**inputs).images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array( [0.3876953, 0.40356445, 0.4934082, 0.39697266, 0.41674805, 0.41015625, 0.375, 0.36914062, 0.40649414] ) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3, ( f"output is different from expected, {image_slice.flatten()}" )
diffusers/tests/pipelines/pag/test_pag_sd_inpaint.py/0
{ "file_path": "diffusers/tests/pipelines/pag/test_pag_sd_inpaint.py", "repo_id": "diffusers", "token_count": 6097 }
199
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import nightly, require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @nightly @require_flax class FlaxStableDiffusion2PipelineIntegrationTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() def test_stable_diffusion_flax(self): sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained( "stabilityai/stable-diffusion-2", variant="bf16", dtype=jnp.bfloat16, ) prompt = "A painting of a squirrel eating a burger" num_samples = jax.device_count() prompt = num_samples * [prompt] prompt_ids = sd_pipe.prepare_inputs(prompt) params = replicate(params) prompt_ids = shard(prompt_ids) prng_seed = jax.random.PRNGKey(0) prng_seed = jax.random.split(prng_seed, jax.device_count()) images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0] assert images.shape == (jax.device_count(), 1, 768, 768, 3) images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:]) image_slice = images[0, 253:256, 253:256, -1] output_slice = jnp.asarray(jax.device_get(image_slice.flatten())) expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512]) assert jnp.abs(output_slice - expected_slice).max() < 1e-2 @nightly @require_flax class FlaxStableDiffusion2PipelineNightlyTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() def test_stable_diffusion_dpm_flax(self): model_id = "stabilityai/stable-diffusion-2" scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler") sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained( model_id, scheduler=scheduler, variant="bf16", dtype=jnp.bfloat16, ) params["scheduler"] = scheduler_params prompt = "A painting of a squirrel eating a burger" num_samples = jax.device_count() prompt = num_samples * [prompt] prompt_ids = sd_pipe.prepare_inputs(prompt) params = replicate(params) prompt_ids = shard(prompt_ids) prng_seed = jax.random.PRNGKey(0) prng_seed = jax.random.split(prng_seed, jax.device_count()) images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0] assert images.shape == (jax.device_count(), 1, 768, 768, 3) images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:]) image_slice = images[0, 253:256, 253:256, -1] output_slice = jnp.asarray(jax.device_get(image_slice.flatten())) expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297]) assert jnp.abs(output_slice - expected_slice).max() < 1e-2
diffusers/tests/pipelines/stable_diffusion_2/test_stable_diffusion_flax.py/0
{ "file_path": "diffusers/tests/pipelines/stable_diffusion_2/test_stable_diffusion_flax.py", "repo_id": "diffusers", "token_count": 1668 }
200
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import random import unittest import numpy as np import torch from parameterized import parameterized from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer import diffusers from diffusers import ( AutoencoderKL, EulerDiscreteScheduler, LCMScheduler, MultiAdapter, StableDiffusionXLAdapterPipeline, T2IAdapter, UNet2DConditionModel, ) from diffusers.utils import logging from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, torch_device, ) from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import ( IPAdapterTesterMixin, PipelineTesterMixin, assert_mean_pixel_difference, ) enable_full_determinism() class StableDiffusionXLAdapterPipelineFastTests(IPAdapterTesterMixin, PipelineTesterMixin, unittest.TestCase): pipeline_class = StableDiffusionXLAdapterPipeline params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS def get_dummy_components(self, adapter_type="full_adapter_xl", time_cond_proj_dim=None): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), # SD2-specific config below attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, # 6 * 8 + 32 cross_attention_dim=64, time_cond_proj_dim=time_cond_proj_dim, ) scheduler = EulerDiscreteScheduler( beta_start=0.00085, beta_end=0.012, steps_offset=1, beta_schedule="scaled_linear", timestep_spacing="leading", ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, # SD2-specific config below hidden_act="gelu", projection_dim=32, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config) tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") if adapter_type == "full_adapter_xl": adapter = T2IAdapter( in_channels=3, channels=[32, 64], num_res_blocks=2, downscale_factor=4, adapter_type=adapter_type, ) elif adapter_type == "multi_adapter": adapter = MultiAdapter( [ T2IAdapter( in_channels=3, channels=[32, 64], num_res_blocks=2, downscale_factor=4, 
adapter_type="full_adapter_xl", ), T2IAdapter( in_channels=3, channels=[32, 64], num_res_blocks=2, downscale_factor=4, adapter_type="full_adapter_xl", ), ] ) else: raise ValueError( f"Unknown adapter type: {adapter_type}, must be one of 'full_adapter_xl', or 'multi_adapter''" ) components = { "adapter": adapter, "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "text_encoder_2": text_encoder_2, "tokenizer_2": tokenizer_2, # "safety_checker": None, "feature_extractor": None, "image_encoder": None, } return components def get_dummy_components_with_full_downscaling(self, adapter_type="full_adapter_xl"): """Get dummy components with x8 VAE downscaling and 3 UNet down blocks. These dummy components are intended to fully-exercise the T2I-Adapter downscaling behavior. """ torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D"), # SD2-specific config below attention_head_dim=2, use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=1, projection_class_embeddings_input_dim=80, # 6 * 8 + 32 cross_attention_dim=64, ) scheduler = EulerDiscreteScheduler( beta_start=0.00085, beta_end=0.012, steps_offset=1, beta_schedule="scaled_linear", timestep_spacing="leading", ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 32, 32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, # SD2-specific config below hidden_act="gelu", projection_dim=32, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config) tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") if adapter_type == "full_adapter_xl": adapter = T2IAdapter( in_channels=3, channels=[32, 32, 64], num_res_blocks=2, downscale_factor=16, adapter_type=adapter_type, ) elif adapter_type == "multi_adapter": adapter = MultiAdapter( [ T2IAdapter( in_channels=3, channels=[32, 32, 64], num_res_blocks=2, downscale_factor=16, adapter_type="full_adapter_xl", ), T2IAdapter( in_channels=3, channels=[32, 32, 64], num_res_blocks=2, downscale_factor=16, adapter_type="full_adapter_xl", ), ] ) else: raise ValueError( f"Unknown adapter type: {adapter_type}, must be one of 'full_adapter_xl', or 'multi_adapter''" ) components = { "adapter": adapter, "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "text_encoder_2": text_encoder_2, "tokenizer_2": tokenizer_2, # "safety_checker": None, "feature_extractor": None, "image_encoder": None, } return components def get_dummy_inputs(self, device, seed=0, height=64, width=64, num_images=1): if num_images == 1: image = floats_tensor((1, 3, height, width), rng=random.Random(seed)).to(device) else: 
image = [ floats_tensor((1, 3, height, width), rng=random.Random(seed)).to(device) for _ in range(num_images) ] if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "image": image, "generator": generator, "num_inference_steps": 2, "guidance_scale": 5.0, "output_type": "np", } return inputs def test_ip_adapter(self, from_multi=False, expected_pipe_slice=None): if not from_multi: expected_pipe_slice = None if torch_device == "cpu": expected_pipe_slice = np.array( [0.5752, 0.6155, 0.4826, 0.5111, 0.5741, 0.4678, 0.5199, 0.5231, 0.4794] ) return super().test_ip_adapter(expected_pipe_slice=expected_pipe_slice) @unittest.skip("We test this functionality elsewhere already.") def test_save_load_optional_components(self): pass def test_stable_diffusion_adapter_default_case(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionXLAdapterPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([00.5752, 0.6155, 0.4826, 0.5111, 0.5741, 0.4678, 0.5199, 0.5231, 0.4794]) assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3 @parameterized.expand( [ # (dim=144) The internal feature map will be 9x9 after initial pixel unshuffling (downscaled x16). (((4 * 2 + 1) * 16),), # (dim=160) The internal feature map will be 5x5 after the first T2I down block (downscaled x32). (((4 * 1 + 1) * 32),), ] ) def test_multiple_image_dimensions(self, dim): """Test that the T2I-Adapter pipeline supports any input dimension that is divisible by the adapter's `downscale_factor`. This test was added in response to an issue where the T2I Adapter's downscaling padding behavior did not match the UNet's behavior. Note that we have selected `dim` values to produce odd resolutions at each downscaling level. """ components = self.get_dummy_components_with_full_downscaling() sd_pipe = StableDiffusionXLAdapterPipeline(**components) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device, height=dim, width=dim) image = sd_pipe(**inputs).images assert image.shape == (1, dim, dim, 3) @parameterized.expand(["full_adapter", "full_adapter_xl", "light_adapter"]) def test_total_downscale_factor(self, adapter_type): """Test that the T2IAdapter correctly reports its total_downscale_factor.""" batch_size = 1 in_channels = 3 out_channels = [320, 640, 1280, 1280] in_image_size = 512 adapter = T2IAdapter( in_channels=in_channels, channels=out_channels, num_res_blocks=2, downscale_factor=8, adapter_type=adapter_type, ) adapter.to(torch_device) in_image = floats_tensor((batch_size, in_channels, in_image_size, in_image_size)).to(torch_device) adapter_state = adapter(in_image) # Assume that the last element in `adapter_state` has been downsampled the most, and check # that it matches the `total_downscale_factor`. 
expected_out_image_size = in_image_size // adapter.total_downscale_factor assert adapter_state[-1].shape == ( batch_size, out_channels[-1], expected_out_image_size, expected_out_image_size, ) def test_adapter_sdxl_lcm(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components(time_cond_proj_dim=256) sd_pipe = StableDiffusionXLAdapterPipeline(**components) sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) output = sd_pipe(**inputs) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.5425, 0.5385, 0.4964, 0.5045, 0.6149, 0.4974, 0.5469, 0.5332, 0.5426]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_adapter_sdxl_lcm_custom_timesteps(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components(time_cond_proj_dim=256) sd_pipe = StableDiffusionXLAdapterPipeline(**components) sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) del inputs["num_inference_steps"] inputs["timesteps"] = [999, 499] output = sd_pipe(**inputs) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.5425, 0.5385, 0.4964, 0.5045, 0.6149, 0.4974, 0.5469, 0.5332, 0.5426]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 class StableDiffusionXLMultiAdapterPipelineFastTests( StableDiffusionXLAdapterPipelineFastTests, PipelineTesterMixin, unittest.TestCase ): supports_dduf = False def get_dummy_components(self, time_cond_proj_dim=None): return super().get_dummy_components("multi_adapter", time_cond_proj_dim=time_cond_proj_dim) def get_dummy_components_with_full_downscaling(self): return super().get_dummy_components_with_full_downscaling("multi_adapter") def get_dummy_inputs(self, device, seed=0, height=64, width=64): inputs = super().get_dummy_inputs(device, seed, height, width, num_images=2) inputs["adapter_conditioning_scale"] = [0.5, 0.5] return inputs def test_stable_diffusion_adapter_default_case(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionXLAdapterPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.5617, 0.6081, 0.4807, 0.5071, 0.5665, 0.4614, 0.5165, 0.5164, 0.4786]) assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3 def test_ip_adapter(self): expected_pipe_slice = None if torch_device == "cpu": expected_pipe_slice = np.array([0.5617, 0.6081, 0.4807, 0.5071, 0.5665, 0.4614, 0.5165, 0.5164, 0.4786]) return super().test_ip_adapter(from_multi=True, expected_pipe_slice=expected_pipe_slice) def test_inference_batch_consistent( self, batch_sizes=[2, 4, 13], additional_params_copy_to_batched_inputs=["num_inference_steps"] ): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = 
self.get_dummy_inputs(torch_device) logger = logging.get_logger(pipe.__module__) logger.setLevel(level=diffusers.logging.FATAL) # batchify inputs for batch_size in batch_sizes: batched_inputs = {} for name, value in inputs.items(): if name in self.batch_params: # prompt is string if name == "prompt": len_prompt = len(value) # make unequal batch sizes batched_inputs[name] = [value[: len_prompt // i] for i in range(1, batch_size + 1)] # make last batch super long batched_inputs[name][-1] = 100 * "very long" elif name == "image": batched_images = [] for image in value: batched_images.append(batch_size * [image]) batched_inputs[name] = batched_images else: batched_inputs[name] = batch_size * [value] elif name == "batch_size": batched_inputs[name] = batch_size else: batched_inputs[name] = value for arg in additional_params_copy_to_batched_inputs: batched_inputs[arg] = inputs[arg] batched_inputs["output_type"] = "np" output = pipe(**batched_inputs) assert len(output[0]) == batch_size batched_inputs["output_type"] = "np" output = pipe(**batched_inputs)[0] assert output.shape[0] == batch_size logger.setLevel(level=diffusers.logging.WARNING) @unittest.skip("We test this functionality elsewhere already.") def test_save_load_optional_components(self): pass def test_num_images_per_prompt(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) batch_sizes = [1, 2] num_images_per_prompts = [1, 2] for batch_size in batch_sizes: for num_images_per_prompt in num_images_per_prompts: inputs = self.get_dummy_inputs(torch_device) for key in inputs.keys(): if key in self.batch_params: if key == "image": batched_images = [] for image in inputs[key]: batched_images.append(batch_size * [image]) inputs[key] = batched_images else: inputs[key] = batch_size * [inputs[key]] images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0] assert images.shape[0] == batch_size * num_images_per_prompt def test_inference_batch_single_identical( self, batch_size=3, test_max_difference=None, test_mean_pixel_difference=None, relax_max_difference=False, expected_max_diff=2e-3, additional_params_copy_to_batched_inputs=["num_inference_steps"], ): if test_max_difference is None: # TODO(Pedro) - not sure why, but not at all reproducible at the moment it seems # make sure that batched and non-batched is identical test_max_difference = torch_device != "mps" if test_mean_pixel_difference is None: # TODO same as above test_mean_pixel_difference = torch_device != "mps" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) logger = logging.get_logger(pipe.__module__) logger.setLevel(level=diffusers.logging.FATAL) # batchify inputs batched_inputs = {} for name, value in inputs.items(): if name in self.batch_params: # prompt is string if name == "prompt": len_prompt = len(value) # make unequal batch sizes batched_inputs[name] = [value[: len_prompt // i] for i in range(1, batch_size + 1)] # make last batch super long batched_inputs[name][-1] = 100 * "very long" elif name == "image": batched_images = [] for image in value: batched_images.append(batch_size * [image]) batched_inputs[name] = batched_images else: batched_inputs[name] = batch_size * [value] elif name == "batch_size": batched_inputs[name] = batch_size elif name == "generator": batched_inputs[name] = [self.get_generator(i) for i in 
range(batch_size)] else: batched_inputs[name] = value for arg in additional_params_copy_to_batched_inputs: batched_inputs[arg] = inputs[arg] output_batch = pipe(**batched_inputs) assert output_batch[0].shape[0] == batch_size inputs["generator"] = self.get_generator(0) output = pipe(**inputs) logger.setLevel(level=diffusers.logging.WARNING) if test_max_difference: if relax_max_difference: # Taking the median of the largest <n> differences # is resilient to outliers diff = np.abs(output_batch[0][0] - output[0][0]) diff = diff.flatten() diff.sort() max_diff = np.median(diff[-5:]) else: max_diff = np.abs(output_batch[0][0] - output[0][0]).max() assert max_diff < expected_max_diff if test_mean_pixel_difference: assert_mean_pixel_difference(output_batch[0][0], output[0][0]) def test_adapter_sdxl_lcm(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components(time_cond_proj_dim=256) sd_pipe = StableDiffusionXLAdapterPipeline(**components) sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) output = sd_pipe(**inputs) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.5313, 0.5375, 0.4942, 0.5021, 0.6142, 0.4968, 0.5434, 0.5311, 0.5448]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_adapter_sdxl_lcm_custom_timesteps(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components(time_cond_proj_dim=256) sd_pipe = StableDiffusionXLAdapterPipeline(**components) sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) del inputs["num_inference_steps"] inputs["timesteps"] = [999, 499] output = sd_pipe(**inputs) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.5313, 0.5375, 0.4942, 0.5021, 0.6142, 0.4968, 0.5434, 0.5311, 0.5448]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
diffusers/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_adapter.py/0
{ "file_path": "diffusers/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_adapter.py", "repo_id": "diffusers", "token_count": 12835 }
201
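The LCM tests above swap in `LCMScheduler.from_config(...)` on tiny dummy components. Below is a minimal sketch of the same T2I-Adapter + LCM combination at normal scale; the checkpoint names, the LCM-LoRA repository, the conditioning-image path, and the step/guidance values are illustrative assumptions rather than values taken from the test file.

```python
# Hedged sketch: SDXL + T2I-Adapter with an LCM scheduler, mirroring the test setup above.
# All checkpoint names and the conditioning-image path are illustrative assumptions.
import torch
from diffusers import LCMScheduler, StableDiffusionXLAdapterPipeline, T2IAdapter
from diffusers.utils import load_image

adapter = T2IAdapter.from_pretrained("TencentARC/t2i-adapter-canny-sdxl-1.0", torch_dtype=torch.float16)
pipe = StableDiffusionXLAdapterPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", adapter=adapter, torch_dtype=torch.float16
)
# The tests emulate an LCM-distilled UNet via `time_cond_proj_dim`; at full scale an
# LCM-LoRA plays the same role before the scheduler is swapped.
pipe.load_lora_weights("latent-consistency/lcm-lora-sdxl")
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
pipe.to("cuda")

canny_image = load_image("canny_edges.png")  # placeholder: any precomputed Canny edge map
image = pipe(
    prompt="a castle on a cliff, concept art, highly detailed",
    image=canny_image,
    num_inference_steps=4,  # few-step sampling, as in the LCM tests
    guidance_scale=1.0,
    adapter_conditioning_scale=0.8,
).images[0]
image.save("castle.png")
```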
import gc import unittest import numpy as np import torch import torch.nn as nn from diffusers import ( AuraFlowPipeline, AuraFlowTransformer2DModel, DiffusionPipeline, FluxControlPipeline, FluxPipeline, FluxTransformer2DModel, GGUFQuantizationConfig, HiDreamImageTransformer2DModel, SD3Transformer2DModel, StableDiffusion3Pipeline, WanTransformer3DModel, WanVACETransformer3DModel, ) from diffusers.utils import load_image from diffusers.utils.testing_utils import ( Expectations, backend_empty_cache, backend_max_memory_allocated, backend_reset_peak_memory_stats, enable_full_determinism, is_gguf_available, nightly, numpy_cosine_similarity_distance, require_accelerate, require_accelerator, require_big_accelerator, require_gguf_version_greater_or_equal, require_kernels_version_greater_or_equal, require_peft_backend, require_torch_version_greater, torch_device, ) from ..test_torch_compile_utils import QuantCompileTests if is_gguf_available(): import gguf from diffusers.quantizers.gguf.utils import GGUFLinear, GGUFParameter enable_full_determinism() @nightly @require_accelerate @require_accelerator @require_gguf_version_greater_or_equal("0.10.0") @require_kernels_version_greater_or_equal("0.9.0") class GGUFCudaKernelsTests(unittest.TestCase): def setUp(self): gc.collect() backend_empty_cache(torch_device) def tearDown(self): gc.collect() backend_empty_cache(torch_device) def test_cuda_kernels_vs_native(self): if torch_device != "cuda": self.skipTest("CUDA kernels test requires CUDA device") from diffusers.quantizers.gguf.utils import GGUFLinear, can_use_cuda_kernels if not can_use_cuda_kernels: self.skipTest("CUDA kernels not available (compute capability < 7 or kernels not installed)") test_quant_types = ["Q4_0", "Q4_K"] test_shape = (1, 64, 512) # batch, seq_len, hidden_dim compute_dtype = torch.bfloat16 for quant_type in test_quant_types: qtype = getattr(gguf.GGMLQuantizationType, quant_type) in_features, out_features = 512, 512 torch.manual_seed(42) float_weight = torch.randn(out_features, in_features, dtype=torch.float32) quantized_data = gguf.quants.quantize(float_weight.numpy(), qtype) weight_data = torch.from_numpy(quantized_data).to(device=torch_device) weight = GGUFParameter(weight_data, quant_type=qtype) x = torch.randn(test_shape, dtype=compute_dtype, device=torch_device) linear = GGUFLinear(in_features, out_features, bias=True, compute_dtype=compute_dtype) linear.weight = weight linear.bias = nn.Parameter(torch.randn(out_features, dtype=compute_dtype)) linear = linear.to(torch_device) with torch.no_grad(): output_native = linear.forward_native(x) output_cuda = linear.forward_cuda(x) assert torch.allclose(output_native, output_cuda, 1e-2), ( f"GGUF CUDA Kernel Output is different from Native Output for {quant_type}" ) @nightly @require_big_accelerator @require_accelerate @require_gguf_version_greater_or_equal("0.10.0") class GGUFSingleFileTesterMixin: ckpt_path = None model_cls = None torch_dtype = torch.bfloat16 expected_memory_use_in_gb = 5 def test_gguf_parameters(self): quant_storage_type = torch.uint8 quantization_config = GGUFQuantizationConfig(compute_dtype=self.torch_dtype) model = self.model_cls.from_single_file(self.ckpt_path, quantization_config=quantization_config) for param_name, param in model.named_parameters(): if isinstance(param, GGUFParameter): assert hasattr(param, "quant_type") assert param.dtype == quant_storage_type def test_gguf_linear_layers(self): quantization_config = GGUFQuantizationConfig(compute_dtype=self.torch_dtype) model = 
self.model_cls.from_single_file(self.ckpt_path, quantization_config=quantization_config) for name, module in model.named_modules(): if isinstance(module, torch.nn.Linear) and hasattr(module.weight, "quant_type"): assert module.weight.dtype == torch.uint8 if module.bias is not None: assert module.bias.dtype == self.torch_dtype def test_gguf_memory_usage(self): quantization_config = GGUFQuantizationConfig(compute_dtype=self.torch_dtype) model = self.model_cls.from_single_file( self.ckpt_path, quantization_config=quantization_config, torch_dtype=self.torch_dtype ) model.to(torch_device) assert (model.get_memory_footprint() / 1024**3) < self.expected_memory_use_in_gb inputs = self.get_dummy_inputs() backend_reset_peak_memory_stats(torch_device) backend_empty_cache(torch_device) with torch.no_grad(): model(**inputs) max_memory = backend_max_memory_allocated(torch_device) assert (max_memory / 1024**3) < self.expected_memory_use_in_gb def test_keep_modules_in_fp32(self): r""" A simple tests to check if the modules under `_keep_in_fp32_modules` are kept in fp32. Also ensures if inference works. """ _keep_in_fp32_modules = self.model_cls._keep_in_fp32_modules self.model_cls._keep_in_fp32_modules = ["proj_out"] quantization_config = GGUFQuantizationConfig(compute_dtype=self.torch_dtype) model = self.model_cls.from_single_file(self.ckpt_path, quantization_config=quantization_config) for name, module in model.named_modules(): if isinstance(module, torch.nn.Linear): if name in model._keep_in_fp32_modules: assert module.weight.dtype == torch.float32 self.model_cls._keep_in_fp32_modules = _keep_in_fp32_modules def test_dtype_assignment(self): quantization_config = GGUFQuantizationConfig(compute_dtype=self.torch_dtype) model = self.model_cls.from_single_file(self.ckpt_path, quantization_config=quantization_config) with self.assertRaises(ValueError): # Tries with a `dtype` model.to(torch.float16) with self.assertRaises(ValueError): # Tries with a `device` and `dtype` device_0 = f"{torch_device}:0" model.to(device=device_0, dtype=torch.float16) with self.assertRaises(ValueError): # Tries with a cast model.float() with self.assertRaises(ValueError): # Tries with a cast model.half() # This should work model.to(torch_device) def test_dequantize_model(self): quantization_config = GGUFQuantizationConfig(compute_dtype=self.torch_dtype) model = self.model_cls.from_single_file(self.ckpt_path, quantization_config=quantization_config) model.dequantize() def _check_for_gguf_linear(model): has_children = list(model.children()) if not has_children: return for name, module in model.named_children(): if isinstance(module, nn.Linear): assert not isinstance(module, GGUFLinear), f"{name} is still GGUFLinear" assert not isinstance(module.weight, GGUFParameter), f"{name} weight is still GGUFParameter" for name, module in model.named_children(): _check_for_gguf_linear(module) class FluxGGUFSingleFileTests(GGUFSingleFileTesterMixin, unittest.TestCase): ckpt_path = "https://huggingface.co/city96/FLUX.1-dev-gguf/blob/main/flux1-dev-Q2_K.gguf" diffusers_ckpt_path = "https://huggingface.co/sayakpaul/flux-diffusers-gguf/blob/main/model-Q4_0.gguf" torch_dtype = torch.bfloat16 model_cls = FluxTransformer2DModel expected_memory_use_in_gb = 5 def setUp(self): gc.collect() backend_empty_cache(torch_device) def tearDown(self): gc.collect() backend_empty_cache(torch_device) def get_dummy_inputs(self): return { "hidden_states": torch.randn((1, 4096, 64), generator=torch.Generator("cpu").manual_seed(0)).to( torch_device, self.torch_dtype ), 
"encoder_hidden_states": torch.randn( (1, 512, 4096), generator=torch.Generator("cpu").manual_seed(0), ).to(torch_device, self.torch_dtype), "pooled_projections": torch.randn( (1, 768), generator=torch.Generator("cpu").manual_seed(0), ).to(torch_device, self.torch_dtype), "timestep": torch.tensor([1]).to(torch_device, self.torch_dtype), "img_ids": torch.randn((4096, 3), generator=torch.Generator("cpu").manual_seed(0)).to( torch_device, self.torch_dtype ), "txt_ids": torch.randn((512, 3), generator=torch.Generator("cpu").manual_seed(0)).to( torch_device, self.torch_dtype ), "guidance": torch.tensor([3.5]).to(torch_device, self.torch_dtype), } def test_pipeline_inference(self): quantization_config = GGUFQuantizationConfig(compute_dtype=self.torch_dtype) transformer = self.model_cls.from_single_file( self.ckpt_path, quantization_config=quantization_config, torch_dtype=self.torch_dtype ) pipe = FluxPipeline.from_pretrained( "black-forest-labs/FLUX.1-dev", transformer=transformer, torch_dtype=self.torch_dtype ) pipe.enable_model_cpu_offload() prompt = "a cat holding a sign that says hello" output = pipe( prompt=prompt, num_inference_steps=2, generator=torch.Generator("cpu").manual_seed(0), output_type="np" ).images[0] output_slice = output[:3, :3, :].flatten() expected_slice = np.array( [ 0.47265625, 0.43359375, 0.359375, 0.47070312, 0.421875, 0.34375, 0.46875, 0.421875, 0.34765625, 0.46484375, 0.421875, 0.34179688, 0.47070312, 0.42578125, 0.34570312, 0.46875, 0.42578125, 0.3515625, 0.45507812, 0.4140625, 0.33984375, 0.4609375, 0.41796875, 0.34375, 0.45898438, 0.41796875, 0.34375, ] ) max_diff = numpy_cosine_similarity_distance(expected_slice, output_slice) assert max_diff < 1e-4 def test_loading_gguf_diffusers_format(self): model = self.model_cls.from_single_file( self.diffusers_ckpt_path, subfolder="transformer", quantization_config=GGUFQuantizationConfig(compute_dtype=torch.bfloat16), config="black-forest-labs/FLUX.1-dev", ) model.to(torch_device) model(**self.get_dummy_inputs()) class SD35LargeGGUFSingleFileTests(GGUFSingleFileTesterMixin, unittest.TestCase): ckpt_path = "https://huggingface.co/city96/stable-diffusion-3.5-large-gguf/blob/main/sd3.5_large-Q4_0.gguf" torch_dtype = torch.bfloat16 model_cls = SD3Transformer2DModel expected_memory_use_in_gb = 5 def setUp(self): gc.collect() backend_empty_cache(torch_device) def tearDown(self): gc.collect() backend_empty_cache(torch_device) def get_dummy_inputs(self): return { "hidden_states": torch.randn((1, 16, 64, 64), generator=torch.Generator("cpu").manual_seed(0)).to( torch_device, self.torch_dtype ), "encoder_hidden_states": torch.randn( (1, 512, 4096), generator=torch.Generator("cpu").manual_seed(0), ).to(torch_device, self.torch_dtype), "pooled_projections": torch.randn( (1, 2048), generator=torch.Generator("cpu").manual_seed(0), ).to(torch_device, self.torch_dtype), "timestep": torch.tensor([1]).to(torch_device, self.torch_dtype), } def test_pipeline_inference(self): quantization_config = GGUFQuantizationConfig(compute_dtype=self.torch_dtype) transformer = self.model_cls.from_single_file( self.ckpt_path, quantization_config=quantization_config, torch_dtype=self.torch_dtype ) pipe = StableDiffusion3Pipeline.from_pretrained( "stabilityai/stable-diffusion-3.5-large", transformer=transformer, torch_dtype=self.torch_dtype ) pipe.enable_model_cpu_offload() prompt = "a cat holding a sign that says hello" output = pipe( prompt=prompt, num_inference_steps=2, generator=torch.Generator("cpu").manual_seed(0), output_type="np", ).images[0] 
output_slice = output[:3, :3, :].flatten() expected_slices = Expectations( { ("xpu", 3): np.array( [ 0.1953125, 0.3125, 0.31445312, 0.13085938, 0.30664062, 0.29296875, 0.11523438, 0.2890625, 0.28320312, 0.16601562, 0.3046875, 0.328125, 0.140625, 0.31640625, 0.32421875, 0.12304688, 0.3046875, 0.3046875, 0.17578125, 0.3359375, 0.3203125, 0.16601562, 0.34375, 0.31640625, 0.15429688, 0.328125, 0.31054688, ] ), ("cuda", 7): np.array( [ 0.17578125, 0.27539062, 0.27734375, 0.11914062, 0.26953125, 0.25390625, 0.109375, 0.25390625, 0.25, 0.15039062, 0.26171875, 0.28515625, 0.13671875, 0.27734375, 0.28515625, 0.12109375, 0.26757812, 0.265625, 0.16210938, 0.29882812, 0.28515625, 0.15625, 0.30664062, 0.27734375, 0.14648438, 0.29296875, 0.26953125, ] ), } ) expected_slice = expected_slices.get_expectation() max_diff = numpy_cosine_similarity_distance(expected_slice, output_slice) assert max_diff < 1e-4 class SD35MediumGGUFSingleFileTests(GGUFSingleFileTesterMixin, unittest.TestCase): ckpt_path = "https://huggingface.co/city96/stable-diffusion-3.5-medium-gguf/blob/main/sd3.5_medium-Q3_K_M.gguf" torch_dtype = torch.bfloat16 model_cls = SD3Transformer2DModel expected_memory_use_in_gb = 2 def setUp(self): gc.collect() backend_empty_cache(torch_device) def tearDown(self): gc.collect() backend_empty_cache(torch_device) def get_dummy_inputs(self): return { "hidden_states": torch.randn((1, 16, 64, 64), generator=torch.Generator("cpu").manual_seed(0)).to( torch_device, self.torch_dtype ), "encoder_hidden_states": torch.randn( (1, 512, 4096), generator=torch.Generator("cpu").manual_seed(0), ).to(torch_device, self.torch_dtype), "pooled_projections": torch.randn( (1, 2048), generator=torch.Generator("cpu").manual_seed(0), ).to(torch_device, self.torch_dtype), "timestep": torch.tensor([1]).to(torch_device, self.torch_dtype), } def test_pipeline_inference(self): quantization_config = GGUFQuantizationConfig(compute_dtype=self.torch_dtype) transformer = self.model_cls.from_single_file( self.ckpt_path, quantization_config=quantization_config, torch_dtype=self.torch_dtype ) pipe = StableDiffusion3Pipeline.from_pretrained( "stabilityai/stable-diffusion-3.5-medium", transformer=transformer, torch_dtype=self.torch_dtype ) pipe.enable_model_cpu_offload() prompt = "a cat holding a sign that says hello" output = pipe( prompt=prompt, num_inference_steps=2, generator=torch.Generator("cpu").manual_seed(0), output_type="np" ).images[0] output_slice = output[:3, :3, :].flatten() expected_slice = np.array( [ 0.625, 0.6171875, 0.609375, 0.65625, 0.65234375, 0.640625, 0.6484375, 0.640625, 0.625, 0.6484375, 0.63671875, 0.6484375, 0.66796875, 0.65625, 0.65234375, 0.6640625, 0.6484375, 0.6328125, 0.6640625, 0.6484375, 0.640625, 0.67578125, 0.66015625, 0.62109375, 0.671875, 0.65625, 0.62109375, ] ) max_diff = numpy_cosine_similarity_distance(expected_slice, output_slice) assert max_diff < 1e-4 class AuraFlowGGUFSingleFileTests(GGUFSingleFileTesterMixin, unittest.TestCase): ckpt_path = "https://huggingface.co/city96/AuraFlow-v0.3-gguf/blob/main/aura_flow_0.3-Q2_K.gguf" torch_dtype = torch.bfloat16 model_cls = AuraFlowTransformer2DModel expected_memory_use_in_gb = 4 def setUp(self): gc.collect() backend_empty_cache(torch_device) def tearDown(self): gc.collect() backend_empty_cache(torch_device) def get_dummy_inputs(self): return { "hidden_states": torch.randn((1, 4, 64, 64), generator=torch.Generator("cpu").manual_seed(0)).to( torch_device, self.torch_dtype ), "encoder_hidden_states": torch.randn( (1, 512, 2048), 
generator=torch.Generator("cpu").manual_seed(0), ).to(torch_device, self.torch_dtype), "timestep": torch.tensor([1]).to(torch_device, self.torch_dtype), } def test_pipeline_inference(self): quantization_config = GGUFQuantizationConfig(compute_dtype=self.torch_dtype) transformer = self.model_cls.from_single_file( self.ckpt_path, quantization_config=quantization_config, torch_dtype=self.torch_dtype ) pipe = AuraFlowPipeline.from_pretrained( "fal/AuraFlow-v0.3", transformer=transformer, torch_dtype=self.torch_dtype ) pipe.enable_model_cpu_offload() prompt = "a pony holding a sign that says hello" output = pipe( prompt=prompt, num_inference_steps=2, generator=torch.Generator("cpu").manual_seed(0), output_type="np" ).images[0] output_slice = output[:3, :3, :].flatten() expected_slice = np.array( [ 0.46484375, 0.546875, 0.64453125, 0.48242188, 0.53515625, 0.59765625, 0.47070312, 0.5078125, 0.5703125, 0.42773438, 0.50390625, 0.5703125, 0.47070312, 0.515625, 0.57421875, 0.45898438, 0.48632812, 0.53515625, 0.4453125, 0.5078125, 0.56640625, 0.47851562, 0.5234375, 0.57421875, 0.48632812, 0.5234375, 0.56640625, ] ) max_diff = numpy_cosine_similarity_distance(expected_slice, output_slice) assert max_diff < 1e-4 @require_peft_backend @nightly @require_big_accelerator @require_accelerate @require_gguf_version_greater_or_equal("0.10.0") class FluxControlLoRAGGUFTests(unittest.TestCase): def test_lora_loading(self): ckpt_path = "https://huggingface.co/city96/FLUX.1-dev-gguf/blob/main/flux1-dev-Q2_K.gguf" transformer = FluxTransformer2DModel.from_single_file( ckpt_path, quantization_config=GGUFQuantizationConfig(compute_dtype=torch.bfloat16), torch_dtype=torch.bfloat16, ) pipe = FluxControlPipeline.from_pretrained( "black-forest-labs/FLUX.1-dev", transformer=transformer, torch_dtype=torch.bfloat16, ).to(torch_device) pipe.load_lora_weights("black-forest-labs/FLUX.1-Canny-dev-lora") prompt = "A robot made of exotic candies and chocolates of different kinds. The background is filled with confetti and celebratory gifts." 
control_image = load_image( "https://huggingface.co/datasets/sayakpaul/sample-datasets/resolve/main/control_image_robot_canny.png" ) output = pipe( prompt=prompt, control_image=control_image, height=256, width=256, num_inference_steps=10, guidance_scale=30.0, output_type="np", generator=torch.manual_seed(0), ).images out_slice = output[0, -3:, -3:, -1].flatten() expected_slice = np.array([0.8047, 0.8359, 0.8711, 0.6875, 0.7070, 0.7383, 0.5469, 0.5820, 0.6641]) max_diff = numpy_cosine_similarity_distance(expected_slice, out_slice) self.assertTrue(max_diff < 1e-3) class HiDreamGGUFSingleFileTests(GGUFSingleFileTesterMixin, unittest.TestCase): ckpt_path = "https://huggingface.co/city96/HiDream-I1-Dev-gguf/blob/main/hidream-i1-dev-Q2_K.gguf" torch_dtype = torch.bfloat16 model_cls = HiDreamImageTransformer2DModel expected_memory_use_in_gb = 8 def get_dummy_inputs(self): return { "hidden_states": torch.randn((1, 16, 128, 128), generator=torch.Generator("cpu").manual_seed(0)).to( torch_device, self.torch_dtype ), "encoder_hidden_states_t5": torch.randn( (1, 128, 4096), generator=torch.Generator("cpu").manual_seed(0), ).to(torch_device, self.torch_dtype), "encoder_hidden_states_llama3": torch.randn( (32, 1, 128, 4096), generator=torch.Generator("cpu").manual_seed(0), ).to(torch_device, self.torch_dtype), "pooled_embeds": torch.randn( (1, 2048), generator=torch.Generator("cpu").manual_seed(0), ).to(torch_device, self.torch_dtype), "timesteps": torch.tensor([1]).to(torch_device, self.torch_dtype), } class WanGGUFTexttoVideoSingleFileTests(GGUFSingleFileTesterMixin, unittest.TestCase): ckpt_path = "https://huggingface.co/city96/Wan2.1-T2V-14B-gguf/blob/main/wan2.1-t2v-14b-Q3_K_S.gguf" torch_dtype = torch.bfloat16 model_cls = WanTransformer3DModel expected_memory_use_in_gb = 9 def get_dummy_inputs(self): return { "hidden_states": torch.randn((1, 16, 2, 64, 64), generator=torch.Generator("cpu").manual_seed(0)).to( torch_device, self.torch_dtype ), "encoder_hidden_states": torch.randn( (1, 512, 4096), generator=torch.Generator("cpu").manual_seed(0), ).to(torch_device, self.torch_dtype), "timestep": torch.tensor([1]).to(torch_device, self.torch_dtype), } class WanGGUFImagetoVideoSingleFileTests(GGUFSingleFileTesterMixin, unittest.TestCase): ckpt_path = "https://huggingface.co/city96/Wan2.1-I2V-14B-480P-gguf/blob/main/wan2.1-i2v-14b-480p-Q3_K_S.gguf" torch_dtype = torch.bfloat16 model_cls = WanTransformer3DModel expected_memory_use_in_gb = 9 def get_dummy_inputs(self): return { "hidden_states": torch.randn((1, 36, 2, 64, 64), generator=torch.Generator("cpu").manual_seed(0)).to( torch_device, self.torch_dtype ), "encoder_hidden_states": torch.randn( (1, 512, 4096), generator=torch.Generator("cpu").manual_seed(0), ).to(torch_device, self.torch_dtype), "encoder_hidden_states_image": torch.randn( (1, 257, 1280), generator=torch.Generator("cpu").manual_seed(0) ).to(torch_device, self.torch_dtype), "timestep": torch.tensor([1]).to(torch_device, self.torch_dtype), } class WanVACEGGUFSingleFileTests(GGUFSingleFileTesterMixin, unittest.TestCase): ckpt_path = "https://huggingface.co/QuantStack/Wan2.1_14B_VACE-GGUF/blob/main/Wan2.1_14B_VACE-Q3_K_S.gguf" torch_dtype = torch.bfloat16 model_cls = WanVACETransformer3DModel expected_memory_use_in_gb = 9 def get_dummy_inputs(self): return { "hidden_states": torch.randn((1, 16, 2, 64, 64), generator=torch.Generator("cpu").manual_seed(0)).to( torch_device, self.torch_dtype ), "encoder_hidden_states": torch.randn( (1, 512, 4096), 
generator=torch.Generator("cpu").manual_seed(0), ).to(torch_device, self.torch_dtype), "control_hidden_states": torch.randn( (1, 96, 2, 64, 64), generator=torch.Generator("cpu").manual_seed(0), ).to(torch_device, self.torch_dtype), "control_hidden_states_scale": torch.randn( (8,), generator=torch.Generator("cpu").manual_seed(0), ).to(torch_device, self.torch_dtype), "timestep": torch.tensor([1]).to(torch_device, self.torch_dtype), } @require_torch_version_greater("2.7.1") class GGUFCompileTests(QuantCompileTests, unittest.TestCase): torch_dtype = torch.bfloat16 gguf_ckpt = "https://huggingface.co/city96/FLUX.1-dev-gguf/blob/main/flux1-dev-Q2_K.gguf" @property def quantization_config(self): return GGUFQuantizationConfig(compute_dtype=self.torch_dtype) def _init_pipeline(self, *args, **kwargs): transformer = FluxTransformer2DModel.from_single_file( self.gguf_ckpt, quantization_config=self.quantization_config, torch_dtype=self.torch_dtype ) pipe = DiffusionPipeline.from_pretrained( "black-forest-labs/FLUX.1-dev", transformer=transformer, torch_dtype=self.torch_dtype ) return pipe
diffusers/tests/quantization/gguf/test_gguf.py/0
{ "file_path": "diffusers/tests/quantization/gguf/test_gguf.py", "repo_id": "diffusers", "token_count": 14853 }
202
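The Flux GGUF test above loads a Q2_K single-file checkpoint through `GGUFQuantizationConfig` and runs it inside `FluxPipeline`. Here is a condensed sketch of that flow, reusing the same checkpoint URL, arguments, prompt, and step count as the test itself.

```python
# Minimal sketch of GGUF single-file loading, following FluxGGUFSingleFileTests.test_pipeline_inference.
import torch
from diffusers import FluxPipeline, FluxTransformer2DModel, GGUFQuantizationConfig

ckpt_path = "https://huggingface.co/city96/FLUX.1-dev-gguf/blob/main/flux1-dev-Q2_K.gguf"
transformer = FluxTransformer2DModel.from_single_file(
    ckpt_path,
    quantization_config=GGUFQuantizationConfig(compute_dtype=torch.bfloat16),
    torch_dtype=torch.bfloat16,
)
pipe = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev", transformer=transformer, torch_dtype=torch.bfloat16
)
pipe.enable_model_cpu_offload()  # the tests rely on offloading to stay within their memory budget

image = pipe(
    prompt="a cat holding a sign that says hello",
    num_inference_steps=2,  # kept from the test; raise for real use
    generator=torch.Generator("cpu").manual_seed(0),
).images[0]
image.save("flux_gguf.png")
```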
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest from diffusers import ( FluxTransformer2DModel, ) from diffusers.utils.testing_utils import ( backend_empty_cache, enable_full_determinism, require_torch_accelerator, torch_device, ) enable_full_determinism() @require_torch_accelerator class FluxTransformer2DModelSingleFileTests(unittest.TestCase): model_class = FluxTransformer2DModel ckpt_path = "https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/flux1-dev.safetensors" alternate_keys_ckpt_paths = ["https://huggingface.co/Comfy-Org/flux1-dev/blob/main/flux1-dev-fp8.safetensors"] repo_id = "black-forest-labs/FLUX.1-dev" def setUp(self): super().setUp() gc.collect() backend_empty_cache(torch_device) def tearDown(self): super().tearDown() gc.collect() backend_empty_cache(torch_device) def test_single_file_components(self): model = self.model_class.from_pretrained(self.repo_id, subfolder="transformer") model_single_file = self.model_class.from_single_file(self.ckpt_path) PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"] for param_name, param_value in model_single_file.config.items(): if param_name in PARAMS_TO_IGNORE: continue assert model.config[param_name] == param_value, ( f"{param_name} differs between single file loading and pretrained loading" ) def test_checkpoint_loading(self): for ckpt_path in self.alternate_keys_ckpt_paths: backend_empty_cache(torch_device) model = self.model_class.from_single_file(ckpt_path) del model gc.collect() backend_empty_cache(torch_device)
diffusers/tests/single_file/test_model_flux_transformer_single_file.py/0
{ "file_path": "diffusers/tests/single_file/test_model_flux_transformer_single_file.py", "repo_id": "diffusers", "token_count": 959 }
203
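The single-file test above asserts that `from_single_file` reproduces the same config as `from_pretrained` up to a few bookkeeping keys. A small sketch of that parity check as a standalone script; note it downloads the full FLUX.1-dev weights, so it is only practical on a machine with ample disk and memory.

```python
# Standalone version of the config-parity check performed in test_single_file_components.
from diffusers import FluxTransformer2DModel

repo_id = "black-forest-labs/FLUX.1-dev"
ckpt_path = "https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/flux1-dev.safetensors"

model = FluxTransformer2DModel.from_pretrained(repo_id, subfolder="transformer")
model_single_file = FluxTransformer2DModel.from_single_file(ckpt_path)

# Same bookkeeping keys the test ignores.
ignore = {"torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"}
mismatches = {
    name: (model.config[name], value)
    for name, value in model_single_file.config.items()
    if name not in ignore and model.config[name] != value
}
print(mismatches or "single-file and pretrained configs match")
```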
import gc import unittest import torch from diffusers import ( DDIMScheduler, StableDiffusionXLImg2ImgPipeline, ) from diffusers.utils import load_image from diffusers.utils.testing_utils import ( backend_empty_cache, enable_full_determinism, numpy_cosine_similarity_distance, require_torch_accelerator, slow, torch_device, ) from .single_file_testing_utils import SDXLSingleFileTesterMixin enable_full_determinism() @slow @require_torch_accelerator class StableDiffusionXLImg2ImgPipelineSingleFileSlowTests(unittest.TestCase, SDXLSingleFileTesterMixin): pipeline_class = StableDiffusionXLImg2ImgPipeline ckpt_path = "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/sd_xl_base_1.0.safetensors" repo_id = "stabilityai/stable-diffusion-xl-base-1.0" original_config = ( "https://raw.githubusercontent.com/Stability-AI/generative-models/main/configs/inference/sd_xl_base.yaml" ) def setUp(self): super().setUp() gc.collect() backend_empty_cache(torch_device) def tearDown(self): super().tearDown() gc.collect() backend_empty_cache(torch_device) def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): generator = torch.Generator(device=generator_device).manual_seed(seed) init_image = load_image( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_img2img/sketch-mountains-input.png" ) inputs = { "prompt": "a fantasy landscape, concept art, high resolution", "image": init_image, "generator": generator, "num_inference_steps": 3, "strength": 0.75, "guidance_scale": 7.5, "output_type": "np", } return inputs def test_single_file_format_inference_is_same_as_pretrained(self): super().test_single_file_format_inference_is_same_as_pretrained(expected_max_diff=1e-3) @slow @require_torch_accelerator class StableDiffusionXLImg2ImgRefinerPipelineSingleFileSlowTests(unittest.TestCase): pipeline_class = StableDiffusionXLImg2ImgPipeline ckpt_path = ( "https://huggingface.co/stabilityai/stable-diffusion-xl-refiner-1.0/blob/main/sd_xl_refiner_1.0.safetensors" ) repo_id = "stabilityai/stable-diffusion-xl-refiner-1.0" original_config = ( "https://raw.githubusercontent.com/Stability-AI/generative-models/main/configs/inference/sd_xl_refiner.yaml" ) def test_single_file_format_inference_is_same_as_pretrained(self): init_image = load_image( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_img2img/sketch-mountains-input.png" ) pipe = self.pipeline_class.from_pretrained(self.repo_id, torch_dtype=torch.float16) pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config) pipe.unet.set_default_attn_processor() pipe.enable_model_cpu_offload(device=torch_device) generator = torch.Generator(device="cpu").manual_seed(0) image = pipe( prompt="mountains", image=init_image, num_inference_steps=5, generator=generator, output_type="np" ).images[0] pipe_single_file = self.pipeline_class.from_single_file(self.ckpt_path, torch_dtype=torch.float16) pipe_single_file.scheduler = DDIMScheduler.from_config(pipe_single_file.scheduler.config) pipe_single_file.unet.set_default_attn_processor() pipe_single_file.enable_model_cpu_offload(device=torch_device) generator = torch.Generator(device="cpu").manual_seed(0) image_single_file = pipe_single_file( prompt="mountains", image=init_image, num_inference_steps=5, generator=generator, output_type="np" ).images[0] max_diff = numpy_cosine_similarity_distance(image.flatten(), image_single_file.flatten()) assert max_diff < 5e-4
diffusers/tests/single_file/test_stable_diffusion_xl_img2img_single_file.py/0
{ "file_path": "diffusers/tests/single_file/test_stable_diffusion_xl_img2img_single_file.py", "repo_id": "diffusers", "token_count": 1770 }
204
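The refiner test above compares outputs from `from_pretrained` and `from_single_file`. The sketch below shows just the single-file path for SDXL img2img, reusing the base-model checkpoint URL and the inputs from `get_inputs`; `enable_model_cpu_offload` is an assumption for fitting on smaller GPUs, not something the base test requires.

```python
# Sketch of the single-file loading path exercised above, reusing the test's inputs.
import torch
from diffusers import StableDiffusionXLImg2ImgPipeline
from diffusers.utils import load_image

ckpt_path = "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/sd_xl_base_1.0.safetensors"
pipe = StableDiffusionXLImg2ImgPipeline.from_single_file(ckpt_path, torch_dtype=torch.float16)
pipe.enable_model_cpu_offload()  # assumption: offload to fit on smaller GPUs

init_image = load_image(
    "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
    "/stable_diffusion_img2img/sketch-mountains-input.png"
)
image = pipe(
    prompt="a fantasy landscape, concept art, high resolution",
    image=init_image,
    strength=0.75,
    guidance_scale=7.5,
    num_inference_steps=3,  # kept from the test; raise for real use
    generator=torch.Generator(device="cpu").manual_seed(0),
).images[0]
image.save("fantasy_landscape.png")
```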
- sections: - local: index title: LeRobot - local: installation title: Installation title: Get started - sections: - local: il_robots title: Imitation Learning for Robots - local: il_sim title: Imitation Learning in Sim - local: cameras title: Cameras - local: integrate_hardware title: Bring Your Own Hardware - local: hilserl title: Train a Robot with RL - local: hilserl_sim title: Train RL in Simulation - local: async title: Use Async Inference title: "Tutorials" - sections: - local: smolvla title: Finetune SmolVLA title: "Policies" - sections: - local: hope_jr title: Hope Jr - local: so101 title: SO-101 - local: so100 title: SO-100 - local: koch title: Koch v1.1 - local: lekiwi title: LeKiwi title: "Robots" - sections: - local: notebooks title: Notebooks title: "Resources" - sections: - local: contributing title: Contribute to LeRobot - local: backwardcomp title: Backward compatibility title: "About"
lerobot/docs/source/_toctree.yml/0
{ "file_path": "lerobot/docs/source/_toctree.yml", "repo_id": "lerobot", "token_count": 383 }
205
import time

from lerobot.robots.lekiwi import LeKiwiClient, LeKiwiClientConfig
from lerobot.teleoperators.keyboard.teleop_keyboard import KeyboardTeleop, KeyboardTeleopConfig
from lerobot.teleoperators.so100_leader import SO100Leader, SO100LeaderConfig
from lerobot.utils.robot_utils import busy_wait
from lerobot.utils.visualization_utils import _init_rerun, log_rerun_data

FPS = 30

# Create the robot and teleoperator configurations
robot_config = LeKiwiClientConfig(remote_ip="172.18.134.136", id="my_lekiwi")
teleop_arm_config = SO100LeaderConfig(port="/dev/tty.usbmodem585A0077581", id="my_awesome_leader_arm")
keyboard_config = KeyboardTeleopConfig(id="my_laptop_keyboard")

robot = LeKiwiClient(robot_config)
leader_arm = SO100Leader(teleop_arm_config)
keyboard = KeyboardTeleop(keyboard_config)

# To connect, you should already have this script running on LeKiwi: `python -m lerobot.robots.lekiwi.lekiwi_host --robot.id=my_awesome_kiwi`
robot.connect()
leader_arm.connect()
keyboard.connect()

_init_rerun(session_name="lekiwi_teleop")

if not robot.is_connected or not leader_arm.is_connected or not keyboard.is_connected:
    raise ValueError("Robot, leader arm or keyboard is not connected!")

while True:
    t0 = time.perf_counter()

    observation = robot.get_observation()

    arm_action = leader_arm.get_action()
    arm_action = {f"arm_{k}": v for k, v in arm_action.items()}

    keyboard_keys = keyboard.get_action()
    base_action = robot._from_keyboard_to_base_action(keyboard_keys)

    log_rerun_data(observation, {**arm_action, **base_action})

    action = {**arm_action, **base_action} if len(base_action) > 0 else arm_action
    robot.send_action(action)

    busy_wait(max(1.0 / FPS - (time.perf_counter() - t0), 0.0))
lerobot/examples/lekiwi/teleoperate.py/0
{ "file_path": "lerobot/examples/lekiwi/teleoperate.py", "repo_id": "lerobot", "token_count": 634 }
206
#!/usr/bin/env python # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This file contains lists of available environments, dataset and policies to reflect the current state of LeRobot library. We do not want to import all the dependencies, but instead we keep it lightweight to ensure fast access to these variables. Example: ```python import lerobot print(lerobot.available_envs) print(lerobot.available_tasks_per_env) print(lerobot.available_datasets) print(lerobot.available_datasets_per_env) print(lerobot.available_real_world_datasets) print(lerobot.available_policies) print(lerobot.available_policies_per_env) print(lerobot.available_robots) print(lerobot.available_cameras) print(lerobot.available_motors) ``` When implementing a new dataset loadable with LeRobotDataset follow these steps: - Update `available_datasets_per_env` in `lerobot/__init__.py` When implementing a new environment (e.g. `gym_aloha`), follow these steps: - Update `available_tasks_per_env` and `available_datasets_per_env` in `lerobot/__init__.py` When implementing a new policy class (e.g. `DiffusionPolicy`) follow these steps: - Update `available_policies` and `available_policies_per_env`, in `lerobot/__init__.py` - Set the required `name` class attribute. - Update variables in `tests/test_available.py` by importing your new Policy class """ import itertools from lerobot.__version__ import __version__ # noqa: F401 # TODO(rcadene): Improve policies and envs. As of now, an item in `available_policies` # refers to a yaml file AND a modeling name. Same for `available_envs` which refers to # a yaml file AND a environment name. The difference should be more obvious. available_tasks_per_env = { "aloha": [ "AlohaInsertion-v0", "AlohaTransferCube-v0", ], "pusht": ["PushT-v0"], "xarm": ["XarmLift-v0"], } available_envs = list(available_tasks_per_env.keys()) available_datasets_per_env = { "aloha": [ "lerobot/aloha_sim_insertion_human", "lerobot/aloha_sim_insertion_scripted", "lerobot/aloha_sim_transfer_cube_human", "lerobot/aloha_sim_transfer_cube_scripted", "lerobot/aloha_sim_insertion_human_image", "lerobot/aloha_sim_insertion_scripted_image", "lerobot/aloha_sim_transfer_cube_human_image", "lerobot/aloha_sim_transfer_cube_scripted_image", ], # TODO(alexander-soare): Add "lerobot/pusht_keypoints". Right now we can't because this is too tightly # coupled with tests. 
"pusht": ["lerobot/pusht", "lerobot/pusht_image"], "xarm": [ "lerobot/xarm_lift_medium", "lerobot/xarm_lift_medium_replay", "lerobot/xarm_push_medium", "lerobot/xarm_push_medium_replay", "lerobot/xarm_lift_medium_image", "lerobot/xarm_lift_medium_replay_image", "lerobot/xarm_push_medium_image", "lerobot/xarm_push_medium_replay_image", ], } available_real_world_datasets = [ "lerobot/aloha_mobile_cabinet", "lerobot/aloha_mobile_chair", "lerobot/aloha_mobile_elevator", "lerobot/aloha_mobile_shrimp", "lerobot/aloha_mobile_wash_pan", "lerobot/aloha_mobile_wipe_wine", "lerobot/aloha_static_battery", "lerobot/aloha_static_candy", "lerobot/aloha_static_coffee", "lerobot/aloha_static_coffee_new", "lerobot/aloha_static_cups_open", "lerobot/aloha_static_fork_pick_up", "lerobot/aloha_static_pingpong_test", "lerobot/aloha_static_pro_pencil", "lerobot/aloha_static_screw_driver", "lerobot/aloha_static_tape", "lerobot/aloha_static_thread_velcro", "lerobot/aloha_static_towel", "lerobot/aloha_static_vinh_cup", "lerobot/aloha_static_vinh_cup_left", "lerobot/aloha_static_ziploc_slide", "lerobot/umi_cup_in_the_wild", "lerobot/unitreeh1_fold_clothes", "lerobot/unitreeh1_rearrange_objects", "lerobot/unitreeh1_two_robot_greeting", "lerobot/unitreeh1_warehouse", "lerobot/nyu_rot_dataset", "lerobot/utokyo_saytap", "lerobot/imperialcollege_sawyer_wrist_cam", "lerobot/utokyo_xarm_bimanual", "lerobot/tokyo_u_lsmo", "lerobot/utokyo_pr2_opening_fridge", "lerobot/cmu_franka_exploration_dataset", "lerobot/cmu_stretch", "lerobot/asu_table_top", "lerobot/utokyo_pr2_tabletop_manipulation", "lerobot/utokyo_xarm_pick_and_place", "lerobot/ucsd_kitchen_dataset", "lerobot/austin_buds_dataset", "lerobot/dlr_sara_grid_clamp", "lerobot/conq_hose_manipulation", "lerobot/columbia_cairlab_pusht_real", "lerobot/dlr_sara_pour", "lerobot/dlr_edan_shared_control", "lerobot/ucsd_pick_and_place_dataset", "lerobot/berkeley_cable_routing", "lerobot/nyu_franka_play_dataset", "lerobot/austin_sirius_dataset", "lerobot/cmu_play_fusion", "lerobot/berkeley_gnm_sac_son", "lerobot/nyu_door_opening_surprising_effectiveness", "lerobot/berkeley_fanuc_manipulation", "lerobot/jaco_play", "lerobot/viola", "lerobot/kaist_nonprehensile", "lerobot/berkeley_mvp", "lerobot/uiuc_d3field", "lerobot/berkeley_gnm_recon", "lerobot/austin_sailor_dataset", "lerobot/utaustin_mutex", "lerobot/roboturk", "lerobot/stanford_hydra_dataset", "lerobot/berkeley_autolab_ur5", "lerobot/stanford_robocook", "lerobot/toto", "lerobot/fmb", "lerobot/droid_100", "lerobot/berkeley_rpt", "lerobot/stanford_kuka_multimodal_dataset", "lerobot/iamlab_cmu_pickup_insert", "lerobot/taco_play", "lerobot/berkeley_gnm_cory_hall", "lerobot/usc_cloth_sim", ] available_datasets = sorted( set(itertools.chain(*available_datasets_per_env.values(), available_real_world_datasets)) ) # lists all available policies from `lerobot/policies` available_policies = ["act", "diffusion", "tdmpc", "vqbet"] # lists all available robots from `lerobot/robots` available_robots = [ "koch", "koch_bimanual", "aloha", "so100", "so101", ] # lists all available cameras from `lerobot/cameras` available_cameras = [ "opencv", "intelrealsense", ] # lists all available motors from `lerobot/motors` available_motors = [ "dynamixel", "feetech", ] # keys and values refer to yaml files available_policies_per_env = { "aloha": ["act"], "pusht": ["diffusion", "vqbet"], "xarm": ["tdmpc"], "koch_real": ["act_koch_real"], "aloha_real": ["act_aloha_real"], } env_task_pairs = [(env, task) for env, tasks in available_tasks_per_env.items() for 
task in tasks] env_dataset_pairs = [ (env, dataset) for env, datasets in available_datasets_per_env.items() for dataset in datasets ] env_dataset_policy_triplets = [ (env, dataset, policy) for env, datasets in available_datasets_per_env.items() for dataset in datasets for policy in available_policies_per_env[env] ]
lerobot/src/lerobot/__init__.py/0
{ "file_path": "lerobot/src/lerobot/__init__.py", "repo_id": "lerobot", "token_count": 3211 }
207
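Since the module only exposes plain lists and dicts, they can be queried directly, as its docstring suggests. A small sketch (assuming `lerobot` is installed):

```python
# Quick look-ups over the registry lists defined above (assumes lerobot is installed).
import lerobot

# Which policies are paired with the PushT environment?
print(lerobot.available_policies_per_env["pusht"])  # ['diffusion', 'vqbet']

# Every (env, task) combination, as precomputed in `env_task_pairs`.
for env, task in lerobot.env_task_pairs:
    print(f"{env}: {task}")

# Check whether a hub dataset is part of the curated list.
print("lerobot/pusht" in lerobot.available_datasets)
```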
# Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import abc import builtins import json import logging import os import tempfile from dataclasses import dataclass, field from pathlib import Path from typing import TypeVar import draccus from huggingface_hub import hf_hub_download from huggingface_hub.constants import CONFIG_NAME from huggingface_hub.errors import HfHubHTTPError from lerobot.configs.types import FeatureType, NormalizationMode, PolicyFeature from lerobot.constants import ACTION, OBS_STATE from lerobot.optim.optimizers import OptimizerConfig from lerobot.optim.schedulers import LRSchedulerConfig from lerobot.utils.hub import HubMixin from lerobot.utils.utils import auto_select_torch_device, is_amp_available, is_torch_device_available T = TypeVar("T", bound="PreTrainedConfig") @dataclass class PreTrainedConfig(draccus.ChoiceRegistry, HubMixin, abc.ABC): """ Base configuration class for policy models. Args: n_obs_steps: Number of environment steps worth of observations to pass to the policy (takes the current step and additional steps going back). input_shapes: A dictionary defining the shapes of the input data for the policy. output_shapes: A dictionary defining the shapes of the output data for the policy. input_normalization_modes: A dictionary with key representing the modality and the value specifies the normalization mode to apply. output_normalization_modes: Similar dictionary as `input_normalization_modes`, but to unnormalize to the original scale. """ n_obs_steps: int = 1 normalization_mapping: dict[str, NormalizationMode] = field(default_factory=dict) input_features: dict[str, PolicyFeature] = field(default_factory=dict) output_features: dict[str, PolicyFeature] = field(default_factory=dict) device: str | None = None # cuda | cpu | mp # `use_amp` determines whether to use Automatic Mixed Precision (AMP) for training and evaluation. With AMP, # automatic gradient scaling is used. use_amp: bool = False push_to_hub: bool = True repo_id: str | None = None # Upload on private repository on the Hugging Face hub. private: bool | None = None # Add tags to your policy on the hub. tags: list[str] | None = None # Add tags to your policy on the hub. license: str | None = None def __post_init__(self): self.pretrained_path = None if not self.device or not is_torch_device_available(self.device): auto_device = auto_select_torch_device() logging.warning(f"Device '{self.device}' is not available. Switching to '{auto_device}'.") self.device = auto_device.type # Automatically deactivate AMP if necessary if self.use_amp and not is_amp_available(self.device): logging.warning( f"Automatic Mixed Precision (amp) is not available on device '{self.device}'. Deactivating AMP." 
) self.use_amp = False @property def type(self) -> str: return self.get_choice_name(self.__class__) @property @abc.abstractmethod def observation_delta_indices(self) -> list | None: raise NotImplementedError @property @abc.abstractmethod def action_delta_indices(self) -> list | None: raise NotImplementedError @property @abc.abstractmethod def reward_delta_indices(self) -> list | None: raise NotImplementedError @abc.abstractmethod def get_optimizer_preset(self) -> OptimizerConfig: raise NotImplementedError @abc.abstractmethod def get_scheduler_preset(self) -> LRSchedulerConfig | None: raise NotImplementedError @abc.abstractmethod def validate_features(self) -> None: raise NotImplementedError @property def robot_state_feature(self) -> PolicyFeature | None: for ft_name, ft in self.input_features.items(): if ft.type is FeatureType.STATE and ft_name == OBS_STATE: return ft return None @property def env_state_feature(self) -> PolicyFeature | None: for _, ft in self.input_features.items(): if ft.type is FeatureType.ENV: return ft return None @property def image_features(self) -> dict[str, PolicyFeature]: return {key: ft for key, ft in self.input_features.items() if ft.type is FeatureType.VISUAL} @property def action_feature(self) -> PolicyFeature | None: for ft_name, ft in self.output_features.items(): if ft.type is FeatureType.ACTION and ft_name == ACTION: return ft return None def _save_pretrained(self, save_directory: Path) -> None: with open(save_directory / CONFIG_NAME, "w") as f, draccus.config_type("json"): draccus.dump(self, f, indent=4) @classmethod def from_pretrained( cls: builtins.type[T], pretrained_name_or_path: str | Path, *, force_download: bool = False, resume_download: bool = None, proxies: dict | None = None, token: str | bool | None = None, cache_dir: str | Path | None = None, local_files_only: bool = False, revision: str | None = None, **policy_kwargs, ) -> T: model_id = str(pretrained_name_or_path) config_file: str | None = None if Path(model_id).is_dir(): if CONFIG_NAME in os.listdir(model_id): config_file = os.path.join(model_id, CONFIG_NAME) else: print(f"{CONFIG_NAME} not found in {Path(model_id).resolve()}") else: try: config_file = hf_hub_download( repo_id=model_id, filename=CONFIG_NAME, revision=revision, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, token=token, local_files_only=local_files_only, ) except HfHubHTTPError as e: raise FileNotFoundError( f"{CONFIG_NAME} not found on the HuggingFace Hub in {model_id}" ) from e # HACK: Parse the original config to get the config subclass, so that we can # apply cli overrides. # This is very ugly, ideally we'd like to be able to do that natively with draccus # something like --policy.path (in addition to --policy.type) with draccus.config_type("json"): orig_config = draccus.parse(cls, config_file, args=[]) with open(config_file) as f: config = json.load(f) config.pop("type") with tempfile.NamedTemporaryFile("w+") as f: json.dump(config, f) config_file = f.name f.flush() cli_overrides = policy_kwargs.pop("cli_overrides", []) with draccus.config_type("json"): return draccus.parse(orig_config.__class__, config_file, args=cli_overrides)
lerobot/src/lerobot/configs/policies.py/0
{ "file_path": "lerobot/src/lerobot/configs/policies.py", "repo_id": "lerobot", "token_count": 3141 }
208
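`PreTrainedConfig` is abstract, so every policy defines a concrete subclass that fills in the delta-index properties and the optimizer/scheduler presets. The sketch below shows a minimal subclass; the `register_subclass` decorator and the `AdamWConfig` import reflect how lerobot policy configs are typically declared, but both are assumptions here rather than something shown in the file above.

```python
# Hedged sketch: a minimal concrete policy config. The register_subclass decorator and
# the AdamWConfig import are assumptions about the surrounding library, not part of
# the file above.
from dataclasses import dataclass

from lerobot.configs.policies import PreTrainedConfig
from lerobot.optim.optimizers import AdamWConfig, OptimizerConfig  # AdamWConfig assumed to exist
from lerobot.optim.schedulers import LRSchedulerConfig


@PreTrainedConfig.register_subclass("my_policy")  # draccus ChoiceRegistry hook (assumed API)
@dataclass
class MyPolicyConfig(PreTrainedConfig):
    n_action_steps: int = 10

    # The abstract properties tell the dataset loader which temporal offsets to fetch.
    @property
    def observation_delta_indices(self) -> list | None:
        return None

    @property
    def action_delta_indices(self) -> list | None:
        return list(range(self.n_action_steps))

    @property
    def reward_delta_indices(self) -> list | None:
        return None

    def get_optimizer_preset(self) -> OptimizerConfig:
        return AdamWConfig(lr=1e-4)

    def get_scheduler_preset(self) -> LRSchedulerConfig | None:
        return None

    def validate_features(self) -> None:
        if not self.input_features:
            raise ValueError("MyPolicyConfig expects at least one input feature.")
```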
#!/usr/bin/env python # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This script will help you convert any LeRobot dataset already pushed to the hub from codebase version 1.6 to 2.0. You will be required to provide the 'tasks', which is a short but accurate description in plain English for each of the task performed in the dataset. This will allow to easily train models with task-conditioning. We support 3 different scenarios for these tasks (see instructions below): 1. Single task dataset: all episodes of your dataset have the same single task. 2. Single task episodes: the episodes of your dataset each contain a single task but they can differ from one episode to the next. 3. Multi task episodes: episodes of your dataset may each contain several different tasks. Can you can also provide a robot config .yaml file (not mandatory) to this script via the option '--robot-config' so that it writes information about the robot (robot type, motors names) this dataset was recorded with. For now, only Aloha/Koch type robots are supported with this option. # 1. Single task dataset If your dataset contains a single task, you can simply provide it directly via the CLI with the '--single-task' option. Examples: ```bash python -m lerobot.datasets.v2.convert_dataset_v1_to_v2 \ --repo-id lerobot/aloha_sim_insertion_human_image \ --single-task "Insert the peg into the socket." \ --robot-config lerobot/configs/robot/aloha.yaml \ --local-dir data ``` ```bash python -m lerobot.datasets.v2.convert_dataset_v1_to_v2 \ --repo-id aliberts/koch_tutorial \ --single-task "Pick the Lego block and drop it in the box on the right." \ --robot-config lerobot/configs/robot/koch.yaml \ --local-dir data ``` # 2. Single task episodes If your dataset is a multi-task dataset, you have two options to provide the tasks to this script: - If your dataset already contains a language instruction column in its parquet file, you can simply provide this column's name with the '--tasks-col' arg. Example: ```bash python -m lerobot.datasets.v2.convert_dataset_v1_to_v2 \ --repo-id lerobot/stanford_kuka_multimodal_dataset \ --tasks-col "language_instruction" \ --local-dir data ``` - If your dataset doesn't contain a language instruction, you should provide the path to a .json file with the '--tasks-path' arg. This file should have the following structure where keys correspond to each episode_index in the dataset, and values are the language instruction for that episode. Example: ```json { "0": "Do something", "1": "Do something else", "2": "Do something", "3": "Go there", ... } ``` # 3. Multi task episodes If you have multiple tasks per episodes, your dataset should contain a language instruction column in its parquet file, and you must provide this column's name with the '--tasks-col' arg. 
Example: ```bash python -m lerobot.datasets.v2.convert_dataset_v1_to_v2 \ --repo-id lerobot/stanford_kuka_multimodal_dataset \ --tasks-col "language_instruction" \ --local-dir data ``` """ import argparse import contextlib import filecmp import json import logging import math import shutil import subprocess import tempfile from pathlib import Path import datasets import pyarrow.compute as pc import pyarrow.parquet as pq import torch from datasets import Dataset from huggingface_hub import HfApi from huggingface_hub.errors import EntryNotFoundError, HfHubHTTPError from safetensors.torch import load_file from lerobot.datasets.utils import ( DEFAULT_CHUNK_SIZE, DEFAULT_PARQUET_PATH, DEFAULT_VIDEO_PATH, EPISODES_PATH, INFO_PATH, STATS_PATH, TASKS_PATH, create_branch, create_lerobot_dataset_card, flatten_dict, get_safe_version, load_json, unflatten_dict, write_json, write_jsonlines, ) from lerobot.datasets.video_utils import ( VideoFrame, # noqa: F401 get_image_pixel_channels, get_video_info, ) from lerobot.robots import RobotConfig V16 = "v1.6" V20 = "v2.0" GITATTRIBUTES_REF = "aliberts/gitattributes_reference" V1_VIDEO_FILE = "{video_key}_episode_{episode_index:06d}.mp4" V1_INFO_PATH = "meta_data/info.json" V1_STATS_PATH = "meta_data/stats.safetensors" def parse_robot_config(robot_cfg: RobotConfig) -> tuple[str, dict]: if robot_cfg.type in ["aloha", "koch"]: state_names = [ f"{arm}_{motor}" if len(robot_cfg.follower_arms) > 1 else motor for arm in robot_cfg.follower_arms for motor in robot_cfg.follower_arms[arm].motors ] action_names = [ # f"{arm}_{motor}" for arm in ["left", "right"] for motor in robot_cfg["leader_arms"][arm]["motors"] f"{arm}_{motor}" if len(robot_cfg.leader_arms) > 1 else motor for arm in robot_cfg.leader_arms for motor in robot_cfg.leader_arms[arm].motors ] # elif robot_cfg["robot_type"] == "stretch3": TODO else: raise NotImplementedError( "Please provide robot_config={'robot_type': ..., 'names': ...} directly to convert_dataset()." 
) return { "robot_type": robot_cfg.type, "names": { "observation.state": state_names, "observation.effort": state_names, "action": action_names, }, } def convert_stats_to_json(v1_dir: Path, v2_dir: Path) -> None: safetensor_path = v1_dir / V1_STATS_PATH stats = load_file(safetensor_path) serialized_stats = {key: value.tolist() for key, value in stats.items()} serialized_stats = unflatten_dict(serialized_stats) json_path = v2_dir / STATS_PATH json_path.parent.mkdir(exist_ok=True, parents=True) with open(json_path, "w") as f: json.dump(serialized_stats, f, indent=4) # Sanity check with open(json_path) as f: stats_json = json.load(f) stats_json = flatten_dict(stats_json) stats_json = {key: torch.tensor(value) for key, value in stats_json.items()} for key in stats: torch.testing.assert_close(stats_json[key], stats[key]) def get_features_from_hf_dataset( dataset: Dataset, robot_config: RobotConfig | None = None ) -> dict[str, list]: robot_config = parse_robot_config(robot_config) features = {} for key, ft in dataset.features.items(): if isinstance(ft, datasets.Value): dtype = ft.dtype shape = (1,) names = None if isinstance(ft, datasets.Sequence): assert isinstance(ft.feature, datasets.Value) dtype = ft.feature.dtype shape = (ft.length,) motor_names = ( robot_config["names"][key] if robot_config else [f"motor_{i}" for i in range(ft.length)] ) assert len(motor_names) == shape[0] names = {"motors": motor_names} elif isinstance(ft, datasets.Image): dtype = "image" image = dataset[0][key] # Assuming first row channels = get_image_pixel_channels(image) shape = (image.height, image.width, channels) names = ["height", "width", "channels"] elif ft._type == "VideoFrame": dtype = "video" shape = None # Add shape later names = ["height", "width", "channels"] features[key] = { "dtype": dtype, "shape": shape, "names": names, } return features def add_task_index_by_episodes(dataset: Dataset, tasks_by_episodes: dict) -> tuple[Dataset, list[str]]: df = dataset.to_pandas() tasks = list(set(tasks_by_episodes.values())) tasks_to_task_index = {task: task_idx for task_idx, task in enumerate(tasks)} episodes_to_task_index = {ep_idx: tasks_to_task_index[task] for ep_idx, task in tasks_by_episodes.items()} df["task_index"] = df["episode_index"].map(episodes_to_task_index).astype(int) features = dataset.features features["task_index"] = datasets.Value(dtype="int64") dataset = Dataset.from_pandas(df, features=features, split="train") return dataset, tasks def add_task_index_from_tasks_col( dataset: Dataset, tasks_col: str ) -> tuple[Dataset, dict[str, list[str]], list[str]]: df = dataset.to_pandas() # HACK: This is to clean some of the instructions in our version of Open X datasets prefix_to_clean = "tf.Tensor(b'" suffix_to_clean = "', shape=(), dtype=string)" df[tasks_col] = df[tasks_col].str.removeprefix(prefix_to_clean).str.removesuffix(suffix_to_clean) # Create task_index col tasks_by_episode = df.groupby("episode_index")[tasks_col].unique().apply(lambda x: x.tolist()).to_dict() tasks = df[tasks_col].unique().tolist() tasks_to_task_index = {task: idx for idx, task in enumerate(tasks)} df["task_index"] = df[tasks_col].map(tasks_to_task_index).astype(int) # Build the dataset back from df features = dataset.features features["task_index"] = datasets.Value(dtype="int64") dataset = Dataset.from_pandas(df, features=features, split="train") dataset = dataset.remove_columns(tasks_col) return dataset, tasks, tasks_by_episode def split_parquet_by_episodes( dataset: Dataset, total_episodes: int, total_chunks: int, output_dir: 
Path, ) -> list: table = dataset.data.table episode_lengths = [] for ep_chunk in range(total_chunks): ep_chunk_start = DEFAULT_CHUNK_SIZE * ep_chunk ep_chunk_end = min(DEFAULT_CHUNK_SIZE * (ep_chunk + 1), total_episodes) chunk_dir = "/".join(DEFAULT_PARQUET_PATH.split("/")[:-1]).format(episode_chunk=ep_chunk) (output_dir / chunk_dir).mkdir(parents=True, exist_ok=True) for ep_idx in range(ep_chunk_start, ep_chunk_end): ep_table = table.filter(pc.equal(table["episode_index"], ep_idx)) episode_lengths.insert(ep_idx, len(ep_table)) output_file = output_dir / DEFAULT_PARQUET_PATH.format( episode_chunk=ep_chunk, episode_index=ep_idx ) pq.write_table(ep_table, output_file) return episode_lengths def move_videos( repo_id: str, video_keys: list[str], total_episodes: int, total_chunks: int, work_dir: Path, clean_gittatributes: Path, branch: str = "main", ) -> None: """ HACK: Since HfApi() doesn't provide a way to move files directly in a repo, this function will run git commands to fetch git lfs video files references to move them into subdirectories without having to actually download them. """ _lfs_clone(repo_id, work_dir, branch) videos_moved = False video_files = [str(f.relative_to(work_dir)) for f in work_dir.glob("videos*/*.mp4")] if len(video_files) == 0: video_files = [str(f.relative_to(work_dir)) for f in work_dir.glob("videos*/*/*/*.mp4")] videos_moved = True # Videos have already been moved assert len(video_files) == total_episodes * len(video_keys) lfs_untracked_videos = _get_lfs_untracked_videos(work_dir, video_files) current_gittatributes = work_dir / ".gitattributes" if not filecmp.cmp(current_gittatributes, clean_gittatributes, shallow=False): fix_gitattributes(work_dir, current_gittatributes, clean_gittatributes) if lfs_untracked_videos: fix_lfs_video_files_tracking(work_dir, video_files) if videos_moved: return video_dirs = sorted(work_dir.glob("videos*/")) for ep_chunk in range(total_chunks): ep_chunk_start = DEFAULT_CHUNK_SIZE * ep_chunk ep_chunk_end = min(DEFAULT_CHUNK_SIZE * (ep_chunk + 1), total_episodes) for vid_key in video_keys: chunk_dir = "/".join(DEFAULT_VIDEO_PATH.split("/")[:-1]).format( episode_chunk=ep_chunk, video_key=vid_key ) (work_dir / chunk_dir).mkdir(parents=True, exist_ok=True) for ep_idx in range(ep_chunk_start, ep_chunk_end): target_path = DEFAULT_VIDEO_PATH.format( episode_chunk=ep_chunk, video_key=vid_key, episode_index=ep_idx ) video_file = V1_VIDEO_FILE.format(video_key=vid_key, episode_index=ep_idx) if len(video_dirs) == 1: video_path = video_dirs[0] / video_file else: for dir in video_dirs: if (dir / video_file).is_file(): video_path = dir / video_file break video_path.rename(work_dir / target_path) commit_message = "Move video files into chunk subdirectories" subprocess.run(["git", "add", "."], cwd=work_dir, check=True) subprocess.run(["git", "commit", "-m", commit_message], cwd=work_dir, check=True) subprocess.run(["git", "push"], cwd=work_dir, check=True) def fix_lfs_video_files_tracking(work_dir: Path, lfs_untracked_videos: list[str]) -> None: """ HACK: This function fixes the tracking by git lfs which was not properly set on some repos. In that case, there's no other option than to download the actual files and reupload them with lfs tracking. 
""" for i in range(0, len(lfs_untracked_videos), 100): files = lfs_untracked_videos[i : i + 100] try: subprocess.run(["git", "rm", "--cached", *files], cwd=work_dir, capture_output=True, check=True) except subprocess.CalledProcessError as e: print("git rm --cached ERROR:") print(e.stderr) subprocess.run(["git", "add", *files], cwd=work_dir, check=True) commit_message = "Track video files with git lfs" subprocess.run(["git", "commit", "-m", commit_message], cwd=work_dir, check=True) subprocess.run(["git", "push"], cwd=work_dir, check=True) def fix_gitattributes(work_dir: Path, current_gittatributes: Path, clean_gittatributes: Path) -> None: shutil.copyfile(clean_gittatributes, current_gittatributes) subprocess.run(["git", "add", ".gitattributes"], cwd=work_dir, check=True) subprocess.run(["git", "commit", "-m", "Fix .gitattributes"], cwd=work_dir, check=True) subprocess.run(["git", "push"], cwd=work_dir, check=True) def _lfs_clone(repo_id: str, work_dir: Path, branch: str) -> None: subprocess.run(["git", "lfs", "install"], cwd=work_dir, check=True) repo_url = f"https://huggingface.co/datasets/{repo_id}" env = {"GIT_LFS_SKIP_SMUDGE": "1"} # Prevent downloading LFS files subprocess.run( ["git", "clone", "--branch", branch, "--single-branch", "--depth", "1", repo_url, str(work_dir)], check=True, env=env, ) def _get_lfs_untracked_videos(work_dir: Path, video_files: list[str]) -> list[str]: lfs_tracked_files = subprocess.run( ["git", "lfs", "ls-files", "-n"], cwd=work_dir, capture_output=True, text=True, check=True ) lfs_tracked_files = set(lfs_tracked_files.stdout.splitlines()) return [f for f in video_files if f not in lfs_tracked_files] def get_videos_info(repo_id: str, local_dir: Path, video_keys: list[str], branch: str) -> dict: # Assumes first episode video_files = [ DEFAULT_VIDEO_PATH.format(episode_chunk=0, video_key=vid_key, episode_index=0) for vid_key in video_keys ] hub_api = HfApi() hub_api.snapshot_download( repo_id=repo_id, repo_type="dataset", local_dir=local_dir, revision=branch, allow_patterns=video_files ) videos_info_dict = {} for vid_key, vid_path in zip(video_keys, video_files, strict=True): videos_info_dict[vid_key] = get_video_info(local_dir / vid_path) return videos_info_dict def convert_dataset( repo_id: str, local_dir: Path, single_task: str | None = None, tasks_path: Path | None = None, tasks_col: Path | None = None, robot_config: RobotConfig | None = None, test_branch: str | None = None, **card_kwargs, ): v1 = get_safe_version(repo_id, V16) v1x_dir = local_dir / V16 / repo_id v20_dir = local_dir / V20 / repo_id v1x_dir.mkdir(parents=True, exist_ok=True) v20_dir.mkdir(parents=True, exist_ok=True) hub_api = HfApi() hub_api.snapshot_download( repo_id=repo_id, repo_type="dataset", revision=v1, local_dir=v1x_dir, ignore_patterns="videos*/" ) branch = "main" if test_branch: branch = test_branch create_branch(repo_id=repo_id, branch=test_branch, repo_type="dataset") metadata_v1 = load_json(v1x_dir / V1_INFO_PATH) dataset = datasets.load_dataset("parquet", data_dir=v1x_dir / "data", split="train") features = get_features_from_hf_dataset(dataset, robot_config) video_keys = [key for key, ft in features.items() if ft["dtype"] == "video"] if single_task and "language_instruction" in dataset.column_names: logging.warning( "'single_task' provided but 'language_instruction' tasks_col found. 
Using 'language_instruction'.", ) single_task = None tasks_col = "language_instruction" # Episodes & chunks episode_indices = sorted(dataset.unique("episode_index")) total_episodes = len(episode_indices) assert episode_indices == list(range(total_episodes)) total_videos = total_episodes * len(video_keys) total_chunks = total_episodes // DEFAULT_CHUNK_SIZE if total_episodes % DEFAULT_CHUNK_SIZE != 0: total_chunks += 1 # Tasks if single_task: tasks_by_episodes = dict.fromkeys(episode_indices, single_task) dataset, tasks = add_task_index_by_episodes(dataset, tasks_by_episodes) tasks_by_episodes = {ep_idx: [task] for ep_idx, task in tasks_by_episodes.items()} elif tasks_path: tasks_by_episodes = load_json(tasks_path) tasks_by_episodes = {int(ep_idx): task for ep_idx, task in tasks_by_episodes.items()} dataset, tasks = add_task_index_by_episodes(dataset, tasks_by_episodes) tasks_by_episodes = {ep_idx: [task] for ep_idx, task in tasks_by_episodes.items()} elif tasks_col: dataset, tasks, tasks_by_episodes = add_task_index_from_tasks_col(dataset, tasks_col) else: raise ValueError assert set(tasks) == {task for ep_tasks in tasks_by_episodes.values() for task in ep_tasks} tasks = [{"task_index": task_idx, "task": task} for task_idx, task in enumerate(tasks)] write_jsonlines(tasks, v20_dir / TASKS_PATH) features["task_index"] = { "dtype": "int64", "shape": (1,), "names": None, } # Videos if video_keys: assert metadata_v1.get("video", False) dataset = dataset.remove_columns(video_keys) clean_gitattr = Path( hub_api.hf_hub_download( repo_id=GITATTRIBUTES_REF, repo_type="dataset", local_dir=local_dir, filename=".gitattributes" ) ).absolute() with tempfile.TemporaryDirectory() as tmp_video_dir: move_videos( repo_id, video_keys, total_episodes, total_chunks, Path(tmp_video_dir), clean_gitattr, branch ) videos_info = get_videos_info(repo_id, v1x_dir, video_keys=video_keys, branch=branch) for key in video_keys: features[key]["shape"] = ( videos_info[key].pop("video.height"), videos_info[key].pop("video.width"), videos_info[key].pop("video.channels"), ) features[key]["video_info"] = videos_info[key] assert math.isclose(videos_info[key]["video.fps"], metadata_v1["fps"], rel_tol=1e-3) if "encoding" in metadata_v1: assert videos_info[key]["video.pix_fmt"] == metadata_v1["encoding"]["pix_fmt"] else: assert metadata_v1.get("video", 0) == 0 videos_info = None # Split data into 1 parquet file by episode episode_lengths = split_parquet_by_episodes(dataset, total_episodes, total_chunks, v20_dir) if robot_config is not None: robot_type = robot_config.type repo_tags = [robot_type] else: robot_type = "unknown" repo_tags = None # Episodes episodes = [ {"episode_index": ep_idx, "tasks": tasks_by_episodes[ep_idx], "length": episode_lengths[ep_idx]} for ep_idx in episode_indices ] write_jsonlines(episodes, v20_dir / EPISODES_PATH) # Assemble metadata v2.0 metadata_v2_0 = { "codebase_version": V20, "robot_type": robot_type, "total_episodes": total_episodes, "total_frames": len(dataset), "total_tasks": len(tasks), "total_videos": total_videos, "total_chunks": total_chunks, "chunks_size": DEFAULT_CHUNK_SIZE, "fps": metadata_v1["fps"], "splits": {"train": f"0:{total_episodes}"}, "data_path": DEFAULT_PARQUET_PATH, "video_path": DEFAULT_VIDEO_PATH if video_keys else None, "features": features, } write_json(metadata_v2_0, v20_dir / INFO_PATH) convert_stats_to_json(v1x_dir, v20_dir) card = create_lerobot_dataset_card(tags=repo_tags, dataset_info=metadata_v2_0, **card_kwargs) with contextlib.suppress(EntryNotFoundError, 
HfHubHTTPError): hub_api.delete_folder(repo_id=repo_id, path_in_repo="data", repo_type="dataset", revision=branch) with contextlib.suppress(EntryNotFoundError, HfHubHTTPError): hub_api.delete_folder(repo_id=repo_id, path_in_repo="meta_data", repo_type="dataset", revision=branch) with contextlib.suppress(EntryNotFoundError, HfHubHTTPError): hub_api.delete_folder(repo_id=repo_id, path_in_repo="meta", repo_type="dataset", revision=branch) hub_api.upload_folder( repo_id=repo_id, path_in_repo="data", folder_path=v20_dir / "data", repo_type="dataset", revision=branch, ) hub_api.upload_folder( repo_id=repo_id, path_in_repo="meta", folder_path=v20_dir / "meta", repo_type="dataset", revision=branch, ) card.push_to_hub(repo_id=repo_id, repo_type="dataset", revision=branch) if not test_branch: create_branch(repo_id=repo_id, branch=V20, repo_type="dataset") def make_robot_config(robot_type: str, **kwargs) -> RobotConfig: if robot_type == "aloha": raise NotImplementedError # TODO elif robot_type == "koch_follower": from lerobot.robots.koch_follower import KochFollowerConfig return KochFollowerConfig(**kwargs) elif robot_type == "so100_follower": from lerobot.robots.so100_follower import SO100FollowerConfig return SO100FollowerConfig(**kwargs) elif robot_type == "stretch": from lerobot.robots.stretch3 import Stretch3RobotConfig return Stretch3RobotConfig(**kwargs) elif robot_type == "lekiwi": from lerobot.robots.lekiwi import LeKiwiConfig return LeKiwiConfig(**kwargs) else: raise ValueError(f"Robot type '{robot_type}' is not available.") def main(): parser = argparse.ArgumentParser() task_args = parser.add_mutually_exclusive_group(required=True) parser.add_argument( "--repo-id", type=str, required=True, help="Repository identifier on Hugging Face: a community or a user name `/` the name of the dataset (e.g. `lerobot/pusht`, `cadene/aloha_sim_insertion_human`).", ) task_args.add_argument( "--single-task", type=str, help="A short but accurate description of the single task performed in the dataset.", ) task_args.add_argument( "--tasks-col", type=str, help="The name of the column containing language instructions", ) task_args.add_argument( "--tasks-path", type=Path, help="The path to a .json file containing one language instruction for each episode_index", ) parser.add_argument( "--robot", type=str, default=None, help="Robot config used for the dataset during conversion (e.g. 'koch_follower', 'so100_follower', 'stretch', 'lekiwi')", ) parser.add_argument( "--local-dir", type=Path, default=None, help="Local directory to store the dataset during conversion. Defaults to /tmp/lerobot_dataset_v2", ) parser.add_argument( "--license", type=str, default="apache-2.0", help="Repo license. Must be one of https://huggingface.co/docs/hub/repositories-licenses. Defaults to apache-2.0.", ) parser.add_argument( "--test-branch", type=str, default=None, help="Repo branch to test your conversion first (e.g. 'v2.0.test')", ) args = parser.parse_args() if not args.local_dir: args.local_dir = Path("/tmp/lerobot_dataset_v2") if args.robot is not None: robot_config = make_robot_config(args.robot) del args.robot convert_dataset(**vars(args), robot_config=robot_config) if __name__ == "__main__": main()
lerobot/src/lerobot/datasets/v2/convert_dataset_v1_to_v2.py/0
{ "file_path": "lerobot/src/lerobot/datasets/v2/convert_dataset_v1_to_v2.py", "repo_id": "lerobot", "token_count": 10869 }
209
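The conversion script in the record above groups episodes into fixed-size chunks before writing one parquet file per episode. The standalone sketch below mirrors that indexing logic; the chunk size and the path template are illustrative assumptions, not the constants the script actually imports from lerobot.

import math

# Assumed values for illustration only; the script above imports DEFAULT_CHUNK_SIZE
# and DEFAULT_PARQUET_PATH from lerobot's dataset utilities.
CHUNK_SIZE = 1000
PARQUET_TEMPLATE = "data/chunk-{episode_chunk:03d}/episode_{episode_index:06d}.parquet"


def episode_parquet_path(episode_index: int) -> str:
    # Episodes are grouped into chunks of CHUNK_SIZE, and the chunk id appears in the path.
    episode_chunk = episode_index // CHUNK_SIZE
    return PARQUET_TEMPLATE.format(episode_chunk=episode_chunk, episode_index=episode_index)


def total_chunks(total_episodes: int) -> int:
    # Same result as the floor-division-plus-remainder computation in convert_dataset().
    return math.ceil(total_episodes / CHUNK_SIZE)


assert total_chunks(2500) == 3
assert episode_parquet_path(1042) == "data/chunk-001/episode_001042.parquet"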
#!/usr/bin/env python # Copyright 2024 Columbia Artificial Intelligence, Robotics Lab, # and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass, field from lerobot.configs.policies import PreTrainedConfig from lerobot.configs.types import NormalizationMode from lerobot.optim.optimizers import AdamConfig from lerobot.optim.schedulers import DiffuserSchedulerConfig @PreTrainedConfig.register_subclass("diffusion") @dataclass class DiffusionConfig(PreTrainedConfig): """Configuration class for DiffusionPolicy. Defaults are configured for training with PushT providing proprioceptive and single camera observations. The parameters you will most likely need to change are the ones which depend on the environment / sensors. Those are: `input_shapes` and `output_shapes`. Notes on the inputs and outputs: - "observation.state" is required as an input key. - Either: - At least one key starting with "observation.image" is required as an input. AND/OR - The key "observation.environment_state" is required as input. - If there are multiple keys beginning with "observation.image" they are treated as multiple camera views. Right now we only support all images having the same shape. - "action" is required as an output key. Args: n_obs_steps: Number of environment steps worth of observations to pass to the policy (takes the current step and additional steps going back). horizon: Diffusion model action prediction size as detailed in `DiffusionPolicy.select_action`. n_action_steps: The number of action steps to run in the environment for one invocation of the policy. See `DiffusionPolicy.select_action` for more details. input_shapes: A dictionary defining the shapes of the input data for the policy. The key represents the input data name, and the value is a list indicating the dimensions of the corresponding data. For example, "observation.image" refers to an input from a camera with dimensions [3, 96, 96], indicating it has three color channels and 96x96 resolution. Importantly, `input_shapes` doesn't include batch dimension or temporal dimension. output_shapes: A dictionary defining the shapes of the output data for the policy. The key represents the output data name, and the value is a list indicating the dimensions of the corresponding data. For example, "action" refers to an output shape of [14], indicating 14-dimensional actions. Importantly, `output_shapes` doesn't include batch dimension or temporal dimension. input_normalization_modes: A dictionary with key representing the modality (e.g. "observation.state"), and the value specifies the normalization mode to apply. The two available modes are "mean_std" which subtracts the mean and divides by the standard deviation and "min_max" which rescales to a [-1, 1] range. output_normalization_modes: Similar dictionary as `input_normalization_modes`, but to unnormalize to the original scale. Note that this is also used for normalizing the training targets.
vision_backbone: Name of the torchvision resnet backbone to use for encoding images. crop_shape: (H, W) shape to crop images to as a preprocessing step for the vision backbone. Must fit within the image size. If None, no cropping is done. crop_is_random: Whether the crop should be random at training time (it's always a center crop in eval mode). pretrained_backbone_weights: Pretrained weights from torchvision to initialize the backbone. `None` means no pretrained weights. use_group_norm: Whether to replace batch normalization with group normalization in the backbone. The group sizes are set to be about 16 (to be precise, feature_dim // 16). spatial_softmax_num_keypoints: Number of keypoints for SpatialSoftmax. use_separate_rgb_encoder_per_camera: Whether to use a separate RGB encoder for each camera view. down_dims: Feature dimension for each stage of temporal downsampling in the diffusion modeling Unet. You may provide a variable number of dimensions, therefore also controlling the degree of downsampling. kernel_size: The convolutional kernel size of the diffusion modeling Unet. n_groups: Number of groups used in the group norm of the Unet's convolutional blocks. diffusion_step_embed_dim: The Unet is conditioned on the diffusion timestep via a small non-linear network. This is the output dimension of that network, i.e., the embedding dimension. use_film_scale_modulation: FiLM (https://huggingface.co/papers/1709.07871) is used for the Unet conditioning. Bias modulation is used by default, while this parameter indicates whether to also use scale modulation. noise_scheduler_type: Name of the noise scheduler to use. Supported options: ["DDPM", "DDIM"]. num_train_timesteps: Number of diffusion steps for the forward diffusion schedule. beta_schedule: Name of the diffusion beta schedule as per DDPMScheduler from Hugging Face diffusers. beta_start: Beta value for the first forward-diffusion step. beta_end: Beta value for the last forward-diffusion step. prediction_type: The type of prediction that the diffusion modeling Unet makes. Choose from "epsilon" or "sample". These have equivalent outcomes from a latent variable modeling perspective, but "epsilon" has been shown to work better in many deep neural network settings. clip_sample: Whether to clip the sample to [-`clip_sample_range`, +`clip_sample_range`] for each denoising step at inference time. WARNING: you will need to make sure your action-space is normalized to fit within this range. clip_sample_range: The magnitude of the clipping range as described above. num_inference_steps: Number of reverse diffusion steps to use at inference time (steps are evenly spaced). If not provided, this defaults to be the same as `num_train_timesteps`. do_mask_loss_for_padding: Whether to mask the loss when there are copy-padded actions. See `LeRobotDataset` and `load_previous_and_future_frames` for more information. Note, this defaults to False as the original Diffusion Policy implementation does the same. """ # Inputs / output structure. n_obs_steps: int = 2 horizon: int = 16 n_action_steps: int = 8 normalization_mapping: dict[str, NormalizationMode] = field( default_factory=lambda: { "VISUAL": NormalizationMode.MEAN_STD, "STATE": NormalizationMode.MIN_MAX, "ACTION": NormalizationMode.MIN_MAX, } ) # The original implementation doesn't sample frames for the last 7 steps, # which avoids excessive padding and leads to improved training results. drop_n_last_frames: int = 7 # horizon - n_action_steps - n_obs_steps + 1 # Architecture / modeling.
# Vision backbone. vision_backbone: str = "resnet18" crop_shape: tuple[int, int] | None = (84, 84) crop_is_random: bool = True pretrained_backbone_weights: str | None = None use_group_norm: bool = True spatial_softmax_num_keypoints: int = 32 use_separate_rgb_encoder_per_camera: bool = False # Unet. down_dims: tuple[int, ...] = (512, 1024, 2048) kernel_size: int = 5 n_groups: int = 8 diffusion_step_embed_dim: int = 128 use_film_scale_modulation: bool = True # Noise scheduler. noise_scheduler_type: str = "DDPM" num_train_timesteps: int = 100 beta_schedule: str = "squaredcos_cap_v2" beta_start: float = 0.0001 beta_end: float = 0.02 prediction_type: str = "epsilon" clip_sample: bool = True clip_sample_range: float = 1.0 # Inference num_inference_steps: int | None = None # Loss computation do_mask_loss_for_padding: bool = False # Training presets optimizer_lr: float = 1e-4 optimizer_betas: tuple = (0.95, 0.999) optimizer_eps: float = 1e-8 optimizer_weight_decay: float = 1e-6 scheduler_name: str = "cosine" scheduler_warmup_steps: int = 500 def __post_init__(self): super().__post_init__() """Input validation (not exhaustive).""" if not self.vision_backbone.startswith("resnet"): raise ValueError( f"`vision_backbone` must be one of the ResNet variants. Got {self.vision_backbone}." ) supported_prediction_types = ["epsilon", "sample"] if self.prediction_type not in supported_prediction_types: raise ValueError( f"`prediction_type` must be one of {supported_prediction_types}. Got {self.prediction_type}." ) supported_noise_schedulers = ["DDPM", "DDIM"] if self.noise_scheduler_type not in supported_noise_schedulers: raise ValueError( f"`noise_scheduler_type` must be one of {supported_noise_schedulers}. " f"Got {self.noise_scheduler_type}." ) # Check that the horizon size and U-Net downsampling is compatible. # U-Net downsamples by 2 with each stage. downsampling_factor = 2 ** len(self.down_dims) if self.horizon % downsampling_factor != 0: raise ValueError( "The horizon should be an integer multiple of the downsampling factor (which is determined " f"by `len(down_dims)`). Got {self.horizon=} and {self.down_dims=}" ) def get_optimizer_preset(self) -> AdamConfig: return AdamConfig( lr=self.optimizer_lr, betas=self.optimizer_betas, eps=self.optimizer_eps, weight_decay=self.optimizer_weight_decay, ) def get_scheduler_preset(self) -> DiffuserSchedulerConfig: return DiffuserSchedulerConfig( name=self.scheduler_name, num_warmup_steps=self.scheduler_warmup_steps, ) def validate_features(self) -> None: if len(self.image_features) == 0 and self.env_state_feature is None: raise ValueError("You must provide at least one image or the environment state among the inputs.") if self.crop_shape is not None: for key, image_ft in self.image_features.items(): if self.crop_shape[0] > image_ft.shape[1] or self.crop_shape[1] > image_ft.shape[2]: raise ValueError( f"`crop_shape` should fit within the images shapes. Got {self.crop_shape} " f"for `crop_shape` and {image_ft.shape} for " f"`{key}`." ) # Check that all input images have the same shape. if len(self.image_features) > 0: first_image_key, first_image_ft = next(iter(self.image_features.items())) for key, image_ft in self.image_features.items(): if image_ft.shape != first_image_ft.shape: raise ValueError( f"`{key}` does not match `{first_image_key}`, but we expect all image shapes to match." 
) @property def observation_delta_indices(self) -> list: return list(range(1 - self.n_obs_steps, 1)) @property def action_delta_indices(self) -> list: return list(range(1 - self.n_obs_steps, 1 - self.n_obs_steps + self.horizon)) @property def reward_delta_indices(self) -> None: return None
lerobot/src/lerobot/policies/diffusion/configuration_diffusion.py/0
{ "file_path": "lerobot/src/lerobot/policies/diffusion/configuration_diffusion.py", "repo_id": "lerobot", "token_count": 4577 }
210
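The __post_init__ validation in the DiffusionConfig record above ties the action horizon to the depth of the U-Net. Below is a minimal, dependency-free sketch of that same check, using the default values that appear in the record; it is an illustration, not the library's validation code.

def check_horizon(horizon: int, down_dims: tuple[int, ...]) -> None:
    # Each entry in down_dims is one temporal downsampling stage that halves the horizon,
    # so the horizon must be divisible by 2 ** len(down_dims).
    downsampling_factor = 2 ** len(down_dims)
    if horizon % downsampling_factor != 0:
        raise ValueError(
            f"horizon={horizon} is not a multiple of {downsampling_factor} (2 ** len(down_dims))"
        )


check_horizon(16, (512, 1024, 2048))  # defaults above: 16 % 8 == 0, passes
# check_horizon(20, (512, 1024, 2048))  # would raise, since 20 % 8 != 0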
#!/usr/bin/env python # Copyright 2024 The HuggingFace Inc. team. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math from collections.abc import Callable from dataclasses import asdict from typing import Literal import einops import numpy as np import torch import torch.nn as nn import torch.nn.functional as F # noqa: N812 from torch import Tensor from torch.distributions import MultivariateNormal, TanhTransform, Transform, TransformedDistribution from lerobot.policies.normalize import NormalizeBuffer from lerobot.policies.pretrained import PreTrainedPolicy from lerobot.policies.sac.configuration_sac import SACConfig, is_image_feature from lerobot.policies.utils import get_device_from_parameters DISCRETE_DIMENSION_INDEX = -1 # Gripper is always the last dimension class SACPolicy( PreTrainedPolicy, ): config_class = SACConfig name = "sac" def __init__( self, config: SACConfig | None = None, dataset_stats: dict[str, dict[str, Tensor]] | None = None, ): super().__init__(config) config.validate_features() self.config = config # Determine action dimension and initialize all components continuous_action_dim = config.output_features["action"].shape[0] self._init_normalization(dataset_stats) self._init_encoders() self._init_critics(continuous_action_dim) self._init_actor(continuous_action_dim) self._init_temperature() def get_optim_params(self) -> dict: optim_params = { "actor": [ p for n, p in self.actor.named_parameters() if not n.startswith("encoder") or not self.shared_encoder ], "critic": self.critic_ensemble.parameters(), "temperature": self.log_alpha, } if self.config.num_discrete_actions is not None: optim_params["discrete_critic"] = self.discrete_critic.parameters() return optim_params def reset(self): """Reset the policy""" pass @torch.no_grad() def predict_action_chunk(self, batch: dict[str, Tensor]) -> Tensor: """Predict a chunk of actions given environment observations.""" raise NotImplementedError("SACPolicy does not support action chunking. 
It returns single actions!") @torch.no_grad() def select_action(self, batch: dict[str, Tensor]) -> Tensor: """Select action for inference/evaluation""" observations_features = None if self.shared_encoder and self.actor.encoder.has_images: # Cache and normalize image features observations_features = self.actor.encoder.get_cached_image_features(batch, normalize=True) actions, _, _ = self.actor(batch, observations_features) if self.config.num_discrete_actions is not None: discrete_action_value = self.discrete_critic(batch, observations_features) discrete_action = torch.argmax(discrete_action_value, dim=-1, keepdim=True) actions = torch.cat([actions, discrete_action], dim=-1) return actions def critic_forward( self, observations: dict[str, Tensor], actions: Tensor, use_target: bool = False, observation_features: Tensor | None = None, ) -> Tensor: """Forward pass through a critic network ensemble Args: observations: Dictionary of observations actions: Action tensor use_target: If True, use target critics, otherwise use ensemble critics Returns: Tensor of Q-values from all critics """ critics = self.critic_target if use_target else self.critic_ensemble q_values = critics(observations, actions, observation_features) return q_values def discrete_critic_forward( self, observations, use_target=False, observation_features=None ) -> torch.Tensor: """Forward pass through a discrete critic network Args: observations: Dictionary of observations use_target: If True, use target critics, otherwise use ensemble critics observation_features: Optional pre-computed observation features to avoid recomputing encoder output Returns: Tensor of Q-values from the discrete critic network """ discrete_critic = self.discrete_critic_target if use_target else self.discrete_critic q_values = discrete_critic(observations, observation_features) return q_values def forward( self, batch: dict[str, Tensor | dict[str, Tensor]], model: Literal["actor", "critic", "temperature", "discrete_critic"] = "critic", ) -> dict[str, Tensor]: """Compute the loss for the given model Args: batch: Dictionary containing: - action: Action tensor - reward: Reward tensor - state: Observations tensor dict - next_state: Next observations tensor dict - done: Done mask tensor - observation_feature: Optional pre-computed observation features - next_observation_feature: Optional pre-computed next observation features model: Which model to compute the loss for ("actor", "critic", "discrete_critic", or "temperature") Returns: The computed loss tensor """ # Extract common components from batch actions: Tensor = batch["action"] observations: dict[str, Tensor] = batch["state"] observation_features: Tensor = batch.get("observation_feature") if model == "critic": # Extract critic-specific components rewards: Tensor = batch["reward"] next_observations: dict[str, Tensor] = batch["next_state"] done: Tensor = batch["done"] next_observation_features: Tensor = batch.get("next_observation_feature") loss_critic = self.compute_loss_critic( observations=observations, actions=actions, rewards=rewards, next_observations=next_observations, done=done, observation_features=observation_features, next_observation_features=next_observation_features, ) return {"loss_critic": loss_critic} if model == "discrete_critic" and self.config.num_discrete_actions is not None: # Extract critic-specific components rewards: Tensor = batch["reward"] next_observations: dict[str, Tensor] = batch["next_state"] done: Tensor = batch["done"] next_observation_features: Tensor = 
batch.get("next_observation_feature") complementary_info = batch.get("complementary_info") loss_discrete_critic = self.compute_loss_discrete_critic( observations=observations, actions=actions, rewards=rewards, next_observations=next_observations, done=done, observation_features=observation_features, next_observation_features=next_observation_features, complementary_info=complementary_info, ) return {"loss_discrete_critic": loss_discrete_critic} if model == "actor": return { "loss_actor": self.compute_loss_actor( observations=observations, observation_features=observation_features, ) } if model == "temperature": return { "loss_temperature": self.compute_loss_temperature( observations=observations, observation_features=observation_features, ) } raise ValueError(f"Unknown model type: {model}") def update_target_networks(self): """Update target networks with exponential moving average""" for target_param, param in zip( self.critic_target.parameters(), self.critic_ensemble.parameters(), strict=True, ): target_param.data.copy_( param.data * self.config.critic_target_update_weight + target_param.data * (1.0 - self.config.critic_target_update_weight) ) if self.config.num_discrete_actions is not None: for target_param, param in zip( self.discrete_critic_target.parameters(), self.discrete_critic.parameters(), strict=True, ): target_param.data.copy_( param.data * self.config.critic_target_update_weight + target_param.data * (1.0 - self.config.critic_target_update_weight) ) def update_temperature(self): self.temperature = self.log_alpha.exp().item() def compute_loss_critic( self, observations, actions, rewards, next_observations, done, observation_features: Tensor | None = None, next_observation_features: Tensor | None = None, ) -> Tensor: with torch.no_grad(): next_action_preds, next_log_probs, _ = self.actor(next_observations, next_observation_features) # 2- compute q targets q_targets = self.critic_forward( observations=next_observations, actions=next_action_preds, use_target=True, observation_features=next_observation_features, ) # subsample critics to prevent overfitting if use high UTD (update to date) # TODO: Get indices before forward pass to avoid unnecessary computation if self.config.num_subsample_critics is not None: indices = torch.randperm(self.config.num_critics) indices = indices[: self.config.num_subsample_critics] q_targets = q_targets[indices] # critics subsample size min_q, _ = q_targets.min(dim=0) # Get values from min operation if self.config.use_backup_entropy: min_q = min_q - (self.temperature * next_log_probs) td_target = rewards + (1 - done) * self.config.discount * min_q # 3- compute predicted qs if self.config.num_discrete_actions is not None: # NOTE: We only want to keep the continuous action part # In the buffer we have the full action space (continuous + discrete) # We need to split them before concatenating them in the critic forward actions: Tensor = actions[:, :DISCRETE_DIMENSION_INDEX] q_preds = self.critic_forward( observations=observations, actions=actions, use_target=False, observation_features=observation_features, ) # 4- Calculate loss # Compute state-action value loss (TD loss) for all of the Q functions in the ensemble. 
td_target_duplicate = einops.repeat(td_target, "b -> e b", e=q_preds.shape[0]) # You compute the mean loss of the batch for each critic and then to compute the final loss you sum them up critics_loss = ( F.mse_loss( input=q_preds, target=td_target_duplicate, reduction="none", ).mean(dim=1) ).sum() return critics_loss def compute_loss_discrete_critic( self, observations, actions, rewards, next_observations, done, observation_features=None, next_observation_features=None, complementary_info=None, ): # NOTE: We only want to keep the discrete action part # In the buffer we have the full action space (continuous + discrete) # We need to split them before concatenating them in the critic forward actions_discrete: Tensor = actions[:, DISCRETE_DIMENSION_INDEX:].clone() actions_discrete = torch.round(actions_discrete) actions_discrete = actions_discrete.long() discrete_penalties: Tensor | None = None if complementary_info is not None: discrete_penalties: Tensor | None = complementary_info.get("discrete_penalty") with torch.no_grad(): # For DQN, select actions using online network, evaluate with target network next_discrete_qs = self.discrete_critic_forward( next_observations, use_target=False, observation_features=next_observation_features ) best_next_discrete_action = torch.argmax(next_discrete_qs, dim=-1, keepdim=True) # Get target Q-values from target network target_next_discrete_qs = self.discrete_critic_forward( observations=next_observations, use_target=True, observation_features=next_observation_features, ) # Use gather to select Q-values for best actions target_next_discrete_q = torch.gather( target_next_discrete_qs, dim=1, index=best_next_discrete_action ).squeeze(-1) # Compute target Q-value with Bellman equation rewards_discrete = rewards if discrete_penalties is not None: rewards_discrete = rewards + discrete_penalties target_discrete_q = rewards_discrete + (1 - done) * self.config.discount * target_next_discrete_q # Get predicted Q-values for current observations predicted_discrete_qs = self.discrete_critic_forward( observations=observations, use_target=False, observation_features=observation_features ) # Use gather to select Q-values for taken actions predicted_discrete_q = torch.gather(predicted_discrete_qs, dim=1, index=actions_discrete).squeeze(-1) # Compute MSE loss between predicted and target Q-values discrete_critic_loss = F.mse_loss(input=predicted_discrete_q, target=target_discrete_q) return discrete_critic_loss def compute_loss_temperature(self, observations, observation_features: Tensor | None = None) -> Tensor: """Compute the temperature loss""" # calculate temperature loss with torch.no_grad(): _, log_probs, _ = self.actor(observations, observation_features) temperature_loss = (-self.log_alpha.exp() * (log_probs + self.target_entropy)).mean() return temperature_loss def compute_loss_actor( self, observations, observation_features: Tensor | None = None, ) -> Tensor: actions_pi, log_probs, _ = self.actor(observations, observation_features) q_preds = self.critic_forward( observations=observations, actions=actions_pi, use_target=False, observation_features=observation_features, ) min_q_preds = q_preds.min(dim=0)[0] actor_loss = ((self.temperature * log_probs) - min_q_preds).mean() return actor_loss def _init_normalization(self, dataset_stats): """Initialize input/output normalization modules.""" self.normalize_inputs = nn.Identity() self.normalize_targets = nn.Identity() if self.config.dataset_stats is not None: params = 
_convert_normalization_params_to_tensor(self.config.dataset_stats) self.normalize_inputs = NormalizeBuffer( self.config.input_features, self.config.normalization_mapping, params ) stats = dataset_stats or params self.normalize_targets = NormalizeBuffer( self.config.output_features, self.config.normalization_mapping, stats ) def _init_encoders(self): """Initialize shared or separate encoders for actor and critic.""" self.shared_encoder = self.config.shared_encoder self.encoder_critic = SACObservationEncoder(self.config, self.normalize_inputs) self.encoder_actor = ( self.encoder_critic if self.shared_encoder else SACObservationEncoder(self.config, self.normalize_inputs) ) def _init_critics(self, continuous_action_dim): """Build critic ensemble, targets, and optional discrete critic.""" heads = [ CriticHead( input_dim=self.encoder_critic.output_dim + continuous_action_dim, **asdict(self.config.critic_network_kwargs), ) for _ in range(self.config.num_critics) ] self.critic_ensemble = CriticEnsemble( encoder=self.encoder_critic, ensemble=heads, output_normalization=self.normalize_targets ) target_heads = [ CriticHead( input_dim=self.encoder_critic.output_dim + continuous_action_dim, **asdict(self.config.critic_network_kwargs), ) for _ in range(self.config.num_critics) ] self.critic_target = CriticEnsemble( encoder=self.encoder_critic, ensemble=target_heads, output_normalization=self.normalize_targets ) self.critic_target.load_state_dict(self.critic_ensemble.state_dict()) if self.config.use_torch_compile: self.critic_ensemble = torch.compile(self.critic_ensemble) self.critic_target = torch.compile(self.critic_target) if self.config.num_discrete_actions is not None: self._init_discrete_critics() def _init_discrete_critics(self): """Build discrete discrete critic ensemble and target networks.""" self.discrete_critic = DiscreteCritic( encoder=self.encoder_critic, input_dim=self.encoder_critic.output_dim, output_dim=self.config.num_discrete_actions, **asdict(self.config.discrete_critic_network_kwargs), ) self.discrete_critic_target = DiscreteCritic( encoder=self.encoder_critic, input_dim=self.encoder_critic.output_dim, output_dim=self.config.num_discrete_actions, **asdict(self.config.discrete_critic_network_kwargs), ) # TODO: (maractingi, azouitine) Compile the discrete critic self.discrete_critic_target.load_state_dict(self.discrete_critic.state_dict()) def _init_actor(self, continuous_action_dim): """Initialize policy actor network and default target entropy.""" # NOTE: The actor select only the continuous action part self.actor = Policy( encoder=self.encoder_actor, network=MLP(input_dim=self.encoder_actor.output_dim, **asdict(self.config.actor_network_kwargs)), action_dim=continuous_action_dim, encoder_is_shared=self.shared_encoder, **asdict(self.config.policy_kwargs), ) self.target_entropy = self.config.target_entropy if self.target_entropy is None: dim = continuous_action_dim + (1 if self.config.num_discrete_actions is not None else 0) self.target_entropy = -np.prod(dim) / 2 def _init_temperature(self): """Set up temperature parameter and initial log_alpha.""" temp_init = self.config.temperature_init self.log_alpha = nn.Parameter(torch.tensor([math.log(temp_init)])) self.temperature = self.log_alpha.exp().item() class SACObservationEncoder(nn.Module): """Encode image and/or state vector observations.""" def __init__(self, config: SACConfig, input_normalizer: nn.Module) -> None: super().__init__() self.config = config self.input_normalization = input_normalizer self._init_image_layers() 
self._init_state_layers() self._compute_output_dim() def _init_image_layers(self) -> None: self.image_keys = [k for k in self.config.input_features if is_image_feature(k)] self.has_images = bool(self.image_keys) if not self.has_images: return if self.config.vision_encoder_name is not None: self.image_encoder = PretrainedImageEncoder(self.config) else: self.image_encoder = DefaultImageEncoder(self.config) if self.config.freeze_vision_encoder: freeze_image_encoder(self.image_encoder) dummy = torch.zeros(1, *self.config.input_features[self.image_keys[0]].shape) with torch.no_grad(): _, channels, height, width = self.image_encoder(dummy).shape self.spatial_embeddings = nn.ModuleDict() self.post_encoders = nn.ModuleDict() for key in self.image_keys: name = key.replace(".", "_") self.spatial_embeddings[name] = SpatialLearnedEmbeddings( height=height, width=width, channel=channels, num_features=self.config.image_embedding_pooling_dim, ) self.post_encoders[name] = nn.Sequential( nn.Dropout(0.1), nn.Linear( in_features=channels * self.config.image_embedding_pooling_dim, out_features=self.config.latent_dim, ), nn.LayerNorm(normalized_shape=self.config.latent_dim), nn.Tanh(), ) def _init_state_layers(self) -> None: self.has_env = "observation.environment_state" in self.config.input_features self.has_state = "observation.state" in self.config.input_features if self.has_env: dim = self.config.input_features["observation.environment_state"].shape[0] self.env_encoder = nn.Sequential( nn.Linear(dim, self.config.latent_dim), nn.LayerNorm(self.config.latent_dim), nn.Tanh(), ) if self.has_state: dim = self.config.input_features["observation.state"].shape[0] self.state_encoder = nn.Sequential( nn.Linear(dim, self.config.latent_dim), nn.LayerNorm(self.config.latent_dim), nn.Tanh(), ) def _compute_output_dim(self) -> None: out = 0 if self.has_images: out += len(self.image_keys) * self.config.latent_dim if self.has_env: out += self.config.latent_dim if self.has_state: out += self.config.latent_dim self._out_dim = out def forward( self, obs: dict[str, Tensor], cache: dict[str, Tensor] | None = None, detach: bool = False ) -> Tensor: obs = self.input_normalization(obs) parts = [] if self.has_images: if cache is None: cache = self.get_cached_image_features(obs, normalize=False) parts.append(self._encode_images(cache, detach)) if self.has_env: parts.append(self.env_encoder(obs["observation.environment_state"])) if self.has_state: parts.append(self.state_encoder(obs["observation.state"])) if parts: return torch.cat(parts, dim=-1) raise ValueError( "No parts to concatenate, you should have at least one image or environment state or state" ) def get_cached_image_features(self, obs: dict[str, Tensor], normalize: bool = False) -> dict[str, Tensor]: """Extract and optionally cache image features from observations. This function processes image observations through the vision encoder once and returns the resulting features. When the image encoder is shared between actor and critics AND frozen, these features can be safely cached and reused across policy components (actor, critic, discrete_critic), avoiding redundant forward passes. 
Performance impact: - The vision encoder forward pass is typically the main computational bottleneck during training and inference - Caching these features can provide 2-4x speedup in training and inference Normalization behavior: - When called from inside forward(): set normalize=False since inputs are already normalized - When called from outside forward(): set normalize=True to ensure proper input normalization Usage patterns: - Called in select_action() with normalize=True - Called in learner.py's get_observation_features() to pre-compute features for all policy components - Called internally by forward() with normalize=False Args: obs: Dictionary of observation tensors containing image keys normalize: Whether to normalize observations before encoding Set to True when calling directly from outside the encoder's forward method Set to False when calling from within forward() where inputs are already normalized Returns: Dictionary mapping image keys to their corresponding encoded features """ if normalize: obs = self.input_normalization(obs) batched = torch.cat([obs[k] for k in self.image_keys], dim=0) out = self.image_encoder(batched) chunks = torch.chunk(out, len(self.image_keys), dim=0) return dict(zip(self.image_keys, chunks, strict=False)) def _encode_images(self, cache: dict[str, Tensor], detach: bool) -> Tensor: """Encode image features from cached observations. This function takes pre-encoded image features from the cache and applies spatial embeddings and post-encoders. It also supports detaching the encoded features if specified. Args: cache (dict[str, Tensor]): The cached image features. detach (bool): Usually when the encoder is shared between actor and critics, we want to detach the encoded features on the policy side to avoid backprop through the encoder. More detail here `https://cdn.aaai.org/ojs/17276/17276-13-20770-1-2-20210518.pdf` Returns: Tensor: The encoded image features. """ feats = [] for k, feat in cache.items(): safe_key = k.replace(".", "_") x = self.spatial_embeddings[safe_key](feat) x = self.post_encoders[safe_key](x) if detach: x = x.detach() feats.append(x) return torch.cat(feats, dim=-1) @property def output_dim(self) -> int: return self._out_dim class MLP(nn.Module): """Multi-layer perceptron builder. Dynamically constructs a sequence of layers based on `hidden_dims`: 1) Linear (in_dim -> out_dim) 2) Optional Dropout if `dropout_rate` > 0 and (not final layer or `activate_final`) 3) LayerNorm on the output features 4) Activation (standard for intermediate layers, `final_activation` for last layer if `activate_final`) Arguments: input_dim (int): Size of input feature dimension. hidden_dims (list[int]): Sizes for each hidden layer. activations (Callable or str): Activation to apply between layers. activate_final (bool): Whether to apply activation at the final layer. dropout_rate (Optional[float]): Dropout probability applied before normalization and activation. final_activation (Optional[Callable or str]): Activation for the final layer when `activate_final` is True. For each layer, `in_dim` is updated to the previous `out_dim`. All constructed modules are stored in `self.net` as an `nn.Sequential` container. 
""" def __init__( self, input_dim: int, hidden_dims: list[int], activations: Callable[[torch.Tensor], torch.Tensor] | str = nn.SiLU(), activate_final: bool = False, dropout_rate: float | None = None, final_activation: Callable[[torch.Tensor], torch.Tensor] | str | None = None, ): super().__init__() layers: list[nn.Module] = [] in_dim = input_dim total = len(hidden_dims) for idx, out_dim in enumerate(hidden_dims): # 1) linear transform layers.append(nn.Linear(in_dim, out_dim)) is_last = idx == total - 1 # 2-4) optionally add dropout, normalization, and activation if not is_last or activate_final: if dropout_rate and dropout_rate > 0: layers.append(nn.Dropout(p=dropout_rate)) layers.append(nn.LayerNorm(out_dim)) act_cls = final_activation if is_last and final_activation else activations act = act_cls if isinstance(act_cls, nn.Module) else getattr(nn, act_cls)() layers.append(act) in_dim = out_dim self.net = nn.Sequential(*layers) def forward(self, x: torch.Tensor) -> torch.Tensor: return self.net(x) class CriticHead(nn.Module): def __init__( self, input_dim: int, hidden_dims: list[int], activations: Callable[[torch.Tensor], torch.Tensor] | str = nn.SiLU(), activate_final: bool = False, dropout_rate: float | None = None, init_final: float | None = None, final_activation: Callable[[torch.Tensor], torch.Tensor] | str | None = None, ): super().__init__() self.net = MLP( input_dim=input_dim, hidden_dims=hidden_dims, activations=activations, activate_final=activate_final, dropout_rate=dropout_rate, final_activation=final_activation, ) self.output_layer = nn.Linear(in_features=hidden_dims[-1], out_features=1) if init_final is not None: nn.init.uniform_(self.output_layer.weight, -init_final, init_final) nn.init.uniform_(self.output_layer.bias, -init_final, init_final) else: orthogonal_init()(self.output_layer.weight) def forward(self, x: torch.Tensor) -> torch.Tensor: return self.output_layer(self.net(x)) class CriticEnsemble(nn.Module): """ CriticEnsemble wraps multiple CriticHead modules into an ensemble. Args: encoder (SACObservationEncoder): encoder for observations. ensemble (List[CriticHead]): list of critic heads. output_normalization (nn.Module): normalization layer for actions. init_final (float | None): optional initializer scale for final layers. Forward returns a tensor of shape (num_critics, batch_size) containing Q-values. 
""" def __init__( self, encoder: SACObservationEncoder, ensemble: list[CriticHead], output_normalization: nn.Module, init_final: float | None = None, ): super().__init__() self.encoder = encoder self.init_final = init_final self.output_normalization = output_normalization self.critics = nn.ModuleList(ensemble) def forward( self, observations: dict[str, torch.Tensor], actions: torch.Tensor, observation_features: torch.Tensor | None = None, ) -> torch.Tensor: device = get_device_from_parameters(self) # Move each tensor in observations to device observations = {k: v.to(device) for k, v in observations.items()} # NOTE: We normalize actions it helps for sample efficiency actions: dict[str, torch.tensor] = {"action": actions} # NOTE: Normalization layer took dict in input and outputs a dict that why actions = self.output_normalization(actions)["action"] actions = actions.to(device) obs_enc = self.encoder(observations, cache=observation_features) inputs = torch.cat([obs_enc, actions], dim=-1) # Loop through critics and collect outputs q_values = [] for critic in self.critics: q_values.append(critic(inputs)) # Stack outputs to match expected shape [num_critics, batch_size] q_values = torch.stack([q.squeeze(-1) for q in q_values], dim=0) return q_values class DiscreteCritic(nn.Module): def __init__( self, encoder: nn.Module, input_dim: int, hidden_dims: list[int], output_dim: int = 3, activations: Callable[[torch.Tensor], torch.Tensor] | str = nn.SiLU(), activate_final: bool = False, dropout_rate: float | None = None, init_final: float | None = None, final_activation: Callable[[torch.Tensor], torch.Tensor] | str | None = None, ): super().__init__() self.encoder = encoder self.output_dim = output_dim self.net = MLP( input_dim=input_dim, hidden_dims=hidden_dims, activations=activations, activate_final=activate_final, dropout_rate=dropout_rate, final_activation=final_activation, ) self.output_layer = nn.Linear(in_features=hidden_dims[-1], out_features=self.output_dim) if init_final is not None: nn.init.uniform_(self.output_layer.weight, -init_final, init_final) nn.init.uniform_(self.output_layer.bias, -init_final, init_final) else: orthogonal_init()(self.output_layer.weight) def forward( self, observations: torch.Tensor, observation_features: torch.Tensor | None = None ) -> torch.Tensor: device = get_device_from_parameters(self) observations = {k: v.to(device) for k, v in observations.items()} obs_enc = self.encoder(observations, cache=observation_features) return self.output_layer(self.net(obs_enc)) class Policy(nn.Module): def __init__( self, encoder: SACObservationEncoder, network: nn.Module, action_dim: int, std_min: float = -5, std_max: float = 2, fixed_std: torch.Tensor | None = None, init_final: float | None = None, use_tanh_squash: bool = False, encoder_is_shared: bool = False, ): super().__init__() self.encoder: SACObservationEncoder = encoder self.network = network self.action_dim = action_dim self.std_min = std_min self.std_max = std_max self.fixed_std = fixed_std self.use_tanh_squash = use_tanh_squash self.encoder_is_shared = encoder_is_shared # Find the last Linear layer's output dimension for layer in reversed(network.net): if isinstance(layer, nn.Linear): out_features = layer.out_features break # Mean layer self.mean_layer = nn.Linear(out_features, action_dim) if init_final is not None: nn.init.uniform_(self.mean_layer.weight, -init_final, init_final) nn.init.uniform_(self.mean_layer.bias, -init_final, init_final) else: orthogonal_init()(self.mean_layer.weight) # Standard deviation 
layer or parameter if fixed_std is None: self.std_layer = nn.Linear(out_features, action_dim) if init_final is not None: nn.init.uniform_(self.std_layer.weight, -init_final, init_final) nn.init.uniform_(self.std_layer.bias, -init_final, init_final) else: orthogonal_init()(self.std_layer.weight) def forward( self, observations: torch.Tensor, observation_features: torch.Tensor | None = None, ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: # We detach the encoder if it is shared to avoid backprop through it # This is important to avoid the encoder to be updated through the policy obs_enc = self.encoder(observations, cache=observation_features, detach=self.encoder_is_shared) # Get network outputs outputs = self.network(obs_enc) means = self.mean_layer(outputs) # Compute standard deviations if self.fixed_std is None: log_std = self.std_layer(outputs) std = torch.exp(log_std) # Match JAX "exp" std = torch.clamp(std, self.std_min, self.std_max) # Match JAX default clip else: std = self.fixed_std.expand_as(means) # Build transformed distribution dist = TanhMultivariateNormalDiag(loc=means, scale_diag=std) # Sample actions (reparameterized) actions = dist.rsample() # Compute log_probs log_probs = dist.log_prob(actions) return actions, log_probs, means def get_features(self, observations: torch.Tensor) -> torch.Tensor: """Get encoded features from observations""" device = get_device_from_parameters(self) observations = observations.to(device) if self.encoder is not None: with torch.inference_mode(): return self.encoder(observations) return observations class DefaultImageEncoder(nn.Module): def __init__(self, config: SACConfig): super().__init__() image_key = next(key for key in config.input_features if is_image_feature(key)) self.image_enc_layers = nn.Sequential( nn.Conv2d( in_channels=config.input_features[image_key].shape[0], out_channels=config.image_encoder_hidden_dim, kernel_size=7, stride=2, ), nn.ReLU(), nn.Conv2d( in_channels=config.image_encoder_hidden_dim, out_channels=config.image_encoder_hidden_dim, kernel_size=5, stride=2, ), nn.ReLU(), nn.Conv2d( in_channels=config.image_encoder_hidden_dim, out_channels=config.image_encoder_hidden_dim, kernel_size=3, stride=2, ), nn.ReLU(), nn.Conv2d( in_channels=config.image_encoder_hidden_dim, out_channels=config.image_encoder_hidden_dim, kernel_size=3, stride=2, ), nn.ReLU(), ) def forward(self, x): x = self.image_enc_layers(x) return x def freeze_image_encoder(image_encoder: nn.Module): """Freeze all parameters in the encoder""" for param in image_encoder.parameters(): param.requires_grad = False class PretrainedImageEncoder(nn.Module): def __init__(self, config: SACConfig): super().__init__() self.image_enc_layers, self.image_enc_out_shape = self._load_pretrained_vision_encoder(config) def _load_pretrained_vision_encoder(self, config: SACConfig): """Set up CNN encoder""" from transformers import AutoModel self.image_enc_layers = AutoModel.from_pretrained(config.vision_encoder_name, trust_remote_code=True) if hasattr(self.image_enc_layers.config, "hidden_sizes"): self.image_enc_out_shape = self.image_enc_layers.config.hidden_sizes[-1] # Last channel dimension elif hasattr(self.image_enc_layers, "fc"): self.image_enc_out_shape = self.image_enc_layers.fc.in_features else: raise ValueError("Unsupported vision encoder architecture, make sure you are using a CNN") return self.image_enc_layers, self.image_enc_out_shape def forward(self, x): enc_feat = self.image_enc_layers(x).last_hidden_state return enc_feat def orthogonal_init(): return lambda x: 
torch.nn.init.orthogonal_(x, gain=1.0) class SpatialLearnedEmbeddings(nn.Module): def __init__(self, height, width, channel, num_features=8): """ PyTorch implementation of learned spatial embeddings Args: height: Spatial height of input features width: Spatial width of input features channel: Number of input channels num_features: Number of output embedding dimensions """ super().__init__() self.height = height self.width = width self.channel = channel self.num_features = num_features self.kernel = nn.Parameter(torch.empty(channel, height, width, num_features)) nn.init.kaiming_normal_(self.kernel, mode="fan_in", nonlinearity="linear") def forward(self, features): """ Forward pass for spatial embedding Args: features: Input tensor of shape [B, C, H, W] where B is batch size, C is number of channels, H is height, and W is width Returns: Output tensor of shape [B, C*F] where F is the number of features """ features_expanded = features.unsqueeze(-1) # [B, C, H, W, 1] kernel_expanded = self.kernel.unsqueeze(0) # [1, C, H, W, F] # Element-wise multiplication and spatial reduction output = (features_expanded * kernel_expanded).sum(dim=(2, 3)) # Sum over H,W dimensions # Reshape to combine channel and feature dimensions output = output.view(output.size(0), -1) # [B, C*F] return output class RescaleFromTanh(Transform): def __init__(self, low: float = -1, high: float = 1): super().__init__() self.low = low self.high = high def _call(self, x): # Rescale from (-1, 1) to (low, high) return 0.5 * (x + 1.0) * (self.high - self.low) + self.low def _inverse(self, y): # Rescale from (low, high) back to (-1, 1) return 2.0 * (y - self.low) / (self.high - self.low) - 1.0 def log_abs_det_jacobian(self, x, y): # log|d(rescale)/dx| = sum(log(0.5 * (high - low))) scale = 0.5 * (self.high - self.low) return torch.sum(torch.log(scale), dim=-1) class TanhMultivariateNormalDiag(TransformedDistribution): def __init__(self, loc, scale_diag, low=None, high=None): base_dist = MultivariateNormal(loc, torch.diag_embed(scale_diag)) transforms = [TanhTransform(cache_size=1)] if low is not None and high is not None: low = torch.as_tensor(low) high = torch.as_tensor(high) transforms.insert(0, RescaleFromTanh(low, high)) super().__init__(base_dist, transforms) def mode(self): # Mode is mean of base distribution, passed through transforms x = self.base_dist.mean for transform in self.transforms: x = transform(x) return x def stddev(self): std = self.base_dist.stddev x = std for transform in self.transforms: x = transform(x) return x def _convert_normalization_params_to_tensor(normalization_params: dict) -> dict: converted_params = {} for outer_key, inner_dict in normalization_params.items(): converted_params[outer_key] = {} for key, value in inner_dict.items(): converted_params[outer_key][key] = torch.tensor(value) if "image" in outer_key: converted_params[outer_key][key] = converted_params[outer_key][key].view(3, 1, 1) return converted_params
lerobot/src/lerobot/policies/sac/modeling_sac.py/0
{ "file_path": "lerobot/src/lerobot/policies/sac/modeling_sac.py", "repo_id": "lerobot", "token_count": 19182 }
211
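update_target_networks() in the SAC policy record above is a Polyak (exponential moving average) update of the target critics. Here is a self-contained sketch of the same rule on plain torch modules; the 0.005 weight is only an example value standing in for critic_target_update_weight, not a value taken from the record.

import torch
import torch.nn as nn


def soft_update(target: nn.Module, online: nn.Module, weight: float) -> None:
    # target <- weight * online + (1 - weight) * target, applied parameter-wise.
    with torch.no_grad():
        for t_param, o_param in zip(target.parameters(), online.parameters(), strict=True):
            t_param.mul_(1.0 - weight).add_(o_param, alpha=weight)


online_critic = nn.Linear(8, 1)
target_critic = nn.Linear(8, 1)
target_critic.load_state_dict(online_critic.state_dict())
soft_update(target_critic, online_critic, weight=0.005)  # example weight, see critic_target_update_weight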
#!/usr/bin/env python # Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass from typing import Any import torch from lerobot.configs.types import PolicyFeature from lerobot.processor.pipeline import EnvTransition, TransitionKey from lerobot.utils.utils import get_safe_torch_device @dataclass class DeviceProcessor: """Processes transitions by moving tensors to the specified device. This processor ensures that all tensors in the transition are moved to the specified device (CPU or GPU) before they are returned. """ device: torch.device = "cpu" def __post_init__(self): self.device = get_safe_torch_device(self.device) self.non_blocking = "cuda" in str(self.device) def __call__(self, transition: EnvTransition) -> EnvTransition: # Create a copy of the transition new_transition = transition.copy() # Process observation tensors observation = transition.get(TransitionKey.OBSERVATION) if observation is not None: new_observation = { k: v.to(self.device, non_blocking=self.non_blocking) if isinstance(v, torch.Tensor) else v for k, v in observation.items() } new_transition[TransitionKey.OBSERVATION] = new_observation # Process action tensor action = transition.get(TransitionKey.ACTION) if action is not None and isinstance(action, torch.Tensor): new_transition[TransitionKey.ACTION] = action.to(self.device, non_blocking=self.non_blocking) # Process reward tensor reward = transition.get(TransitionKey.REWARD) if reward is not None and isinstance(reward, torch.Tensor): new_transition[TransitionKey.REWARD] = reward.to(self.device, non_blocking=self.non_blocking) # Process done tensor done = transition.get(TransitionKey.DONE) if done is not None and isinstance(done, torch.Tensor): new_transition[TransitionKey.DONE] = done.to(self.device, non_blocking=self.non_blocking) # Process truncated tensor truncated = transition.get(TransitionKey.TRUNCATED) if truncated is not None and isinstance(truncated, torch.Tensor): new_transition[TransitionKey.TRUNCATED] = truncated.to( self.device, non_blocking=self.non_blocking ) return new_transition def get_config(self) -> dict[str, Any]: """Return configuration for serialization.""" return {"device": self.device} def feature_contract(self, features: dict[str, PolicyFeature]) -> dict[str, PolicyFeature]: return features
lerobot/src/lerobot/processor/device_processor.py/0
{ "file_path": "lerobot/src/lerobot/processor/device_processor.py", "repo_id": "lerobot", "token_count": 1172 }
212
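The `DeviceProcessor` above applies one pattern repeatedly: move every tensor found in the transition to the target device and pass everything else through untouched. A standalone sketch of that pattern, assuming a plain nested dict rather than LeRobot's `EnvTransition` keys:

```python
import torch


def move_to_device(value, device: torch.device, non_blocking: bool = False):
    """Recursively move tensors in dicts/lists to `device`; pass other values through."""
    if isinstance(value, torch.Tensor):
        return value.to(device, non_blocking=non_blocking)
    if isinstance(value, dict):
        return {k: move_to_device(v, device, non_blocking) for k, v in value.items()}
    if isinstance(value, (list, tuple)):
        return type(value)(move_to_device(v, device, non_blocking) for v in value)
    return value


transition = {
    "observation": {"pixels": torch.zeros(3, 96, 128), "state": torch.zeros(6)},
    "action": torch.zeros(6),
    "info": {"is_success": False},  # non-tensor entries are returned unchanged
}
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
moved = move_to_device(transition, device, non_blocking=device.type == "cuda")
print(moved["action"].device)
```

The `non_blocking` flag only pays off for pinned-memory host-to-GPU copies, which is why the processor above enables it solely when the target device is CUDA.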
#!/usr/bin/env python # Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import time from functools import cached_property from typing import Any from lerobot.cameras.utils import make_cameras_from_configs from lerobot.errors import DeviceAlreadyConnectedError, DeviceNotConnectedError from lerobot.motors import Motor, MotorNormMode from lerobot.motors.calibration_gui import RangeFinderGUI from lerobot.motors.feetech import ( FeetechMotorsBus, ) from ..robot import Robot from .config_hope_jr import HopeJrHandConfig logger = logging.getLogger(__name__) RIGHT_HAND_INVERSIONS = [ "thumb_mcp", "thumb_dip", "index_ulnar_flexor", "middle_ulnar_flexor", "ring_ulnar_flexor", "ring_pip_dip", "pinky_ulnar_flexor", "pinky_pip_dip", ] LEFT_HAND_INVERSIONS = [ "thumb_cmc", "thumb_mcp", "thumb_dip", "index_radial_flexor", "index_pip_dip", "middle_radial_flexor", "middle_pip_dip", "ring_radial_flexor", "ring_pip_dip", "pinky_radial_flexor", # "pinky_pip_dip", ] class HopeJrHand(Robot): config_class = HopeJrHandConfig name = "hope_jr_hand" def __init__(self, config: HopeJrHandConfig): super().__init__(config) self.config = config self.bus = FeetechMotorsBus( port=self.config.port, motors={ # Thumb "thumb_cmc": Motor(1, "scs0009", MotorNormMode.RANGE_0_100), "thumb_mcp": Motor(2, "scs0009", MotorNormMode.RANGE_0_100), "thumb_pip": Motor(3, "scs0009", MotorNormMode.RANGE_0_100), "thumb_dip": Motor(4, "scs0009", MotorNormMode.RANGE_0_100), # Index "index_radial_flexor": Motor(5, "scs0009", MotorNormMode.RANGE_0_100), "index_ulnar_flexor": Motor(6, "scs0009", MotorNormMode.RANGE_0_100), "index_pip_dip": Motor(7, "scs0009", MotorNormMode.RANGE_0_100), # Middle "middle_radial_flexor": Motor(8, "scs0009", MotorNormMode.RANGE_0_100), "middle_ulnar_flexor": Motor(9, "scs0009", MotorNormMode.RANGE_0_100), "middle_pip_dip": Motor(10, "scs0009", MotorNormMode.RANGE_0_100), # Ring "ring_radial_flexor": Motor(11, "scs0009", MotorNormMode.RANGE_0_100), "ring_ulnar_flexor": Motor(12, "scs0009", MotorNormMode.RANGE_0_100), "ring_pip_dip": Motor(13, "scs0009", MotorNormMode.RANGE_0_100), # Pinky "pinky_radial_flexor": Motor(14, "scs0009", MotorNormMode.RANGE_0_100), "pinky_ulnar_flexor": Motor(15, "scs0009", MotorNormMode.RANGE_0_100), "pinky_pip_dip": Motor(16, "scs0009", MotorNormMode.RANGE_0_100), }, calibration=self.calibration, protocol_version=1, ) self.cameras = make_cameras_from_configs(config.cameras) self.inverted_motors = RIGHT_HAND_INVERSIONS if config.side == "right" else LEFT_HAND_INVERSIONS @property def _motors_ft(self) -> dict[str, type]: return {f"{motor}.pos": float for motor in self.bus.motors} @property def _cameras_ft(self) -> dict[str, tuple]: return { cam: (self.config.cameras[cam].height, self.config.cameras[cam].width, 3) for cam in self.cameras } @cached_property def observation_features(self) -> dict[str, type | tuple]: return {**self._motors_ft, **self._cameras_ft} @cached_property def action_features(self) -> dict[str, type]: return 
self._motors_ft @property def is_connected(self) -> bool: return self.bus.is_connected and all(cam.is_connected for cam in self.cameras.values()) def connect(self, calibrate: bool = True) -> None: if self.is_connected: raise DeviceAlreadyConnectedError(f"{self} already connected") self.bus.connect() if not self.is_calibrated and calibrate: self.calibrate() # Connect the cameras for cam in self.cameras.values(): cam.connect() self.configure() logger.info(f"{self} connected.") @property def is_calibrated(self) -> bool: return self.bus.is_calibrated def calibrate(self) -> None: fingers = {} for finger in ["thumb", "index", "middle", "ring", "pinky"]: fingers[finger] = [motor for motor in self.bus.motors if motor.startswith(finger)] self.calibration = RangeFinderGUI(self.bus, fingers).run() for motor in self.inverted_motors: self.calibration[motor].drive_mode = 1 self._save_calibration() print("Calibration saved to", self.calibration_fpath) def configure(self) -> None: with self.bus.torque_disabled(): self.bus.configure_motors() def setup_motors(self) -> None: # TODO: add docstring for motor in self.bus.motors: input(f"Connect the controller board to the '{motor}' motor only and press enter.") self.bus.setup_motor(motor) print(f"'{motor}' motor id set to {self.bus.motors[motor].id}") def get_observation(self) -> dict[str, Any]: if not self.is_connected: raise DeviceNotConnectedError(f"{self} is not connected.") obs_dict = {} # Read hand position start = time.perf_counter() for motor in self.bus.motors: obs_dict[f"{motor}.pos"] = self.bus.read("Present_Position", motor) dt_ms = (time.perf_counter() - start) * 1e3 logger.debug(f"{self} read state: {dt_ms:.1f}ms") # Capture images from cameras for cam_key, cam in self.cameras.items(): start = time.perf_counter() obs_dict[cam_key] = cam.async_read() dt_ms = (time.perf_counter() - start) * 1e3 logger.debug(f"{self} read {cam_key}: {dt_ms:.1f}ms") return obs_dict def send_action(self, action: dict[str, Any]) -> dict[str, Any]: if not self.is_connected: raise DeviceNotConnectedError(f"{self} is not connected.") goal_pos = {key.removesuffix(".pos"): val for key, val in action.items() if key.endswith(".pos")} self.bus.sync_write("Goal_Position", goal_pos) return action def disconnect(self): if not self.is_connected: raise DeviceNotConnectedError(f"{self} is not connected.") self.bus.disconnect(self.config.disable_torque_on_disconnect) for cam in self.cameras.values(): cam.disconnect() logger.info(f"{self} disconnected.")
lerobot/src/lerobot/robots/hope_jr/hope_jr_hand.py/0
{ "file_path": "lerobot/src/lerobot/robots/hope_jr/hope_jr_hand.py", "repo_id": "lerobot", "token_count": 3306 }
213
# !/usr/bin/env python # Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import time from typing import Any import numpy as np from lerobot.cameras import make_cameras_from_configs from lerobot.errors import DeviceNotConnectedError from lerobot.model.kinematics import RobotKinematics from lerobot.motors import Motor, MotorNormMode from lerobot.motors.feetech import FeetechMotorsBus from . import SO100Follower from .config_so100_follower import SO100FollowerEndEffectorConfig logger = logging.getLogger(__name__) class SO100FollowerEndEffector(SO100Follower): """ SO100Follower robot with end-effector space control. This robot inherits from SO100Follower but transforms actions from end-effector space to joint space before sending them to the motors. """ config_class = SO100FollowerEndEffectorConfig name = "so100_follower_end_effector" def __init__(self, config: SO100FollowerEndEffectorConfig): super().__init__(config) self.bus = FeetechMotorsBus( port=self.config.port, motors={ "shoulder_pan": Motor(1, "sts3215", MotorNormMode.DEGREES), "shoulder_lift": Motor(2, "sts3215", MotorNormMode.DEGREES), "elbow_flex": Motor(3, "sts3215", MotorNormMode.DEGREES), "wrist_flex": Motor(4, "sts3215", MotorNormMode.DEGREES), "wrist_roll": Motor(5, "sts3215", MotorNormMode.DEGREES), "gripper": Motor(6, "sts3215", MotorNormMode.RANGE_0_100), }, calibration=self.calibration, ) self.cameras = make_cameras_from_configs(config.cameras) self.config = config # Initialize the kinematics module for the so100 robot if self.config.urdf_path is None: raise ValueError( "urdf_path must be provided in the configuration for end-effector control. " "Please set urdf_path in your SO100FollowerEndEffectorConfig." ) self.kinematics = RobotKinematics( urdf_path=self.config.urdf_path, target_frame_name=self.config.target_frame_name, ) # Store the bounds for end-effector position self.end_effector_bounds = self.config.end_effector_bounds self.current_ee_pos = None self.current_joint_pos = None @property def action_features(self) -> dict[str, Any]: """ Define action features for end-effector control. Returns dictionary with dtype, shape, and names. """ return { "dtype": "float32", "shape": (4,), "names": {"delta_x": 0, "delta_y": 1, "delta_z": 2, "gripper": 3}, } def send_action(self, action: dict[str, Any]) -> dict[str, Any]: """ Transform action from end-effector space to joint space and send to motors. 
Args: action: Dictionary with keys 'delta_x', 'delta_y', 'delta_z' for end-effector control or a numpy array with [delta_x, delta_y, delta_z] Returns: The joint-space action that was sent to the motors """ if not self.is_connected: raise DeviceNotConnectedError(f"{self} is not connected.") # Convert action to numpy array if not already if isinstance(action, dict): if all(k in action for k in ["delta_x", "delta_y", "delta_z"]): delta_ee = np.array( [ action["delta_x"] * self.config.end_effector_step_sizes["x"], action["delta_y"] * self.config.end_effector_step_sizes["y"], action["delta_z"] * self.config.end_effector_step_sizes["z"], ], dtype=np.float32, ) if "gripper" not in action: action["gripper"] = [1.0] action = np.append(delta_ee, action["gripper"]) else: logger.warning( f"Expected action keys 'delta_x', 'delta_y', 'delta_z', got {list(action.keys())}" ) action = np.zeros(4, dtype=np.float32) if self.current_joint_pos is None: # Read current joint positions current_joint_pos = self.bus.sync_read("Present_Position") self.current_joint_pos = np.array([current_joint_pos[name] for name in self.bus.motors]) # Calculate current end-effector position using forward kinematics if self.current_ee_pos is None: self.current_ee_pos = self.kinematics.forward_kinematics(self.current_joint_pos) # Set desired end-effector position by adding delta desired_ee_pos = np.eye(4) desired_ee_pos[:3, :3] = self.current_ee_pos[:3, :3] # Keep orientation # Add delta to position and clip to bounds desired_ee_pos[:3, 3] = self.current_ee_pos[:3, 3] + action[:3] if self.end_effector_bounds is not None: desired_ee_pos[:3, 3] = np.clip( desired_ee_pos[:3, 3], self.end_effector_bounds["min"], self.end_effector_bounds["max"], ) # Compute inverse kinematics to get joint positions target_joint_values_in_degrees = self.kinematics.inverse_kinematics( self.current_joint_pos, desired_ee_pos ) # Create joint space action dictionary joint_action = { f"{key}.pos": target_joint_values_in_degrees[i] for i, key in enumerate(self.bus.motors.keys()) } # Handle gripper separately if included in action # Gripper delta action is in the range 0 - 2, # We need to shift the action to the range -1, 1 so that we can expand it to -Max_gripper_pos, Max_gripper_pos joint_action["gripper.pos"] = np.clip( self.current_joint_pos[-1] + (action[-1] - 1) * self.config.max_gripper_pos, 5, self.config.max_gripper_pos, ) self.current_ee_pos = desired_ee_pos.copy() self.current_joint_pos = target_joint_values_in_degrees.copy() self.current_joint_pos[-1] = joint_action["gripper.pos"] # Send joint space action to parent class return super().send_action(joint_action) def get_observation(self) -> dict[str, Any]: if not self.is_connected: raise DeviceNotConnectedError(f"{self} is not connected.") # Read arm position start = time.perf_counter() obs_dict = self.bus.sync_read("Present_Position") obs_dict = {f"{motor}.pos": val for motor, val in obs_dict.items()} dt_ms = (time.perf_counter() - start) * 1e3 logger.debug(f"{self} read state: {dt_ms:.1f}ms") # Capture images from cameras for cam_key, cam in self.cameras.items(): start = time.perf_counter() obs_dict[cam_key] = cam.async_read() dt_ms = (time.perf_counter() - start) * 1e3 logger.debug(f"{self} read {cam_key}: {dt_ms:.1f}ms") return obs_dict def reset(self): self.current_ee_pos = None self.current_joint_pos = None
lerobot/src/lerobot/robots/so100_follower/so100_follower_end_effector.py/0
{ "file_path": "lerobot/src/lerobot/robots/so100_follower/so100_follower_end_effector.py", "repo_id": "lerobot", "token_count": 3520 }
214
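The end-effector control flow in `send_action` above boils down to three numeric steps: scale the unit deltas by per-axis step sizes, clip the resulting target position to the workspace bounds, and remap the gripper command from `[0, 2]` onto the joint range. A sketch of just that arithmetic with NumPy (made-up bounds and step sizes, and no kinematics, since `RobotKinematics` handles FK/IK in the real code):

```python
import numpy as np

# Illustrative values only; the real bounds and step sizes come from the robot config.
end_effector_bounds = {"min": np.array([0.10, -0.25, 0.02]), "max": np.array([0.40, 0.25, 0.30])}
step_sizes = {"x": 0.01, "y": 0.01, "z": 0.01}
max_gripper_pos = 50.0

current_ee_pos = np.array([0.25, 0.00, 0.10])  # translation part of the 4x4 EE pose
raw_action = np.array([1.0, -0.5, 0.0, 2.0])   # [delta_x, delta_y, delta_z, gripper in 0..2]

# Scale unit deltas by per-axis step sizes, then clip the target to the workspace bounds.
delta = raw_action[:3] * np.array([step_sizes["x"], step_sizes["y"], step_sizes["z"]])
target_ee_pos = np.clip(current_ee_pos + delta, end_effector_bounds["min"], end_effector_bounds["max"])

# The gripper command arrives in [0, 2]; shifting by -1 maps it to [-1, 1] so it can be
# expanded to [-max_gripper_pos, +max_gripper_pos] and added to the current position.
current_gripper_pos = 20.0
gripper_target = np.clip(current_gripper_pos + (raw_action[-1] - 1) * max_gripper_pos, 5, max_gripper_pos)

print(target_ee_pos, gripper_target)
```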
#!/usr/bin/env python # Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Simple script to control a robot from teleoperation. Example: ```shell python -m lerobot.scripts.server.find_joint_limits \ --robot.type=so100_follower \ --robot.port=/dev/tty.usbmodem58760431541 \ --robot.id=black \ --teleop.type=so100_leader \ --teleop.port=/dev/tty.usbmodem58760431551 \ --teleop.id=blue ``` """ import time from dataclasses import dataclass import draccus import numpy as np from lerobot.model.kinematics import RobotKinematics from lerobot.robots import ( # noqa: F401 RobotConfig, koch_follower, make_robot_from_config, so100_follower, ) from lerobot.teleoperators import ( # noqa: F401 TeleoperatorConfig, gamepad, koch_leader, make_teleoperator_from_config, so100_leader, ) from lerobot.utils.robot_utils import busy_wait @dataclass class FindJointLimitsConfig: teleop: TeleoperatorConfig robot: RobotConfig # Limit the maximum frames per second. By default, no limit. teleop_time_s: float = 30 # Display all cameras on screen display_data: bool = False @draccus.wrap() def find_joint_and_ee_bounds(cfg: FindJointLimitsConfig): teleop = make_teleoperator_from_config(cfg.teleop) robot = make_robot_from_config(cfg.robot) teleop.connect() robot.connect() start_episode_t = time.perf_counter() robot_type = getattr(robot.config, "robot_type", "so101") if "so100" in robot_type or "so101" in robot_type: # Note to be compatible with the rest of the codebase, # we are using the new calibration method for so101 and so100 robot_type = "so_new_calibration" kinematics = RobotKinematics(cfg.robot.urdf_path, cfg.robot.target_frame_name) # Initialize min/max values observation = robot.get_observation() joint_positions = np.array([observation[f"{key}.pos"] for key in robot.bus.motors]) ee_pos = kinematics.forward_kinematics(joint_positions)[:3, 3] max_pos = joint_positions.copy() min_pos = joint_positions.copy() max_ee = ee_pos.copy() min_ee = ee_pos.copy() while True: action = teleop.get_action() robot.send_action(action) observation = robot.get_observation() joint_positions = np.array([observation[f"{key}.pos"] for key in robot.bus.motors]) ee_pos = kinematics.forward_kinematics(joint_positions)[:3, 3] # Skip initial warmup period if (time.perf_counter() - start_episode_t) < 5: continue # Update min/max values max_ee = np.maximum(max_ee, ee_pos) min_ee = np.minimum(min_ee, ee_pos) max_pos = np.maximum(max_pos, joint_positions) min_pos = np.minimum(min_pos, joint_positions) if time.perf_counter() - start_episode_t > cfg.teleop_time_s: print(f"Max ee position {np.round(max_ee, 4).tolist()}") print(f"Min ee position {np.round(min_ee, 4).tolist()}") print(f"Max joint pos position {np.round(max_pos, 4).tolist()}") print(f"Min joint pos position {np.round(min_pos, 4).tolist()}") break busy_wait(0.01) if __name__ == "__main__": find_joint_and_ee_bounds()
lerobot/src/lerobot/scripts/find_joint_limits.py/0
{ "file_path": "lerobot/src/lerobot/scripts/find_joint_limits.py", "repo_id": "lerobot", "token_count": 1529 }
215
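`find_joint_limits.py` above is essentially a running element-wise min/max over the joint readings observed while a human teleoperates the arm. The same bookkeeping in a few lines of NumPy, with a random array standing in for the observation stream:

```python
import numpy as np

# Simulated stream of joint readings; in the real script these come from
# robot.get_observation() while a human moves the leader arm.
rng = np.random.default_rng(0)
readings = rng.uniform(low=-90, high=90, size=(100, 6))

min_pos = readings[0].copy()
max_pos = readings[0].copy()
for joint_positions in readings[1:]:
    min_pos = np.minimum(min_pos, joint_positions)  # element-wise running minimum
    max_pos = np.maximum(max_pos, joint_positions)  # element-wise running maximum

print("min:", np.round(min_pos, 2).tolist())
print("max:", np.round(max_pos, 2).tolist())
```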
# Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Helper to set motor ids and baudrate. Example: ```shell lerobot-setup-motors \ --teleop.type=so100_leader \ --teleop.port=/dev/tty.usbmodem575E0031751 ``` """ from dataclasses import dataclass import draccus from lerobot.robots import ( # noqa: F401 RobotConfig, koch_follower, lekiwi, make_robot_from_config, so100_follower, so101_follower, ) from lerobot.teleoperators import ( # noqa: F401 TeleoperatorConfig, koch_leader, make_teleoperator_from_config, so100_leader, so101_leader, ) COMPATIBLE_DEVICES = [ "koch_follower", "koch_leader", "so100_follower", "so100_leader", "so101_follower", "so101_leader", "lekiwi", ] @dataclass class SetupConfig: teleop: TeleoperatorConfig | None = None robot: RobotConfig | None = None def __post_init__(self): if bool(self.teleop) == bool(self.robot): raise ValueError("Choose either a teleop or a robot.") self.device = self.robot if self.robot else self.teleop @draccus.wrap() def setup_motors(cfg: SetupConfig): if cfg.device.type not in COMPATIBLE_DEVICES: raise NotImplementedError if isinstance(cfg.device, RobotConfig): device = make_robot_from_config(cfg.device) else: device = make_teleoperator_from_config(cfg.device) device.setup_motors() def main(): setup_motors() if __name__ == "__main__": main()
lerobot/src/lerobot/setup_motors.py/0
{ "file_path": "lerobot/src/lerobot/setup_motors.py", "repo_id": "lerobot", "token_count": 773 }
216
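`SetupConfig.__post_init__` above uses `bool(self.teleop) == bool(self.robot)` as an exactly-one-of check: equality means either both or neither were provided. The same validation pattern in a self-contained toy dataclass (hypothetical field names):

```python
from dataclasses import dataclass
from typing import Any


@dataclass
class EitherOrConfig:
    """Toy config requiring exactly one of `teleop` or `robot` to be set."""

    teleop: Any | None = None
    robot: Any | None = None

    def __post_init__(self):
        # bool(x) is False for None, so equality means "both set" or "neither set".
        if bool(self.teleop) == bool(self.robot):
            raise ValueError("Choose either a teleop or a robot.")
        self.device = self.robot if self.robot else self.teleop


EitherOrConfig(teleop="so100_leader")  # ok
try:
    EitherOrConfig()                   # neither -> error
except ValueError as e:
    print(e)
```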
# Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .config import TeleoperatorConfig from .teleoperator import Teleoperator def make_teleoperator_from_config(config: TeleoperatorConfig) -> Teleoperator: if config.type == "keyboard": from .keyboard import KeyboardTeleop return KeyboardTeleop(config) elif config.type == "koch_leader": from .koch_leader import KochLeader return KochLeader(config) elif config.type == "so100_leader": from .so100_leader import SO100Leader return SO100Leader(config) elif config.type == "so101_leader": from .so101_leader import SO101Leader return SO101Leader(config) elif config.type == "stretch3": from .stretch3_gamepad import Stretch3GamePad return Stretch3GamePad(config) elif config.type == "widowx": from .widowx import WidowX return WidowX(config) elif config.type == "mock_teleop": from tests.mocks.mock_teleop import MockTeleop return MockTeleop(config) elif config.type == "gamepad": from .gamepad.teleop_gamepad import GamepadTeleop return GamepadTeleop(config) elif config.type == "keyboard_ee": from .keyboard.teleop_keyboard import KeyboardEndEffectorTeleop return KeyboardEndEffectorTeleop(config) elif config.type == "homunculus_glove": from .homunculus import HomunculusGlove return HomunculusGlove(config) elif config.type == "homunculus_arm": from .homunculus import HomunculusArm return HomunculusArm(config) elif config.type == "bi_so100_leader": from .bi_so100_leader import BiSO100Leader return BiSO100Leader(config) else: raise ValueError(config.type)
lerobot/src/lerobot/teleoperators/utils.py/0
{ "file_path": "lerobot/src/lerobot/teleoperators/utils.py", "repo_id": "lerobot", "token_count": 832 }
217
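The factory above dispatches on `config.type` through an if/elif chain with lazy imports, so optional hardware dependencies are only loaded when requested. One alternative way to express the same idea is a registry of lazy constructors; the sketch below is a generic illustration with a stub class, not how LeRobot actually implements it:

```python
from typing import Callable

# Map each config type to a factory; the heavy imports stay inside the factories.
_TELEOP_REGISTRY: dict[str, Callable[[], type]] = {}


def register_teleop(name: str):
    def decorator(factory: Callable[[], type]):
        _TELEOP_REGISTRY[name] = factory
        return factory
    return decorator


@register_teleop("keyboard")
def _keyboard_factory() -> type:
    # In the real code this would be `from .keyboard import KeyboardTeleop`.
    class KeyboardTeleopStub:
        def __init__(self, config):
            self.config = config
    return KeyboardTeleopStub


def make_teleoperator(config_type: str, config):
    try:
        cls = _TELEOP_REGISTRY[config_type]()
    except KeyError:
        raise ValueError(config_type) from None
    return cls(config)


print(make_teleoperator("keyboard", config={"port": None}).config)
```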
#!/usr/bin/env python # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import importlib import logging def is_package_available(pkg_name: str, return_version: bool = False) -> tuple[bool, str] | bool: """Copied from https://github.com/huggingface/transformers/blob/main/src/transformers/utils/import_utils.py Check if the package spec exists and grab its version to avoid importing a local directory. **Note:** this doesn't work for all packages. """ package_exists = importlib.util.find_spec(pkg_name) is not None package_version = "N/A" if package_exists: try: # Primary method to get the package version package_version = importlib.metadata.version(pkg_name) except importlib.metadata.PackageNotFoundError: # Fallback method: Only for "torch" and versions containing "dev" if pkg_name == "torch": try: package = importlib.import_module(pkg_name) temp_version = getattr(package, "__version__", "N/A") # Check if the version contains "dev" if "dev" in temp_version: package_version = temp_version package_exists = True else: package_exists = False except ImportError: # If the package can't be imported, it's not available package_exists = False elif pkg_name == "grpc": package = importlib.import_module(pkg_name) package_version = getattr(package, "__version__", "N/A") else: # For packages other than "torch", don't attempt the fallback and set as not available package_exists = False logging.debug(f"Detected {pkg_name} version: {package_version}") if return_version: return package_exists, package_version else: return package_exists _torch_available, _torch_version = is_package_available("torch", return_version=True) _gym_xarm_available = is_package_available("gym_xarm") _gym_aloha_available = is_package_available("gym_aloha") _gym_pusht_available = is_package_available("gym_pusht")
lerobot/src/lerobot/utils/import_utils.py/0
{ "file_path": "lerobot/src/lerobot/utils/import_utils.py", "repo_id": "lerobot", "token_count": 1132 }
218
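`is_package_available` above builds on two standard-library calls: `importlib.util.find_spec` to test for existence without importing, and `importlib.metadata.version` to read the installed version. A reduced sketch of that combination:

```python
import importlib.metadata
import importlib.util


def package_info(pkg_name: str) -> tuple[bool, str]:
    """Return (is_available, version) without importing the package itself."""
    if importlib.util.find_spec(pkg_name) is None:
        return False, "N/A"
    try:
        return True, importlib.metadata.version(pkg_name)
    except importlib.metadata.PackageNotFoundError:
        # The spec exists (e.g. a local directory on sys.path) but no installed distribution.
        return True, "N/A"


print(package_info("torch"))
print(package_info("definitely_not_installed_pkg"))
```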
# Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Unit-tests for the `RobotClient` action-queue logic (pure Python, no gRPC). We monkey-patch `lerobot.robots.utils.make_robot_from_config` so that no real hardware is accessed. Only the queue-update mechanism is verified. """ from __future__ import annotations import time from queue import Queue import pytest import torch # Skip entire module if grpc is not available pytest.importorskip("grpc") # ----------------------------------------------------------------------------- # Test fixtures # ----------------------------------------------------------------------------- @pytest.fixture() def robot_client(): """Fresh `RobotClient` instance for each test case (no threads started). Uses DummyRobot.""" # Import only when the test actually runs (after decorator check) from lerobot.scripts.server.configs import RobotClientConfig from lerobot.scripts.server.robot_client import RobotClient from tests.mocks.mock_robot import MockRobotConfig test_config = MockRobotConfig() # gRPC channel is not actually used in tests, so using a dummy address test_config = RobotClientConfig( robot=test_config, server_address="localhost:9999", policy_type="test", pretrained_name_or_path="test", actions_per_chunk=20, verify_robot_cameras=False, ) client = RobotClient(test_config) # Initialize attributes that are normally set in start() method client.chunks_received = 0 client.available_actions_size = [] yield client if client.robot.is_connected: client.stop() # ----------------------------------------------------------------------------- # Helper utilities for tests # ----------------------------------------------------------------------------- def _make_actions(start_ts: float, start_t: int, count: int): """Generate `count` consecutive TimedAction objects starting at timestep `start_t`.""" from lerobot.scripts.server.helpers import TimedAction fps = 30 # emulates most common frame-rate actions = [] for i in range(count): timestep = start_t + i timestamp = start_ts + i * (1 / fps) action_tensor = torch.full((6,), timestep, dtype=torch.float32) actions.append(TimedAction(action=action_tensor, timestep=timestep, timestamp=timestamp)) return actions # ----------------------------------------------------------------------------- # Tests # ----------------------------------------------------------------------------- def test_update_action_queue_discards_stale(robot_client): """`_update_action_queue` must drop actions with `timestep` <= `latest_action`.""" # Pretend we already executed up to action #4 robot_client.latest_action = 4 # Incoming chunk contains timesteps 3..7 -> expect 5,6,7 kept. 
incoming = _make_actions(start_ts=time.time(), start_t=3, count=5) # 3,4,5,6,7 robot_client._aggregate_action_queues(incoming) # Extract timesteps from queue resulting_timesteps = [a.get_timestep() for a in robot_client.action_queue.queue] assert resulting_timesteps == [5, 6, 7] @pytest.mark.parametrize( "weight_old, weight_new", [ (1.0, 0.0), (0.0, 1.0), (0.5, 0.5), (0.2, 0.8), (0.8, 0.2), (0.1, 0.9), (0.9, 0.1), ], ) def test_aggregate_action_queues_combines_actions_in_overlap( robot_client, weight_old: float, weight_new: float ): """`_aggregate_action_queues` must combine actions on overlapping timesteps according to the provided aggregate_fn, here tested with multiple coefficients.""" from lerobot.scripts.server.helpers import TimedAction robot_client.chunks_received = 0 # Pretend we already executed up to action #4, and queue contains actions for timesteps 5..6 robot_client.latest_action = 4 current_actions = _make_actions( start_ts=time.time(), start_t=5, count=2 ) # actions are [torch.ones(6), torch.ones(6), ...] current_actions = [ TimedAction(action=10 * a.get_action(), timestep=a.get_timestep(), timestamp=a.get_timestamp()) for a in current_actions ] for a in current_actions: robot_client.action_queue.put(a) # Incoming chunk contains timesteps 3..7 -> expect 5,6,7 kept. incoming = _make_actions(start_ts=time.time(), start_t=3, count=5) # 3,4,5,6,7 overlap_timesteps = [5, 6] # properly tested in test_aggregate_action_queues_discards_stale nonoverlap_timesteps = [7] robot_client._aggregate_action_queues( incoming, aggregate_fn=lambda x1, x2: weight_old * x1 + weight_new * x2 ) queue_overlap_actions = [] queue_non_overlap_actions = [] for a in robot_client.action_queue.queue: if a.get_timestep() in overlap_timesteps: queue_overlap_actions.append(a) elif a.get_timestep() in nonoverlap_timesteps: queue_non_overlap_actions.append(a) queue_overlap_actions = sorted(queue_overlap_actions, key=lambda x: x.get_timestep()) queue_non_overlap_actions = sorted(queue_non_overlap_actions, key=lambda x: x.get_timestep()) assert torch.allclose( queue_overlap_actions[0].get_action(), weight_old * current_actions[0].get_action() + weight_new * incoming[-3].get_action(), ) assert torch.allclose( queue_overlap_actions[1].get_action(), weight_old * current_actions[1].get_action() + weight_new * incoming[-2].get_action(), ) assert torch.allclose(queue_non_overlap_actions[0].get_action(), incoming[-1].get_action()) @pytest.mark.parametrize( "chunk_size, queue_len, expected", [ (20, 12, False), # 12 / 20 = 0.6 > g=0.5 threshold, not ready to send (20, 8, True), # 8 / 20 = 0.4 <= g=0.5, ready to send (10, 5, True), (10, 6, False), ], ) def test_ready_to_send_observation(robot_client, chunk_size: int, queue_len: int, expected: bool): """Validate `_ready_to_send_observation` ratio logic for various sizes.""" robot_client.action_chunk_size = chunk_size # Clear any existing actions then fill with `queue_len` dummy entries ---- robot_client.action_queue = Queue() dummy_actions = _make_actions(start_ts=time.time(), start_t=0, count=queue_len) for act in dummy_actions: robot_client.action_queue.put(act) assert robot_client._ready_to_send_observation() is expected @pytest.mark.parametrize( "g_threshold, expected", [ # The condition is `queue_size / chunk_size <= g`. # Here, ratio = 6 / 10 = 0.6. 
(0.0, False), # 0.6 <= 0.0 is False (0.1, False), (0.2, False), (0.3, False), (0.4, False), (0.5, False), (0.6, True), # 0.6 <= 0.6 is True (0.7, True), (0.8, True), (0.9, True), (1.0, True), ], ) def test_ready_to_send_observation_with_varying_threshold(robot_client, g_threshold: float, expected: bool): """Validate `_ready_to_send_observation` with fixed sizes and varying `g`.""" # Fixed sizes for this test: ratio = 6 / 10 = 0.6 chunk_size = 10 queue_len = 6 robot_client.action_chunk_size = chunk_size # This is the parameter we are testing robot_client._chunk_size_threshold = g_threshold # Fill queue with dummy actions robot_client.action_queue = Queue() dummy_actions = _make_actions(start_ts=time.time(), start_t=0, count=queue_len) for act in dummy_actions: robot_client.action_queue.put(act) assert robot_client._ready_to_send_observation() is expected
lerobot/tests/async_inference/test_robot_client.py/0
{ "file_path": "lerobot/tests/async_inference/test_robot_client.py", "repo_id": "lerobot", "token_count": 3039 }
219
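Two pieces of client logic recur throughout the tests above: the readiness check `queue_size / chunk_size <= g` that decides when to send the next observation, and the weighted blend applied to actions whose timesteps overlap between consecutive chunks. A standalone sketch of both (plain `Queue` and tensors, not the `RobotClient` class):

```python
from queue import Queue

import torch


def ready_to_send(queue: Queue, chunk_size: int, g: float) -> bool:
    # Send a new observation once the queue has drained to a fraction g of one chunk.
    return queue.qsize() / chunk_size <= g


def aggregate(old: torch.Tensor, new: torch.Tensor, w_old: float = 0.5) -> torch.Tensor:
    # Blend overlapping actions from consecutive chunks; w_old = 0 keeps only the new chunk.
    return w_old * old + (1.0 - w_old) * new


q = Queue()
for t in range(6):
    q.put(torch.full((6,), float(t)))

print(ready_to_send(q, chunk_size=10, g=0.5))  # 0.6 <= 0.5 -> False
print(ready_to_send(q, chunk_size=10, g=0.6))  # 0.6 <= 0.6 -> True
print(aggregate(torch.ones(6) * 10, torch.ones(6), w_old=0.2))  # values of 2.8
```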
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from lerobot.constants import HF_LEROBOT_HOME

LEROBOT_TEST_DIR = HF_LEROBOT_HOME / "_testing"
DUMMY_REPO_ID = "dummy/repo"
DUMMY_ROBOT_TYPE = "dummy_robot"
DUMMY_MOTOR_FEATURES = {
    "action": {
        "dtype": "float32",
        "shape": (6,),
        "names": ["shoulder_pan", "shoulder_lift", "elbow_flex", "wrist_flex", "wrist_roll", "gripper"],
    },
    "state": {
        "dtype": "float32",
        "shape": (6,),
        "names": ["shoulder_pan", "shoulder_lift", "elbow_flex", "wrist_flex", "wrist_roll", "gripper"],
    },
}
DUMMY_CAMERA_FEATURES = {
    "laptop": {"shape": (480, 640, 3), "names": ["height", "width", "channels"], "info": None},
    "phone": {"shape": (480, 640, 3), "names": ["height", "width", "channels"], "info": None},
}
DEFAULT_FPS = 30
DUMMY_VIDEO_INFO = {
    "video.fps": DEFAULT_FPS,
    "video.codec": "av1",
    "video.pix_fmt": "yuv420p",
    "video.is_depth_map": False,
    "has_audio": False,
}
DUMMY_CHW = (3, 96, 128)
DUMMY_HWC = (96, 128, 3)
lerobot/tests/fixtures/constants.py/0
{ "file_path": "lerobot/tests/fixtures/constants.py", "repo_id": "lerobot", "token_count": 637 }
220
# !/usr/bin/env python # Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from lerobot.configs.types import FeatureType, NormalizationMode, PolicyFeature from lerobot.policies.sac.reward_model.configuration_classifier import RewardClassifierConfig from lerobot.policies.sac.reward_model.modeling_classifier import ClassifierOutput from tests.utils import require_package def test_classifier_output(): output = ClassifierOutput( logits=torch.tensor([1, 2, 3]), probabilities=torch.tensor([0.1, 0.2, 0.3]), hidden_states=None, ) assert ( f"{output}" == "ClassifierOutput(logits=tensor([1, 2, 3]), probabilities=tensor([0.1000, 0.2000, 0.3000]), hidden_states=None)" ) @require_package("transformers") def test_binary_classifier_with_default_params(): from lerobot.policies.sac.reward_model.modeling_classifier import Classifier config = RewardClassifierConfig() config.input_features = { "observation.image": PolicyFeature(type=FeatureType.VISUAL, shape=(3, 224, 224)), } config.output_features = { "next.reward": PolicyFeature(type=FeatureType.REWARD, shape=(1,)), } config.normalization_mapping = { "VISUAL": NormalizationMode.IDENTITY, "REWARD": NormalizationMode.IDENTITY, } config.num_cameras = 1 classifier = Classifier(config) batch_size = 10 input = { "observation.image": torch.rand((batch_size, 3, 128, 128)), "next.reward": torch.randint(low=0, high=2, size=(batch_size,)).float(), } images, labels = classifier.extract_images_and_labels(input) assert len(images) == 1 assert images[0].shape == torch.Size([batch_size, 3, 128, 128]) assert labels.shape == torch.Size([batch_size]) output = classifier.predict(images) assert output is not None assert output.logits.size() == torch.Size([batch_size]) assert not torch.isnan(output.logits).any(), "Tensor contains NaN values" assert output.probabilities.shape == torch.Size([batch_size]) assert not torch.isnan(output.probabilities).any(), "Tensor contains NaN values" assert output.hidden_states.shape == torch.Size([batch_size, 256]) assert not torch.isnan(output.hidden_states).any(), "Tensor contains NaN values" @require_package("transformers") def test_multiclass_classifier(): from lerobot.policies.sac.reward_model.modeling_classifier import Classifier num_classes = 5 config = RewardClassifierConfig() config.input_features = { "observation.image": PolicyFeature(type=FeatureType.VISUAL, shape=(3, 224, 224)), } config.output_features = { "next.reward": PolicyFeature(type=FeatureType.REWARD, shape=(num_classes,)), } config.num_cameras = 1 config.num_classes = num_classes classifier = Classifier(config) batch_size = 10 input = { "observation.image": torch.rand((batch_size, 3, 128, 128)), "next.reward": torch.rand((batch_size, num_classes)), } images, labels = classifier.extract_images_and_labels(input) assert len(images) == 1 assert images[0].shape == torch.Size([batch_size, 3, 128, 128]) assert labels.shape == torch.Size([batch_size, num_classes]) output = classifier.predict(images) assert output is not None 
assert output.logits.shape == torch.Size([batch_size, num_classes]) assert not torch.isnan(output.logits).any(), "Tensor contains NaN values" assert output.probabilities.shape == torch.Size([batch_size, num_classes]) assert not torch.isnan(output.probabilities).any(), "Tensor contains NaN values" assert output.hidden_states.shape == torch.Size([batch_size, 256]) assert not torch.isnan(output.hidden_states).any(), "Tensor contains NaN values" @require_package("transformers") def test_default_device(): from lerobot.policies.sac.reward_model.modeling_classifier import Classifier config = RewardClassifierConfig() assert config.device == "cpu" classifier = Classifier(config) for p in classifier.parameters(): assert p.device == torch.device("cpu") @require_package("transformers") def test_explicit_device_setup(): from lerobot.policies.sac.reward_model.modeling_classifier import Classifier config = RewardClassifierConfig(device="cpu") assert config.device == "cpu" classifier = Classifier(config) for p in classifier.parameters(): assert p.device == torch.device("cpu")
lerobot/tests/policies/hilserl/test_modeling_classifier.py/0
{ "file_path": "lerobot/tests/policies/hilserl/test_modeling_classifier.py", "repo_id": "lerobot", "token_count": 1766 }
221
#!/usr/bin/env python # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import platform from functools import wraps import pytest import torch from lerobot import available_cameras, available_motors, available_robots from lerobot.utils.import_utils import is_package_available DEVICE = os.environ.get("LEROBOT_TEST_DEVICE", "cuda") if torch.cuda.is_available() else "cpu" TEST_ROBOT_TYPES = [] for robot_type in available_robots: TEST_ROBOT_TYPES += [(robot_type, True), (robot_type, False)] TEST_CAMERA_TYPES = [] for camera_type in available_cameras: TEST_CAMERA_TYPES += [(camera_type, True), (camera_type, False)] TEST_MOTOR_TYPES = [] for motor_type in available_motors: TEST_MOTOR_TYPES += [(motor_type, True), (motor_type, False)] # Camera indices used for connecting physical cameras OPENCV_CAMERA_INDEX = int(os.environ.get("LEROBOT_TEST_OPENCV_CAMERA_INDEX", 0)) INTELREALSENSE_SERIAL_NUMBER = int(os.environ.get("LEROBOT_TEST_INTELREALSENSE_SERIAL_NUMBER", 128422271614)) DYNAMIXEL_PORT = os.environ.get("LEROBOT_TEST_DYNAMIXEL_PORT", "/dev/tty.usbmodem575E0032081") DYNAMIXEL_MOTORS = { "shoulder_pan": [1, "xl430-w250"], "shoulder_lift": [2, "xl430-w250"], "elbow_flex": [3, "xl330-m288"], "wrist_flex": [4, "xl330-m288"], "wrist_roll": [5, "xl330-m288"], "gripper": [6, "xl330-m288"], } FEETECH_PORT = os.environ.get("LEROBOT_TEST_FEETECH_PORT", "/dev/tty.usbmodem585A0080971") FEETECH_MOTORS = { "shoulder_pan": [1, "sts3215"], "shoulder_lift": [2, "sts3215"], "elbow_flex": [3, "sts3215"], "wrist_flex": [4, "sts3215"], "wrist_roll": [5, "sts3215"], "gripper": [6, "sts3215"], } def require_x86_64_kernel(func): """ Decorator that skips the test if plateform device is not an x86_64 cpu. """ from functools import wraps @wraps(func) def wrapper(*args, **kwargs): if platform.machine() != "x86_64": pytest.skip("requires x86_64 plateform") return func(*args, **kwargs) return wrapper def require_cpu(func): """ Decorator that skips the test if device is not cpu. """ from functools import wraps @wraps(func) def wrapper(*args, **kwargs): if DEVICE != "cpu": pytest.skip("requires cpu") return func(*args, **kwargs) return wrapper def require_cuda(func): """ Decorator that skips the test if cuda is not available. """ from functools import wraps @wraps(func) def wrapper(*args, **kwargs): if not torch.cuda.is_available(): pytest.skip("requires cuda") return func(*args, **kwargs) return wrapper def require_env(func): """ Decorator that skips the test if the required environment package is not installed. As it need 'env_name' in args, it also checks whether it is provided as an argument. If 'env_name' is None, this check is skipped. 
""" @wraps(func) def wrapper(*args, **kwargs): # Determine if 'env_name' is provided and extract its value arg_names = func.__code__.co_varnames[: func.__code__.co_argcount] if "env_name" in arg_names: # Get the index of 'env_name' and retrieve the value from args index = arg_names.index("env_name") env_name = args[index] if len(args) > index else kwargs.get("env_name") else: raise ValueError("Function does not have 'env_name' as an argument.") # Perform the package check package_name = f"gym_{env_name}" if env_name is not None and not is_package_available(package_name): pytest.skip(f"gym-{env_name} not installed") return func(*args, **kwargs) return wrapper def require_package_arg(func): """ Decorator that skips the test if the required package is not installed. This is similar to `require_env` but more general in that it can check any package (not just environments). As it need 'required_packages' in args, it also checks whether it is provided as an argument. If 'required_packages' is None, this check is skipped. """ @wraps(func) def wrapper(*args, **kwargs): # Determine if 'required_packages' is provided and extract its value arg_names = func.__code__.co_varnames[: func.__code__.co_argcount] if "required_packages" in arg_names: # Get the index of 'required_packages' and retrieve the value from args index = arg_names.index("required_packages") required_packages = args[index] if len(args) > index else kwargs.get("required_packages") else: raise ValueError("Function does not have 'required_packages' as an argument.") if required_packages is None: return func(*args, **kwargs) # Perform the package check for package in required_packages: if not is_package_available(package): pytest.skip(f"{package} not installed") return func(*args, **kwargs) return wrapper def require_package(package_name): """ Decorator that skips the test if the specified package is not installed. """ def decorator(func): @wraps(func) def wrapper(*args, **kwargs): if not is_package_available(package_name): pytest.skip(f"{package_name} not installed") return func(*args, **kwargs) return wrapper return decorator
lerobot/tests/utils.py/0
{ "file_path": "lerobot/tests/utils.py", "repo_id": "lerobot", "token_count": 2376 }
222
import argparse import asyncio import hashlib import json import os import random from asyncio import Lock from typing import Set from datasets import load_dataset from tqdm.asyncio import tqdm import aiofiles import aiohttp import uvloop file_lock = Lock() async def generate_completion(session, prompt, args): retry_budget = 10 while retry_budget > 0: try: await asyncio.sleep(random.uniform(0.0, 0.1)) async with session.post( f"http://{args.api_addr}/v1/chat/completions", json={ "model": "default", "messages": [{"role": "user", "content": prompt}], "max_tokens": args.max_tokens, "temperature": args.temperature, "top_p": args.top_p, }, headers={"Authorization": "Bearer EMPTY"}, ) as response: return await response.json(content_type=None) except Exception as e: print(f"API error (will retry): {e}") retry_budget -= 1 await asyncio.sleep(10) return None async def process_example(example, session, args, output_file, pbar): prompt = args.prompt_template.format(prompt=example[args.prompt_column]) try: tasks = [generate_completion(session, prompt, args) for _ in range(args.num_generations)] completions = await asyncio.gather(*tasks) if any(completion is None for completion in completions): print(f"Error processing example") pbar.update(1) return None generations = [] finish_reasons = [] api_metadata = [] for completion in completions: generations.append(completion["choices"][0]["message"]["content"]) finish_reasons.append(completion["choices"][0]["finish_reason"]) api_metadata.append(completion["usage"]) # Combine original dataset fields with generations result = { **example, # Preserve all original dataset fields "generations": generations, "finish_reasons": finish_reasons, "api_metadata": api_metadata, } # Write to file with lock async with file_lock: async with aiofiles.open(output_file, mode="a") as f: await f.write(json.dumps(result) + "\n") await f.flush() pbar.set_postfix(active=len(pbar.active_tasks), refresh=False) pbar.update(1) return result except Exception as e: print(f"Error processing example: {e}") pbar.update(1) return None async def load_processed_uuids(output_file, uuid_column): processed_uuids = set() if os.path.exists(output_file): async with aiofiles.open(output_file, mode="r") as f: async for line in f: try: data = json.loads(line) processed_uuids.add(hashlib.md5(str(data[uuid_column]).encode()).hexdigest()) except json.JSONDecodeError: continue return processed_uuids async def main(): parser = argparse.ArgumentParser() parser.add_argument("--dataset-name", type=str, required=True) parser.add_argument("--output-file", type=str, required=True) parser.add_argument("--prompt-column", type=str, required=True) parser.add_argument("--uuid-column", type=str, required=True) parser.add_argument("--api-addr", type=str, default="localhost:39876") parser.add_argument("--num-generations", type=int, default=4) parser.add_argument( "--prompt-template", type=str, default="You will be given a problem. 
Please reason step by step, and put your final answer within \\boxed{{}}:\n{prompt}", ) parser.add_argument("--temperature", type=float, default=0.6) parser.add_argument("--top-p", type=float, default=0.95) parser.add_argument("--max-tokens", type=int, default=16384) parser.add_argument("--max-concurrent", type=int, default=1000) args = parser.parse_args() dataset = load_dataset(args.dataset_name, split="train").shuffle() processed_uuids = await load_processed_uuids(args.output_file, args.uuid_column) if processed_uuids: print(f"Found {len(processed_uuids)} already processed examples, resuming from there...") if not os.path.exists(args.output_file): async with aiofiles.open(args.output_file, mode="w") as f: await f.write("") active_tasks: Set[asyncio.Task] = set() pbar = tqdm( total=len(dataset) - len(processed_uuids), desc="Generating responses", unit="row", mininterval=2, smoothing=0.0001, ) pbar.active_tasks = active_tasks async with aiohttp.ClientSession( timeout=aiohttp.ClientTimeout(total=60 * 60), connector=aiohttp.TCPConnector(limit=args.max_concurrent, ttl_dns_cache=300, keepalive_timeout=60 * 60), ) as session: for example in dataset: uuid = hashlib.md5(str(example[args.uuid_column]).encode()).hexdigest() if uuid not in processed_uuids: # Wait if we've hit the concurrency limit while len(active_tasks) >= args.max_concurrent: done, active_tasks = await asyncio.wait(active_tasks, return_when=asyncio.FIRST_COMPLETED) for task in done: try: await task except Exception as e: print(f"Task failed: {e}") task = asyncio.create_task(process_example(example, session, args, args.output_file, pbar)) active_tasks.add(task) task.add_done_callback(active_tasks.discard) pbar.set_postfix(active=len(active_tasks), refresh=True) # Wait for remaining tasks if active_tasks: await asyncio.gather(*active_tasks, return_exceptions=True) pbar.close() if __name__ == "__main__": uvloop.install() asyncio.run(main())
open-r1/scripts/generate_reasoning.py/0
{ "file_path": "open-r1/scripts/generate_reasoning.py", "repo_id": "open-r1", "token_count": 2800 }
223
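`generate_reasoning.py` above bounds the number of in-flight requests by waiting on the task set whenever it reaches `--max-concurrent`. An `asyncio.Semaphore` expresses the same cap more directly; the sketch below uses a sleep as a stand-in for the HTTP call and is not the script's actual implementation:

```python
import asyncio
import random


async def generate(example_id: int, sem: asyncio.Semaphore) -> str:
    async with sem:  # at most `max_concurrent` coroutines run this block at once
        await asyncio.sleep(random.uniform(0.01, 0.05))  # stand-in for the API request
        return f"completion for example {example_id}"


async def main(max_concurrent: int = 8, n_examples: int = 100):
    sem = asyncio.Semaphore(max_concurrent)
    tasks = [asyncio.create_task(generate(i, sem)) for i in range(n_examples)]
    results = await asyncio.gather(*tasks, return_exceptions=True)
    print(len([r for r in results if isinstance(r, str)]), "completions")


if __name__ == "__main__":
    asyncio.run(main())
```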
#!/bin/bash
#SBATCH --partition=hopper-cpu
#SBATCH --mem=16g
#SBATCH --cpus-per-task=16
#SBATCH --output=/fsx/open-r1/logs/morph_router/%x-%j.out
#SBATCH --err=/fsx/open-r1/logs/morph_router/%x-%j.err
#SBATCH --requeue
#SBATCH --time=7-00:00:00

echo "Starting job"
set -x -e

source ~/.bashrc
source openr1/bin/activate

srun python scripts/morph_router.py --port 8001 --max_num_sandboxes 20
open-r1/slurm/morph_router.slurm/0
{ "file_path": "open-r1/slurm/morph_router.slurm", "repo_id": "open-r1", "token_count": 175 }
224
from .cf_scoring import score_submission
from .code_patcher import patch_code
from .ioi_scoring import SubtaskResult, score_subtask, score_subtasks
from .ioi_utils import add_includes
from .morph_client import get_morph_client_from_env
from .piston_client import get_piston_client_from_env, get_slurm_piston_endpoints

__all__ = [
    "get_piston_client_from_env",
    "get_slurm_piston_endpoints",
    "get_morph_client_from_env",
    "patch_code",
    "score_submission",
    "score_subtask",
    "score_subtasks",
    "add_includes",
    "SubtaskResult",
]
open-r1/src/open_r1/utils/competitive_programming/__init__.py/0
{ "file_path": "open-r1/src/open_r1/utils/competitive_programming/__init__.py", "repo_id": "open-r1", "token_count": 216 }
225
<!--⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # DeepSpeed [DeepSpeed](https://www.deepspeed.ai/) is a library designed for speed and scale for distributed training of large models with billions of parameters. At its core is the Zero Redundancy Optimizer (ZeRO) that shards optimizer states (ZeRO-1), gradients (ZeRO-2), and parameters (ZeRO-3) across data parallel processes. This drastically reduces memory usage, allowing you to scale your training to billion parameter models. To unlock even more memory efficiency, ZeRO-Offload reduces GPU compute and memory by leveraging CPU resources during optimization. Both of these features are supported in 🤗 Accelerate, and you can use them with 🤗 PEFT. ## Compatibility with `bitsandbytes` quantization + LoRA Below is a table that summarizes the compatibility between PEFT's LoRA, [`bitsandbytes`](https://github.com/TimDettmers/bitsandbytes) library and DeepSpeed Zero stages with respect to fine-tuning. DeepSpeed Zero-1 and 2 will have no effect at inference as stage 1 shards the optimizer states and stage 2 shards the optimizer states and gradients: | DeepSpeed stage | Is compatible? | |---|---| | Zero-1 | 🟢 | | Zero-2 | 🟢 | | Zero-3 | 🟢 | For DeepSpeed Stage 3 + QLoRA, please refer to the section [Use PEFT QLoRA and DeepSpeed with ZeRO3 for finetuning large models on multiple GPUs](#use-peft-qlora-and-deepspeed-with-zero3-for-finetuning-large-models-on-multiple-gpus) below. For confirming these observations, we ran the SFT (Supervised Fine-tuning) [offical example scripts](https://github.com/huggingface/trl/tree/main/examples) of the [Transformers Reinforcement Learning (TRL) library](https://github.com/huggingface/trl) using QLoRA + PEFT and the accelerate configs available [here](https://github.com/huggingface/trl/tree/main/examples/accelerate_configs). We ran these experiments on a 2x NVIDIA T4 GPU. # Use PEFT and DeepSpeed with ZeRO3 for finetuning large models on multiple devices and multiple nodes This section of guide will help you learn how to use our DeepSpeed [training script](https://github.com/huggingface/peft/blob/main/examples/sft/train.py) for performing SFT. You'll configure the script to do SFT (supervised fine-tuning) of Llama-70B model with LoRA and ZeRO-3 on 8xH100 80GB GPUs on a single machine. You can configure it to scale to multiple machines by changing the accelerate config. ## Configuration Start by running the following command to [create a DeepSpeed configuration file](https://huggingface.co/docs/accelerate/quicktour#launching-your-distributed-script) with 🤗 Accelerate. The `--config_file` flag allows you to save the configuration file to a specific location, otherwise it is saved as a `default_config.yaml` file in the 🤗 Accelerate cache. The configuration file is used to set the default options when you launch the training script. ```bash accelerate config --config_file deepspeed_config.yaml ``` You'll be asked a few questions about your setup, and configure the following arguments. In this example, you'll use ZeRO-3 so make sure you pick those options. ```bash `zero_stage`: [0] Disabled, [1] optimizer state partitioning, [2] optimizer+gradient state partitioning and [3] optimizer+gradient+parameter partitioning `gradient_accumulation_steps`: Number of training steps to accumulate gradients before averaging and applying them. 
Pass the same value as you would pass via cmd argument else you will encounter mismatch error. `gradient_clipping`: Enable gradient clipping with value. Don't set this as you will be passing it via cmd arguments. `offload_optimizer_device`: [none] Disable optimizer offloading, [cpu] offload optimizer to CPU, [nvme] offload optimizer to NVMe SSD. Only applicable with ZeRO >= Stage-2. Set this as `none` as don't want to enable offloading. `offload_param_device`: [none] Disable parameter offloading, [cpu] offload parameters to CPU, [nvme] offload parameters to NVMe SSD. Only applicable with ZeRO Stage-3. Set this as `none` as don't want to enable offloading. `zero3_init_flag`: Decides whether to enable `deepspeed.zero.Init` for constructing massive models. Only applicable with ZeRO Stage-3. Set this to `True`. `zero3_save_16bit_model`: Decides whether to save 16-bit model weights when using ZeRO Stage-3. Set this to `True`. `mixed_precision`: `no` for FP32 training, `fp16` for FP16 mixed-precision training and `bf16` for BF16 mixed-precision training. Set this to `True`. ``` Once this is done, the corresponding config should look like below and you can find it in config folder at [deepspeed_config.yaml](https://github.com/huggingface/peft/blob/main/examples/sft/configs/deepspeed_config.yaml): ```yml compute_environment: LOCAL_MACHINE debug: false deepspeed_config: deepspeed_multinode_launcher: standard gradient_accumulation_steps: 4 offload_optimizer_device: none offload_param_device: none zero3_init_flag: true zero3_save_16bit_model: true zero_stage: 3 distributed_type: DEEPSPEED downcast_bf16: 'no' machine_rank: 0 main_training_function: main mixed_precision: bf16 num_machines: 1 num_processes: 8 rdzv_backend: static same_network: true tpu_env: [] tpu_use_cluster: false tpu_use_sudo: false use_cpu: false ``` ## Launch command The launch command is available at [run_peft_deepspeed.sh](https://github.com/huggingface/peft/blob/main/examples/sft/run_peft_deepspeed.sh) and it is also shown below: ```bash accelerate launch --config_file "configs/deepspeed_config.yaml" train.py \ --seed 100 \ --model_name_or_path "meta-llama/Llama-2-70b-hf" \ --dataset_name "smangrul/ultrachat-10k-chatml" \ --chat_template_format "chatml" \ --add_special_tokens False \ --append_concat_token False \ --splits "train,test" \ --max_seq_len 2048 \ --num_train_epochs 1 \ --logging_steps 5 \ --log_level "info" \ --logging_strategy "steps" \ --eval_strategy "epoch" \ --save_strategy "epoch" \ --push_to_hub \ --hub_private_repo True \ --hub_strategy "every_save" \ --bf16 True \ --packing True \ --learning_rate 1e-4 \ --lr_scheduler_type "cosine" \ --weight_decay 1e-4 \ --warmup_ratio 0.0 \ --max_grad_norm 1.0 \ --output_dir "llama-sft-lora-deepspeed" \ --per_device_train_batch_size 8 \ --per_device_eval_batch_size 8 \ --gradient_accumulation_steps 4 \ --gradient_checkpointing True \ --use_reentrant False \ --dataset_text_field "content" \ --use_flash_attn True \ --use_peft_lora True \ --lora_r 8 \ --lora_alpha 16 \ --lora_dropout 0.1 \ --lora_target_modules "all-linear" \ --use_4bit_quantization False ``` Notice that we are using LoRA with rank=8, alpha=16 and targeting all linear layers. We are passing the deepspeed config file and finetuning 70B Llama model on a subset of the ultrachat dataset. ## The important parts Let's dive a little deeper into the script so you can see what's going on, and understand how it works. 
The first thing to know is that the script uses DeepSpeed for distributed training as the DeepSpeed config has been passed. The [`~trl.SFTTrainer`] class handles all the heavy lifting of creating the PEFT model using the peft config that is passed. After that, when you call `trainer.train()`, [`~trl.SFTTrainer`] internally uses 🤗 Accelerate to prepare the model, optimizer and trainer using the DeepSpeed config to create DeepSpeed engine which is then trained. The main code snippet is below: ```python # trainer trainer = SFTTrainer( model=model, processing_class=tokenizer, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, peft_config=peft_config, ) trainer.accelerator.print(f"{trainer.model}") # train checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint trainer.train(resume_from_checkpoint=checkpoint) # saving final model trainer.save_model() ``` ## Memory usage In the above example, the memory consumed per GPU is 64 GB (80%) as seen in the screenshot below: <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/peft/peft_deepspeed_mem_usage.png"/> </div> <small>GPU memory usage for the training run</small> ## More resources You can also refer this blog post [Falcon 180B Finetuning using 🤗 PEFT and DeepSpeed](https://medium.com/@sourabmangrulkar/falcon-180b-finetuning-using-peft-and-deepspeed-b92643091d99) on how to finetune 180B Falcon model on 16 A100 GPUs on 2 machines. # Use PEFT QLoRA and DeepSpeed with ZeRO3 for finetuning large models on multiple GPUs In this section, we will look at how to use QLoRA and DeepSpeed Stage-3 for finetuning 70B llama model on 2X40GB GPUs. For this, we first need `bitsandbytes>=0.43.3`, `accelerate>=1.0.1`, `transformers>4.44.2`, `trl>0.11.4` and `peft>0.13.0`. We need to set `zero3_init_flag` to true when using Accelerate config. 
Below is the config, which can be found at [deepspeed_config_z3_qlora.yaml](https://github.com/huggingface/peft/blob/main/examples/sft/configs/deepspeed_config_z3_qlora.yaml):

```yml
compute_environment: LOCAL_MACHINE
debug: false
deepspeed_config:
  deepspeed_multinode_launcher: standard
  offload_optimizer_device: none
  offload_param_device: none
  zero3_init_flag: true
  zero3_save_16bit_model: true
  zero_stage: 3
distributed_type: DEEPSPEED
downcast_bf16: 'no'
machine_rank: 0
main_training_function: main
mixed_precision: bf16
num_machines: 1
num_processes: 2
rdzv_backend: static
same_network: true
tpu_env: []
tpu_use_cluster: false
tpu_use_sudo: false
use_cpu: false
```

The launch command is given below and is also available at [run_peft_qlora_deepspeed_stage3.sh](https://github.com/huggingface/peft/blob/main/examples/sft/run_peft_qlora_deepspeed_stage3.sh):
```bash
accelerate launch --config_file "configs/deepspeed_config_z3_qlora.yaml" train.py \
--seed 100 \
--model_name_or_path "meta-llama/Llama-2-70b-hf" \
--dataset_name "smangrul/ultrachat-10k-chatml" \
--chat_template_format "chatml" \
--add_special_tokens False \
--append_concat_token False \
--splits "train,test" \
--max_seq_len 2048 \
--num_train_epochs 1 \
--logging_steps 5 \
--log_level "info" \
--logging_strategy "steps" \
--eval_strategy "epoch" \
--save_strategy "epoch" \
--push_to_hub \
--hub_private_repo True \
--hub_strategy "every_save" \
--bf16 True \
--packing True \
--learning_rate 1e-4 \
--lr_scheduler_type "cosine" \
--weight_decay 1e-4 \
--warmup_ratio 0.0 \
--max_grad_norm 1.0 \
--output_dir "llama-sft-qlora-dsz3" \
--per_device_train_batch_size 2 \
--per_device_eval_batch_size 2 \
--gradient_accumulation_steps 2 \
--gradient_checkpointing True \
--use_reentrant True \
--dataset_text_field "content" \
--use_flash_attn True \
--use_peft_lora True \
--lora_r 8 \
--lora_alpha 16 \
--lora_dropout 0.1 \
--lora_target_modules "all-linear" \
--use_4bit_quantization True \
--use_nested_quant True \
--bnb_4bit_compute_dtype "bfloat16" \
--bnb_4bit_quant_storage_dtype "bfloat16"
```

Notice the new argument being passed, `bnb_4bit_quant_storage_dtype`, which denotes the data type for packing the 4-bit parameters. For example, when it is set to `bfloat16`, **32/4 = 8** 4-bit params are packed together post quantization.

In terms of training code, the important code changes are:

```diff
...

bnb_config = BitsAndBytesConfig(
    load_in_4bit=args.use_4bit_quantization,
    bnb_4bit_quant_type=args.bnb_4bit_quant_type,
    bnb_4bit_compute_dtype=compute_dtype,
    bnb_4bit_use_double_quant=args.use_nested_quant,
+   bnb_4bit_quant_storage=quant_storage_dtype,
)

...

model = AutoModelForCausalLM.from_pretrained(
    args.model_name_or_path,
    quantization_config=bnb_config,
    trust_remote_code=True,
    attn_implementation="flash_attention_2" if args.use_flash_attn else "eager",
+   torch_dtype=quant_storage_dtype or torch.float32,
)
```

Notice that `torch_dtype` for `AutoModelForCausalLM` is the same as the `bnb_4bit_quant_storage` data type. That's it. Everything else is handled by Trainer and TRL.

## Memory usage

In the above example, the memory consumed per GPU is **36.6 GB**. Therefore, what took 8x80GB GPUs with DeepSpeed Stage 3+LoRA and a couple of 80GB GPUs with DDP+QLoRA now requires 2x40GB GPUs. This makes finetuning of large models more accessible.
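Before moving on, here is a minimal, hedged sketch of how the two pieces from the diff above could fit together; the variable names and the dtype-resolution step are assumptions for illustration and may differ from the actual `train.py`:

```python
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

# Resolve dtypes from string flags such as "bfloat16" (illustrative helper, not taken from the script).
compute_dtype = getattr(torch, "bfloat16")
quant_storage_dtype = getattr(torch, "bfloat16")

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=compute_dtype,
    bnb_4bit_use_double_quant=True,
    bnb_4bit_quant_storage=quant_storage_dtype,  # packs the 4-bit params into bf16 storage tensors
)

model = AutoModelForCausalLM.from_pretrained(
    "meta-llama/Llama-2-70b-hf",
    quantization_config=bnb_config,
    # Keeping torch_dtype equal to the storage dtype keeps all parameters in one uniform dtype,
    # which is what lets DeepSpeed ZeRO-3 shard the quantized weights alongside the rest.
    torch_dtype=quant_storage_dtype,
)
```

The design point is that every parameter, quantized or not, ends up stored in the same dtype, so the sharding machinery can treat them uniformly.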
# Use PEFT and DeepSpeed with ZeRO3 and CPU Offloading for finetuning large models on a single GPU

This section of the guide will help you learn how to use our DeepSpeed [training script](https://github.com/huggingface/peft/blob/main/examples/conditional_generation/peft_lora_seq2seq_accelerate_ds_zero3_offload.py). You'll configure the script to train a large model for conditional generation with ZeRO-3 and CPU Offload.

<Tip>

💡 To help you get started, check out our example training scripts for [causal language modeling](https://github.com/huggingface/peft/blob/main/examples/causal_language_modeling/peft_lora_clm_accelerate_ds_zero3_offload.py) and [conditional generation](https://github.com/huggingface/peft/blob/main/examples/conditional_generation/peft_lora_seq2seq_accelerate_ds_zero3_offload.py). You can adapt these scripts for your own applications or even use them out of the box if your task is similar to the one in the scripts.

</Tip>

## Configuration

Start by running the following command to [create a DeepSpeed configuration file](https://huggingface.co/docs/accelerate/quicktour#launching-your-distributed-script) with 🤗 Accelerate. The `--config_file` flag allows you to save the configuration file to a specific location, otherwise it is saved as a `default_config.yaml` file in the 🤗 Accelerate cache.

The configuration file is used to set the default options when you launch the training script.

```bash
accelerate config --config_file ds_zero3_cpu.yaml
```

You'll be asked a few questions about your setup and to configure the following arguments. In this example, you'll use ZeRO-3 along with CPU Offload, so make sure you pick those options.

```bash
`zero_stage`: [0] Disabled, [1] optimizer state partitioning, [2] optimizer+gradient state partitioning and [3] optimizer+gradient+parameter partitioning
`gradient_accumulation_steps`: Number of training steps to accumulate gradients before averaging and applying them.
`gradient_clipping`: Enable gradient clipping with value.
`offload_optimizer_device`: [none] Disable optimizer offloading, [cpu] offload optimizer to CPU, [nvme] offload optimizer to NVMe SSD. Only applicable with ZeRO >= Stage-2.
`offload_param_device`: [none] Disable parameter offloading, [cpu] offload parameters to CPU, [nvme] offload parameters to NVMe SSD. Only applicable with ZeRO Stage-3.
`zero3_init_flag`: Decides whether to enable `deepspeed.zero.Init` for constructing massive models. Only applicable with ZeRO Stage-3.
`zero3_save_16bit_model`: Decides whether to save 16-bit model weights when using ZeRO Stage-3.
`mixed_precision`: `no` for FP32 training, `fp16` for FP16 mixed-precision training and `bf16` for BF16 mixed-precision training.
```

An example [configuration file](https://github.com/huggingface/peft/blob/main/examples/conditional_generation/accelerate_ds_zero3_cpu_offload_config.yaml) might look like the following. The most important thing to notice is that `zero_stage` is set to `3`, and `offload_optimizer_device` and `offload_param_device` are set to `cpu`.
```yml compute_environment: LOCAL_MACHINE deepspeed_config: gradient_accumulation_steps: 1 gradient_clipping: 1.0 offload_optimizer_device: cpu offload_param_device: cpu zero3_init_flag: true zero3_save_16bit_model: true zero_stage: 3 distributed_type: DEEPSPEED downcast_bf16: 'no' dynamo_backend: 'NO' fsdp_config: {} machine_rank: 0 main_training_function: main megatron_lm_config: {} mixed_precision: 'no' num_machines: 1 num_processes: 1 rdzv_backend: static same_network: true use_cpu: false ``` ## The important parts Let's dive a little deeper into the script so you can see what's going on, and understand how it works. Within the [`main`](https://github.com/huggingface/peft/blob/2822398fbe896f25d4dac5e468624dc5fd65a51b/examples/conditional_generation/peft_lora_seq2seq_accelerate_ds_zero3_offload.py#L103) function, the script creates an [`~accelerate.Accelerator`] class to initialize all the necessary requirements for distributed training. <Tip> 💡 Feel free to change the model and dataset inside the `main` function. If your dataset format is different from the one in the script, you may also need to write your own preprocessing function. </Tip> The script also creates a configuration for the 🤗 PEFT method you're using, which in this case, is LoRA. The [`LoraConfig`] specifies the task type and important parameters such as the dimension of the low-rank matrices, the matrices scaling factor, and the dropout probability of the LoRA layers. If you want to use a different 🤗 PEFT method, make sure you replace `LoraConfig` with the appropriate [class](../package_reference/tuners). ```diff def main(): + accelerator = Accelerator() model_name_or_path = "facebook/bart-large" dataset_name = "twitter_complaints" + peft_config = LoraConfig( task_type=TaskType.SEQ_2_SEQ_LM, inference_mode=False, r=8, lora_alpha=32, lora_dropout=0.1 ) ``` Throughout the script, you'll see the [`~accelerate.Accelerator.main_process_first`] and [`~accelerate.Accelerator.wait_for_everyone`] functions which help control and synchronize when processes are executed. The [`get_peft_model`] function takes a base model and the [`peft_config`] you prepared earlier to create a [`PeftModel`]: ```diff model = AutoModelForSeq2SeqLM.from_pretrained(model_name_or_path) + model = get_peft_model(model, peft_config) ``` Pass all the relevant training objects to 🤗 Accelerate's [`~accelerate.Accelerator.prepare`] which makes sure everything is ready for training: ```py model, train_dataloader, eval_dataloader, test_dataloader, optimizer, lr_scheduler = accelerator.prepare( model, train_dataloader, eval_dataloader, test_dataloader, optimizer, lr_scheduler ) ``` The next bit of code checks whether the DeepSpeed plugin is used in the `Accelerator`, and if the plugin exists, then we check if we are using ZeRO-3. 
This conditional flag is used during inference, when calling `generate`, to sync the GPUs when the model parameters are sharded:

```py
is_ds_zero_3 = False
if getattr(accelerator.state, "deepspeed_plugin", None):
    is_ds_zero_3 = accelerator.state.deepspeed_plugin.zero_stage == 3
```

Inside the training loop, the usual `loss.backward()` is replaced by 🤗 Accelerate's [`~accelerate.Accelerator.backward`] which uses the correct `backward()` method based on your configuration:

```diff
  for epoch in range(num_epochs):
      with TorchTracemalloc() as tracemalloc:
          model.train()
          total_loss = 0
          for step, batch in enumerate(tqdm(train_dataloader)):
              outputs = model(**batch)
              loss = outputs.loss
              total_loss += loss.detach().float()
+             accelerator.backward(loss)
              optimizer.step()
              lr_scheduler.step()
              optimizer.zero_grad()
```

That is all! The rest of the script handles the training loop, evaluation, and even pushes the model to the Hub for you.

## Train

Run the following command to launch the training script. Earlier, you saved the configuration file to `ds_zero3_cpu.yaml`, so you'll need to pass the path to the launcher with the `--config_file` argument like this:

```bash
accelerate launch --config_file ds_zero3_cpu.yaml examples/peft_lora_seq2seq_accelerate_ds_zero3_offload.py
```

You'll see some output logs that track memory usage during training, and once it's completed, the script returns the accuracy and compares the predictions to the labels:

```bash
GPU Memory before entering the train : 1916
GPU Memory consumed at the end of the train (end-begin): 66
GPU Peak Memory consumed during the train (max-begin): 7488
GPU Total Peak Memory consumed during the train (max): 9404
CPU Memory before entering the train : 19411
CPU Memory consumed at the end of the train (end-begin): 0
CPU Peak Memory consumed during the train (max-begin): 0
CPU Total Peak Memory consumed during the train (max): 19411
epoch=4: train_ppl=tensor(1.0705, device='cuda:0') train_epoch_loss=tensor(0.0681, device='cuda:0')
100%|████████████████████████████████████████████████████████████████████████████████████████████| 7/7 [00:27<00:00, 3.92s/it]
GPU Memory before entering the eval : 1982
GPU Memory consumed at the end of the eval (end-begin): -66
GPU Peak Memory consumed during the eval (max-begin): 672
GPU Total Peak Memory consumed during the eval (max): 2654
CPU Memory before entering the eval : 19411
CPU Memory consumed at the end of the eval (end-begin): 0
CPU Peak Memory consumed during the eval (max-begin): 0
CPU Total Peak Memory consumed during the eval (max): 19411
accuracy=100.0
eval_preds[:10]=['no complaint', 'no complaint', 'complaint', 'complaint', 'no complaint', 'no complaint', 'no complaint', 'complaint', 'complaint', 'no complaint']
dataset['train'][label_column][:10]=['no complaint', 'no complaint', 'complaint', 'complaint', 'no complaint', 'no complaint', 'no complaint', 'complaint', 'complaint', 'no complaint']
```

# Caveats

1. Merging when using PEFT and DeepSpeed is currently unsupported and will raise an error.
2. When using CPU offloading, the major gains from using PEFT to shrink the optimizer states and gradients to the size of the adapter weights are realized in CPU RAM; there won't be savings with respect to GPU memory.
3. DeepSpeed Stage 3 and QLoRA, when used with CPU offloading, lead to more GPU memory usage compared to disabling CPU offloading.
<Tip>

💡 When you have code that requires merging (and unmerging) of weights, try to manually collect the parameters with DeepSpeed ZeRO-3 beforehand:

```python
import deepspeed

is_ds_zero_3 = ...  # check if ZeRO-3

with deepspeed.zero.GatheredParameters(list(model.parameters()), enabled=is_ds_zero_3):
    model.merge_adapter()
    # do whatever is needed, then unmerge in the same context if unmerging is required
    ...
    model.unmerge_adapter()
```

</Tip>
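The `is_ds_zero_3` check in the tip is deliberately left as a placeholder; below is a hedged sketch of two possible ways to fill it in. Option A mirrors the Accelerate-based check shown earlier in this guide, while Option B uses a transformers helper and assumes a reasonably recent transformers version:

```python
# Option A: under an Accelerate-prepared setup (same logic as the earlier snippet).
ds_plugin = getattr(accelerator.state, "deepspeed_plugin", None)
is_ds_zero_3 = ds_plugin is not None and ds_plugin.zero_stage == 3

# Option B: when training with the transformers Trainer integration.
from transformers.integrations import is_deepspeed_zero3_enabled

is_ds_zero_3 = is_deepspeed_zero3_enabled()
```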
peft/docs/source/accelerate/deepspeed.md/0
{ "file_path": "peft/docs/source/accelerate/deepspeed.md", "repo_id": "peft", "token_count": 7520 }
226
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # PEFT 🤗 PEFT (Parameter-Efficient Fine-Tuning) is a library for efficiently adapting large pretrained models to various downstream applications without fine-tuning all of a model's parameters because it is prohibitively costly. PEFT methods only fine-tune a small number of (extra) model parameters - significantly decreasing computational and storage costs - while yielding performance comparable to a fully fine-tuned model. This makes it more accessible to train and store large language models (LLMs) on consumer hardware. PEFT is integrated with the Transformers, Diffusers, and Accelerate libraries to provide a faster and easier way to load, train, and use large models for inference. <div class="mt-10"> <div class="w-full flex flex-col space-y-4 md:space-y-0 md:grid md:grid-cols-2 md:gap-y-4 md:gap-x-5"> <a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="quicktour" ><div class="w-full text-center bg-gradient-to-br from-blue-400 to-blue-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">Quicktour</div> <p class="text-gray-700">Start here if you're new to 🤗 PEFT to get an overview of the library's main features, and how to train a model with a PEFT method.</p> </a> <a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./task_guides/prompt_based_methods" ><div class="w-full text-center bg-gradient-to-br from-indigo-400 to-indigo-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">How-to guides</div> <p class="text-gray-700">Practical guides demonstrating how to apply various PEFT methods across different types of tasks like image classification, causal language modeling, automatic speech recognition, and more. 
Learn how to use 🤗 PEFT with the DeepSpeed and Fully Sharded Data Parallel scripts.</p> </a> <a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./conceptual_guides/adapter" ><div class="w-full text-center bg-gradient-to-br from-pink-400 to-pink-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">Conceptual guides</div> <p class="text-gray-700">Get a better theoretical understanding of how LoRA and various soft prompting methods help reduce the number of trainable parameters to make training more efficient.</p> </a> <a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./package_reference/config" ><div class="w-full text-center bg-gradient-to-br from-purple-400 to-purple-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">Reference</div> <p class="text-gray-700">Technical descriptions of how 🤗 PEFT classes and methods work.</p> </a> </div> </div> <iframe src="https://stevhliu-peft-methods.hf.space" frameborder="0" width="850" height="620" ></iframe>
peft/docs/source/index.md/0
{ "file_path": "peft/docs/source/index.md", "repo_id": "peft", "token_count": 1162 }
227
<jupyter_start><jupyter_text>CPT Training and InferenceThis notebook demonstrates the training and evaluation process of Context-Aware Prompt Tuning (CPT) using the Hugging Face Trainer. For more details, refer to the [Paper](https://huggingface.co/papers/2410.17222). Sections Overview:1. **Setup**: Import libraries and configure the environment.2. **Data Preparation**: Load and preprocess the dataset.3. **Model Training**: Configure and train the model.4. **Evaluation**: Test the model's performance and visualize results. Setup--- Installation<jupyter_code>!pip install datasets !pip install git+https://github.com/huggingface/peft<jupyter_output>Requirement already satisfied: datasets in /usr/local/lib/python3.10/dist-packages (3.1.0) Requirement already satisfied: filelock in /usr/local/lib/python3.10/dist-packages (from datasets) (3.16.1) Requirement already satisfied: numpy>=1.17 in /usr/local/lib/python3.10/dist-packages (from datasets) (1.26.4) Requirement already satisfied: pyarrow>=15.0.0 in /usr/local/lib/python3.10/dist-packages (from datasets) (17.0.0) Requirement already satisfied: dill<0.3.9,>=0.3.0 in /usr/local/lib/python3.10/dist-packages (from datasets) (0.3.8) Requirement already satisfied: pandas in /usr/local/lib/python3.10/dist-packages (from datasets) (2.2.2) Requirement already satisfied: requests>=2.32.2 in /usr/local/lib/python3.10/dist-packages (from datasets) (2.32.3) Requirement already satisfied: tqdm>=4.66.3 in /usr/local/lib/python3.10/dist-packages (from datasets) (4.66.6) Requirement already satisfied: xxhash in /usr/local/lib/python3.10/dist-packages (from datasets) (3.5.0) Requirement already [...]<jupyter_text>Imports<jupyter_code>from typing import Any, Dict, List, Union import numpy as np import torch from datasets import load_dataset from torch.utils.data import Dataset from tqdm import tqdm from transformers import ( AutoModelForCausalLM, AutoTokenizer, DataCollatorForLanguageModeling, Trainer, TrainingArguments, ) from peft import CPTConfig, get_peft_model MAX_INPUT_LENGTH = 1024 MAX_ICL_SAMPLES = 10 NUM_TRAINING_SAMPLES = 100 model_id = 'bigscience/bloom-1b7'<jupyter_output><empty_output><jupyter_text>Data Preparation---<jupyter_code># Initialize the tokenizer tokenizer = AutoTokenizer.from_pretrained( model_id, # The name or path of the pre-trained tokenizer (e.g., "bert-base-uncased"). cache_dir='.', # Directory to cache the tokenizer files locally. padding_side='right', # Specifies that padding should be added to the right side of sequences. trust_remote_code=True # Allows loading tokenizer implementations from external sources. ) # Load the SST-2 dataset from the GLUE benchmark dataset = load_dataset('glue', 'sst2') def add_string_labels(example): """ Converts numerical labels into human-readable string labels. Args: example (dict): A single example from the dataset with a numerical 'label'. Returns: dict: The example augmented with a 'label_text' field. """ # Map numerical label to string label example['label_text'] = "positive" if example['label'] == 1 else "negative" return example # Subset and process the training dataset context_dataset = dataset['train'].select(range(MAX_ICL_SAMPLES)).map(add_string_labels) train_dataset = dataset['train'].select(range(MAX_ICL_SAMPLES, NUM_TRAINING_SAMPLES + MAX_ICL_SAMPLES)).map(add_string_labels)<jupyter_output><empty_output><jupyter_text>**Note:** This notebook uses small subsets of the dataset to ensure quick execution. 
For proper testing and evaluation, it is recommended to use the entire dataset by setting quick_review to False.<jupyter_code>quick_review = True # set to False for a comprehensive evaluation num_of_test_examples = 100 if quick_review else len(dataset['validation']) # Subset and process the validation dataset test_dataset = dataset['validation'].select(range(num_of_test_examples)).map(add_string_labels) class CPTDataset(Dataset): def __init__(self, samples, tokenizer, template, max_length=MAX_INPUT_LENGTH): """ Initialize the CPTDataset with samples, a tokenizer, and a template. Args: samples (list): List of samples containing input sentences and labels. tokenizer: Tokenizer instance for encoding text. template (dict): Dictionary defining input/output templates and separators. max_length (int): Maximum input length for truncation. """ self.template = template self.tokenizer = tokenizer self.max_length = max_length # Storage for tokenized inputs and masks self.attention_mask = [] self.input_ids = [] self.input_type_mask = [] self.inter_seperator_ids = self._get_input_ids(template['inter_seperator']) # Tokenize each sample and prepare inputs for sample_i in tqdm(samples): input_text, label = sample_i['sentence'], sample_i['label_text'] input_ids, attention_mask, input_type_mask = self.preprocess_sentence(input_text, label) self.input_ids.append(input_ids) self.attention_mask.append(attention_mask) self.input_type_mask.append(input_type_mask) def _get_input_ids(self, text): """ Tokenize the given text into input IDs. Args: text (str): The text to tokenize. Returns: list: Tokenized input IDs. """ return self.tokenizer(text, add_special_tokens=False)["input_ids"] def preprocess_sentence(self, input_text, label): """ Preprocess a sentence and its corresponding label using templates. Args: input_text (str): The input sentence. label (str): The label text (e.g., "positive", "negative"). 
Returns: tuple: (input_ids, attention_mask, input_type_mask) """ # Split input template into parts input_template_part_1_text, input_template_part_2_text = self.template['input'].split('{}') input_template_tokenized_part1 = self._get_input_ids(input_template_part_1_text) input_tokenized = self._get_input_ids(input_text) input_template_tokenized_part2 = self._get_input_ids(input_template_part_2_text) # Separator token sep_tokenized = self._get_input_ids(self.template['intra_seperator']) # Process the label using the template label_template_part_1, label_template_part_2 = self.template['output'].split('{}') label_template_part1_tokenized = self._get_input_ids(label_template_part_1) label_tokenized = self._get_input_ids(label) label_template_part2_tokenized = self._get_input_ids(label_template_part_2) # End-of-sequence token eos = [self.tokenizer.eos_token_id] if self.tokenizer.eos_token_id is not None else [] # Concatenate all tokenized parts input_ids = input_template_tokenized_part1 + input_tokenized + input_template_tokenized_part2 + sep_tokenized + label_template_part1_tokenized + label_tokenized + label_template_part2_tokenized + eos # Generate attention and type masks attention_mask = [1] * len(input_ids) input_type_mask = [1] * len(input_template_tokenized_part1) + [2] * len(input_tokenized) + [1] * len( input_template_tokenized_part2) + [0] * len(sep_tokenized) + \ [3] * len(label_template_part1_tokenized) + [4] * len(label_tokenized) + [3] * len( \ label_template_part2_tokenized) + [0] * len(eos) # Ensure all masks and inputs are the same length assert len(input_type_mask) == len(input_ids) == len(attention_mask) return input_ids, attention_mask, input_type_mask def __len__(self): """ Get the number of examples in the dataset. Returns: int: Number of examples. """ return len(self.input_ids) def __getitem__(self, idx): """ Get the tokenized representation for the given index. Args: idx (int): Index of the example. Returns: dict: Tokenized inputs with attention and type masks. """ return { "input_ids": self.input_ids[idx], "attention_mask": self.attention_mask[idx], "input_type_mask": self.input_type_mask[idx] } # Define templates for tokenization templates = { 'input': 'input: {}', # Input template with placeholder 'intra_seperator': ' ', # Separator between input and output 'output': 'output: {}', # Output template with placeholder 'inter_seperator': '\n' # Separator between examples } # Initialize the dataset cpt_train_dataset = CPTDataset(train_dataset, tokenizer, templates) # - `templates`: Define how inputs and outputs should be formatted and separated. # - `CPTDataset`: Converts the raw dataset into tokenized input IDs and masks. 
# Initialize storage for context-level information context_ids = [] # Concatenated input IDs for all samples context_attention_mask = [] # Concatenated attention masks context_input_type_mask = [] # Concatenated input type masks first_type_mask = 0 # Initial offset for input type mask cpt_context_dataset = CPTDataset(context_dataset, tokenizer, templates) # Iterate through the CPT training dataset for i in range(len(context_dataset)): # Add input IDs to the context context_ids += cpt_context_dataset[i]['input_ids'] # Add attention mask to the context context_attention_mask += cpt_context_dataset[i]['attention_mask'] # Adjust and add the input type mask to the context context_input_type_mask += [ i + first_type_mask if i > 0 else 0 # Increment type indices dynamically for i in cpt_context_dataset[i]['input_type_mask'] ] # Increment the type mask offset after processing the sample first_type_mask += 4<jupyter_output>100%|██████████| 10/10 [00:00<00:00, 1133.50it/s]<jupyter_text>Model Training--- Load model<jupyter_code># Load a pre-trained causal language model base_model = AutoModelForCausalLM.from_pretrained( model_id, cache_dir='.', torch_dtype=torch.float16, device_map='auto' ) # Initialize the CPT configuration config = CPTConfig( cpt_token_ids=context_ids, cpt_mask=context_attention_mask, cpt_tokens_type_mask=context_input_type_mask, opt_weighted_loss_type='decay', opt_loss_decay_factor=0.95, # we choose the exponential decay factor applied to the loss opt_projection_epsilon=0.2, # we choose the projection over the input tokens opt_projection_format_epsilon=0.1, # we choose the projection over input and output templates tokenizer_name_or_path=model_id, ) # Initialize the CPT model with PEFT model = get_peft_model(base_model, config)<jupyter_output><empty_output><jupyter_text>Setting Collate Function<jupyter_code>class CPTDataCollatorForLanguageModeling(DataCollatorForLanguageModeling): def __init__(self, tokenizer, training=True, mlm=False): """ Custom collator for CPT-style language modeling. Args: tokenizer: The tokenizer to handle tokenization and special tokens. training (bool): If True, operates in training mode; otherwise in evaluation mode. mlm (bool): If True, enables masked language modeling. """ super().__init__(tokenizer, mlm=mlm) # Initialize the parent class self.training = training # Add a special padding token if not already defined self.tokenizer.add_special_tokens({"pad_token": "[PAD]"}) def torch_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]: """ Process a batch of examples for language modeling. Args: examples (List): A batch of examples with tokenized inputs and optional sample masks. Returns: Dict: A dictionary containing padded and tensor-converted inputs, attention masks, input type masks, and optional sample masks and labels. 
""" # Initialize a list to collect sample masks if provided list_sample_mask = [] for i in range(len(examples)): if "sample_mask" in examples[i].keys(): list_sample_mask.append(examples[i].pop("sample_mask")) # Define a helper function for padding sequences to the maximum length max_len = max(len(ex["input_ids"]) for ex in examples) # Define a helper function for padding sequences to the maximum length def pad_sequence(sequence, max_len, pad_value=0): return sequence + [pad_value] * (max_len - len(sequence)) # Pad and convert `input_ids`, `attention_mask`, and `input_type_mask` to tensors input_ids = torch.tensor([pad_sequence(ex["input_ids"], max_len) for ex in examples]) attention_mask = torch.tensor([pad_sequence(ex["attention_mask"], max_len) for ex in examples]) input_type_mask = torch.tensor([pad_sequence(ex["input_type_mask"], max_len) for ex in examples]) # Create the initial batch dictionary batch = {"input_ids": input_ids, "attention_mask": attention_mask, "input_type_mask": input_type_mask} # Create a tensor to store sample masks tensor_sample_mask = batch["input_ids"].clone().long() tensor_sample_mask[:, :] = 0 # Initialize with zeros # Populate the tensor with the provided sample masks for i in range(len(list_sample_mask)): tensor_sample_mask[i, : len(list_sample_mask[i])] = list_sample_mask[i] # Copy `input_ids` to use as `labels` batch["labels"] = batch["input_ids"].clone() # If in evaluation mode, include the `sample_mask` in the batch if not self.training: batch["sample_mask"] = tensor_sample_mask return batch<jupyter_output><empty_output><jupyter_text>Training<jupyter_code>training_args = TrainingArguments( output_dir='../.', use_cpu=False, auto_find_batch_size=False, learning_rate=1e-4, logging_steps=100, per_device_train_batch_size=1, save_total_limit=1, remove_unused_columns=False, num_train_epochs=5, fp16=True, save_strategy='no', logging_dir="logs", report_to="none" ) trainer = Trainer( model=model, args=training_args, train_dataset=cpt_train_dataset, # Custom CPT training dataset. 
data_collator=CPTDataCollatorForLanguageModeling(tokenizer, training=True, mlm=False) ) trainer.train()<jupyter_output><empty_output><jupyter_text>Model Evaluation---<jupyter_code>model.eval() # Select relevant columns from the test dataset test_dataset = test_dataset.select_columns(['sentence', 'label_text']) # Convert the test dataset to a CPT-compatible format cpt_test_dataset = CPTDataset(test_dataset, tokenizer, templates) # Get the device where the model is loaded (CPU, GPU or XPU) device = model.device list_bool_predictions = [] for i in range(len(test_dataset)): input_ids, input_type_mask = cpt_test_dataset[i]['input_ids'], cpt_test_dataset[i]['input_type_mask'] # Pass the inputs through the model outputs = model( input_ids=torch.Tensor(input_ids).long().to(device=device).view(1, -1), labels=torch.Tensor(input_ids).long().to(device=device).view(1, -1), input_type_mask=torch.Tensor(input_type_mask).long().to(device=device).view(1, -1) ) # Shift logits to exclude the last token and match the labels shifted_logits = outputs.logits[..., :-1, :].contiguous().to(model.dtype)[0, -len(input_ids) + 1:] shift_labels = torch.Tensor(input_ids).long().to(device=device).view(1, -1)[0, 1:].contiguous().to(device) shifted_input_type_mask = torch.Tensor(input_type_mask).long().to(device=device).view(1, -1)[..., 1:].contiguous().to(device) # Create a mask for the type `4` tokens (label tokens) mask = torch.Tensor(shifted_input_type_mask).long().to(device=device).view(-1,) == 4 # Extract logits and labels corresponding to the mask logit = shifted_logits[mask] label = shift_labels[mask] # All possible label tokens for `negative` and `positive` all_labels = torch.Tensor([tokenizer(i, add_special_tokens=False)["input_ids"] for i in ['negative', 'positive']]).long().to(device).view(-1,) # Compare logits with label tokens and infer prediction prediction = logit[0, torch.Tensor([tokenizer(i, add_special_tokens=False)["input_ids"] for i in ['negative', 'positive']]).long().to(device).view(-1,)].argmax() prediction_text = 'negative' if prediction == 0 else 'positive' print(f"Sentence: {tokenizer.decode(input_ids)} \n \t The prediction is: {prediction_text}\n \t The GT is {tokenizer.decode(label)}") list_bool_predictions.append(prediction_text == tokenizer.decode(label)) print(f'The model Acc is {100 * np.mean(list_bool_predictions)}%')<jupyter_output>100%|██████████| 100/100 [00:00<00:00, 1972.82it/s]
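<jupyter_text>Saving and Reloading the Adapter (optional)---The cell below is a minimal, hedged sketch (not part of the original notebook) showing how the trained CPT adapter could be saved and reloaded with the standard PEFT API. The directory name is a hypothetical placeholder.<jupyter_code># Illustrative sketch: save only the adapter weights (small compared to the base model).
# NOTE: "cpt-sst2-adapter" is an assumed placeholder path.
model.save_pretrained("cpt-sst2-adapter")

# Reload later by attaching the saved adapter to a freshly loaded base model.
from transformers import AutoModelForCausalLM
from peft import PeftModel

base_model = AutoModelForCausalLM.from_pretrained(
    model_id, cache_dir='.', torch_dtype=torch.float16, device_map='auto'
)
loaded_model = PeftModel.from_pretrained(base_model, "cpt-sst2-adapter")
loaded_model.eval()<jupyter_output><empty_output>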
peft/examples/cpt_finetuning/cpt_train_and_inference.ipynb/0
{ "file_path": "peft/examples/cpt_finetuning/cpt_train_and_inference.ipynb", "repo_id": "peft", "token_count": 6573 }
228
import argparse import gc import json import logging import math import os from dataclasses import dataclass from datetime import datetime from pathlib import Path from random import randint from typing import Any, Union # datasets imports import datasets # metric imports import evaluate import numpy as np import torch import transformers import wandb # accelerate imports from accelerate import Accelerator, dispatch_model from accelerate.logging import get_logger from datasets import Audio, DatasetDict, IterableDatasetDict, interleave_datasets, load_dataset # hf imports from huggingface_hub import HfApi from torch.utils.data import DataLoader from tqdm import tqdm from transformers import ( BitsAndBytesConfig, SchedulerType, WhisperForConditionalGeneration, WhisperProcessor, get_scheduler, set_seed, ) from transformers.models.whisper.english_normalizer import BasicTextNormalizer # peft imports from peft import AdaLoraConfig, LoraConfig, PeftModel, get_peft_model logger = get_logger(__name__, log_level="INFO") def parse_args(): parser = argparse.ArgumentParser(description="Whisper Fine-Tuning with AdaLora") parser.add_argument( "--model_name_or_path", type=str, help="Path to pretrained model or model identifier from huggingface.co/models.", required=True, ) parser.add_argument("--language", type=str, help="Language to use for training; e.g., 'Hindi' ", required=True) parser.add_argument("--language_abbr", type=str, help="Language to use for training; e.g., 'hi' ", required=True) parser.add_argument( "--task", type=str, default="transcribe", help="Task to use for training; e.g., 'transcribe' ", required=False ) parser.add_argument( "--dataset_name", type=str, default="mozilla-foundation/common_voice_11_0", help="Dataset to use for training; e.g., 'whisper' ", required=False, ) parser.add_argument( "--dataset_in_streaming_mode", action="store_true", help="Whether to use streaming mode for the dataset.", ) parser.add_argument( "--do_lower_case", action="store_true", help="lowercase the transcribed text before tokenizing" ) parser.add_argument( "--do_remove_punctuation", action="store_true", help="remove punctuation from the transcribed text" ) parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument( "--overwrite_cache", type=bool, default=False, help="Overwrite the cached training and evaluation sets" ) parser.add_argument("--max_audio_input_length", type=float, default=30.0, help="Maximum audio length in seconds.") parser.add_argument( "--preprocessing_num_workers", type=int, default=None, help="The number of processes to use for the preprocessing.", ) parser.add_argument( "--per_device_train_batch_size", type=int, default=8, help="Batch size (per device) for the training dataloader.", ) parser.add_argument( "--per_device_eval_batch_size", type=int, default=8, help="Batch size (per device) for the evaluation dataloader.", ) parser.add_argument( "--buffer_size", type=int, default=5000, help="Number of samples to prefetch in the streaming mode.", ) parser.add_argument( "--dataloader_pin_memory", action="store_true", help="Whether or not to pin memory for the DataLoader.", ) parser.add_argument( "--dataloader_num_workers", type=int, default=0, help="Number of subprocesses to use for data loading.", ) parser.add_argument( "--learning_rate", type=float, default=5e-5, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument("--weight_decay", type=float, default=0.0, help="Weight decay 
to use.") parser.add_argument("--num_train_epochs", type=int, default=3, help="Total number of training epochs to perform.") parser.add_argument( "--max_train_steps", type=int, default=None, help="Total number of training steps to perform. If provided, overrides num_train_epochs.", ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument( "--lr_scheduler_type", type=SchedulerType, default="linear", help="The scheduler type to use.", choices=["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"], ) parser.add_argument( "--num_warmup_steps", type=int, default=0, help="Number of steps for the warmup in the lr scheduler." ) parser.add_argument("--output_dir", type=str, default=None, help="Where to store the final model.") parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") parser.add_argument( "--load_best_model", action="store_true", help="Whether to load the best model at the end of training", ) parser.add_argument( "--with_tracking", action="store_true", help="Whether to enable experiment trackers for logging.", ) parser.add_argument( "--report_to", type=str, default="all", help=( 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`,' ' `"wandb"` and `"comet_ml"`. Use `"all"` (default) to report to all integrations.' "Only applicable when `--with_tracking` is passed." ), ) parser.add_argument("--hub_token", type=str, help="The token to use to push to the Model Hub.") parser.add_argument( "--hub_model_id", type=str, help="The name of the repository to keep in sync with the local `output_dir`." ) parser.add_argument( "--checkpointing_steps", type=int, default=500, help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.", ) parser.add_argument( "--logging_steps", type=int, default=100, help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.", ) parser.add_argument( "--evaluation_steps", type=int, default=500, help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.", ) parser.add_argument( "--resume_from_checkpoint", type=str, default=None, help="If the training should continue from a checkpoint folder.", ) # lora/adalora specific args parser.add_argument( "--use_peft", action="store_true", help="Whether to use PEFT", ) parser.add_argument( "--use_adalora", action="store_true", help="Whether to use AdaLoRA or LoRA. 
If set, uses AdaLoRA instead of the default LoRA.", ) parser.add_argument( "--init_r", type=int, default=12, help="Initial AdaLoRA rank", ) parser.add_argument( "--target_r", type=int, default=4, help="Target AdaLoRA rank", ) parser.add_argument( "--tinit", type=int, default=200, help="number of warmup steps for AdaLoRA wherein no pruning is performed", ) parser.add_argument( "--tfinal", type=int, default=1000, help=" fix the resulting budget distribution and fine-tune the model for tfinal steps when using AdaLoRA ", ) parser.add_argument( "--delta_t", type=int, default=10, help="interval of steps for AdaLoRA to update rank", ) parser.add_argument( "--lora_alpha", type=int, default=32, help="LORA alpha", ) parser.add_argument( "--r", type=int, default=8, help="LORA rank", ) parser.add_argument( "--lora_dropout", type=float, default=0.1, help="LORA dropout", ) parser.add_argument( "--orth_reg_weight", type=float, default=0.5, help="Orthogonal regularization weight", ) parser.add_argument( "--debug_mode", action="store_true", help="Whether to use debug mode", ) args = parser.parse_args() if args.push_to_hub: assert args.output_dir is not None, "Need an `output_dir` to create a repo when `--push_to_hub` is passed." return args def load_streaming_dataset(dataset_name, dataset_config_name, split, **kwargs): if "+" in split: # load multiple splits separated by the `+` symbol *with* streaming mode dataset_splits = [ load_dataset(dataset_name, dataset_config_name, split=split_name, streaming=True, **kwargs) for split_name in split.split("+") ] # interleave multiple splits to form one dataset interleaved_dataset = interleave_datasets(dataset_splits) return interleaved_dataset else: # load a single split *with* streaming mode dataset = load_dataset(dataset_name, dataset_config_name, split=split, streaming=True, **kwargs) return dataset def prepare_dataset_wrapper(do_lower_case, do_remove_punctuation, processor, normalizer): def prepare_dataset(batch): # load and (possibly) resample audio data to 16kHz audio = batch["audio"] # compute log-Mel input features from input audio array batch["input_features"] = processor.feature_extractor( audio["array"], sampling_rate=audio["sampling_rate"] ).input_features[0] # compute input length of audio sample in seconds batch["input_length"] = len(audio["array"]) / audio["sampling_rate"] # optional pre-processing steps transcription = batch["sentence"] if do_lower_case: transcription = transcription.lower() if do_remove_punctuation: transcription = normalizer(transcription).strip() # encode target text to label ids batch["labels"] = processor.tokenizer(transcription).input_ids return batch return prepare_dataset def save_model_hook(models, weights, output_dir): for model in models: model.save_pretrained(output_dir) # make sure to pop weight so that corresponding model is not saved again weights.pop() def load_model_hook(models, input_dir): while len(models) > 0: model = models.pop() # pop models so that they are not loaded again PeftModel.from_pretrained(model.base_model.model, input_dir) @dataclass class DataCollatorSpeechSeq2SeqWithPadding: processor: Any def __call__(self, features: list[dict[str, Union[list[int], torch.Tensor]]]) -> dict[str, torch.Tensor]: # split inputs and labels since they have to be of different lengths and need different padding methods # first treat the audio inputs by simply returning torch tensors input_features = [{"input_features": feature["input_features"]} for feature in features] batch = 
self.processor.feature_extractor.pad(input_features, return_tensors="pt") # get the tokenized label sequences label_features = [{"input_ids": feature["labels"]} for feature in features] # pad the labels to max length labels_batch = self.processor.tokenizer.pad(label_features, return_tensors="pt") # replace padding with -100 to ignore loss correctly labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100) # if bos token is appended in previous tokenization step, # cut bos token here as it's append later anyways if (labels[:, 0] == self.processor.tokenizer.bos_token_id).all().cpu().item(): labels = labels[:, 1:] batch["labels"] = labels return batch def get_audio_length_processor(max_input_length): def is_audio_in_length_range(length): return length < max_input_length return is_audio_in_length_range def evaluation_loop(model, eval_dataloader, processor, normalizer, metric, forced_decoder_ids, accelerator): model.eval() predictions = [] references = [] normalized_predictions = [] normalized_references = [] device_type = torch.accelerator.current_accelerator().type if hasattr(torch, "accelerator") else "cuda" for _, batch in enumerate(tqdm(eval_dataloader)): with torch.amp.autocast(device_type=device_type): with torch.no_grad(): generated_tokens = ( model.generate( input_features=batch["input_features"], forced_decoder_ids=forced_decoder_ids, max_new_tokens=255, ) .cpu() .numpy() ) labels = batch["labels"].cpu().numpy() labels = np.where(labels != -100, labels, processor.tokenizer.pad_token_id) decoded_preds = processor.tokenizer.batch_decode(generated_tokens, skip_special_tokens=True) decoded_labels = processor.tokenizer.batch_decode(labels, skip_special_tokens=True) predictions.extend(decoded_preds) references.extend(decoded_labels) normalized_predictions.extend([normalizer(pred).strip() for pred in decoded_preds]) normalized_references.extend([normalizer(label).strip() for label in decoded_labels]) del generated_tokens, labels, batch gc.collect() wer = 100 * metric.compute(predictions=predictions, references=references) normalized_wer = 100 * metric.compute(predictions=normalized_predictions, references=normalized_references) eval_metrics = {"eval/wer": wer, "eval/normalized_wer": normalized_wer} if accelerator.get_tracker("wandb"): sample_size = min(len(predictions), 256) ids = [randint(0, len(predictions) - 1) for p in range(0, sample_size)] sample_predictions = [predictions[i] for i in ids] sample_references = [references[i] for i in ids] sample_normalized_predictions = [normalized_predictions[i] for i in ids] sample_normalized_references = [normalized_references[i] for i in ids] table_rows = [ list(r) for r in zip( sample_predictions, sample_references, sample_normalized_predictions, sample_normalized_references ) ] eval_metrics["eval_samples"] = wandb.Table( columns=["predictions", "references", "normalized_predictions", "normalized_references"], rows=table_rows, ) return eval_metrics def main(): args = parse_args() accelerator_kwargs = {"gradient_accumulation_steps": args.gradient_accumulation_steps} if args.with_tracking: accelerator_kwargs["log_with"] = args.report_to accelerator_kwargs["project_dir"] = args.output_dir accelerator = Accelerator(**accelerator_kwargs) # Make one log on every process with the configuration for debugging. 
logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger.info(accelerator.state, main_process_only=False) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Handle the repository creation if accelerator.is_main_process: if args.push_to_hub: api = HfApi(token=args.hub_token) # Create repo (repo_name from args or inferred) repo_name = args.hub_model_id if repo_name is None: repo_name = Path(args.output_dir).absolute().name repo_id = api.create_repo(repo_name, exist_ok=True).repo_id with open(os.path.join(args.output_dir, ".gitignore"), "w+") as gitignore: if "step_*" not in gitignore: gitignore.write("step_*\n") if "epoch_*" not in gitignore: gitignore.write("epoch_*\n") elif args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) accelerator.wait_for_everyone() # load dataset either in streaming mode or not processor = WhisperProcessor.from_pretrained(args.model_name_or_path, language=args.language, task=args.task) normalizer = BasicTextNormalizer() prepare_dataset = prepare_dataset_wrapper(args.do_lower_case, args.do_remove_punctuation, processor, normalizer) is_audio_in_length_range = get_audio_length_processor(args.max_audio_input_length) data_collator = DataCollatorSpeechSeq2SeqWithPadding(processor=processor) if args.dataset_in_streaming_mode: raw_datasets = IterableDatasetDict() loading_method = load_streaming_dataset else: raw_datasets = DatasetDict() loading_method = load_dataset if args.debug_mode: train_split = "train[:100]" test_split = "test[:10]" else: train_split = "train+validation" test_split = "test" raw_datasets["train"] = loading_method(args.dataset_name, args.language_abbr, split=train_split) raw_datasets["test"] = loading_method(args.dataset_name, args.language_abbr, split=test_split) raw_datasets = raw_datasets.cast_column("audio", Audio(sampling_rate=16000)) logger.info("Dataset loaded: %s", raw_datasets) logger.info(f"{raw_datasets['train'][0]}") vectorized_datasets = raw_datasets.map( prepare_dataset, remove_columns=list(next(iter(raw_datasets.values())).features), num_proc=args.preprocessing_num_workers, ).with_format("torch") if args.dataset_in_streaming_mode: vectorized_datasets["train"] = vectorized_datasets["train"].shuffle( buffer_size=args.buffer_size, seed=args.seed, ) # filter out audio files that are too long from the training set is_audio_in_length_range = get_audio_length_processor(args.max_audio_input_length) vectorized_datasets["train"] = vectorized_datasets["train"].filter( is_audio_in_length_range, input_columns=["input_length"] ) # get dataloaders train_dataloader = DataLoader( vectorized_datasets["train"], batch_size=args.per_device_train_batch_size, shuffle=True, collate_fn=data_collator, num_workers=args.dataloader_num_workers, pin_memory=args.dataloader_pin_memory, ) eval_dataloader = DataLoader( vectorized_datasets["test"], batch_size=args.per_device_eval_batch_size, collate_fn=data_collator, num_workers=args.dataloader_num_workers, pin_memory=args.dataloader_pin_memory, ) # metric metric = evaluate.load("wer") # model model = WhisperForConditionalGeneration.from_pretrained( args.model_name_or_path, quantization_config=BitsAndBytesConfig(load_in_8bit=True) ) 
model.config.forced_decoder_ids = None model.config.suppress_tokens = [] if hasattr(model, "hf_device_map") and len(set(model.hf_device_map.values()).intersection({"cpu", "disk"})) > 0: raise ValueError("Training on CPU or disk is not supported.") if hasattr(model, "hf_device_map") and len(set(model.hf_device_map.values())) > 1: device_map = model.hf_device_map.copy() # required because `labels` are on main execution device (0) while the output of `proj_out` is on other device. # So, this leads to device mismatch error when calculation cross-entropy between logits and labels. # Won't arise during inference as `labels` aren't supplied during that time # instead of changing device of one of the tied modules, I have to do this for all tied modules # else the execution device of remaining tied modules isn't changed device_map["model.decoder.embed_tokens"] = model._hf_hook.execution_device device_map["model.decoder.embed_positions"] = model._hf_hook.execution_device device_map["proj_out"] = model._hf_hook.execution_device dispatch_model(model, device_map=device_map) # preparing peft model if args.use_peft: from peft import prepare_model_for_kbit_training model = prepare_model_for_kbit_training(model) # as Whisper model uses Conv layer in encoder, checkpointing disables grad computation # to avoid this, make the inputs trainable def make_inputs_require_grad(module, input, output): output.requires_grad_(True) model.model.encoder.conv1.register_forward_hook(make_inputs_require_grad) # Calculate total steps first for AdaLoRA if args.max_train_steps is None: num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) total_steps = args.num_train_epochs * num_update_steps_per_epoch else: total_steps = args.max_train_steps # wrapping model with adalora tuner if args.use_adalora: config = AdaLoraConfig( init_r=args.init_r, target_r=args.target_r, beta1=0.85, beta2=0.85, tinit=args.tinit, tfinal=args.tfinal, deltaT=args.delta_t, lora_alpha=args.lora_alpha, lora_dropout=args.lora_dropout, target_modules=["k_proj", "q_proj", "v_proj", "out_proj", "fc1", "fc2"], orth_reg_weight=args.orth_reg_weight, total_step=total_steps, ) else: config = LoraConfig( r=args.r, lora_alpha=args.lora_alpha, target_modules=["q_proj", "v_proj"], lora_dropout=args.lora_dropout, ) model = get_peft_model(model, config) model.print_trainable_parameters() # optimizer optimizer = torch.optim.AdamW(model.parameters(), lr=args.learning_rate, weight_decay=args.weight_decay) if args.max_train_steps is None: num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch else: args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) # scheduler lr_scheduler = get_scheduler( name=args.lr_scheduler_type, optimizer=optimizer, num_warmup_steps=args.num_warmup_steps, num_training_steps=args.max_train_steps, ) # Prepare everything with our `accelerator`. 
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare( model, optimizer, train_dataloader, eval_dataloader, lr_scheduler ) accelerator.print(model) # Note here that the max steps is adjusted by the accelerator's num_processes args.max_train_steps = math.ceil(args.max_train_steps / accelerator.num_processes) if args.use_peft and args.use_adalora: # Update the total_step in the config to reflect the adjusted max_train_steps # Handle DDP case where model is wrapped if hasattr(model, "module"): # DDP case model.module.base_model.peft_config["default"].total_step = args.max_train_steps else: # Non-DDP case model.base_model.peft_config["default"].total_step = args.max_train_steps # We need to initialize the trackers we use, and also store our configuration. # The trackers initializes automatically on the main process. if args.with_tracking: run_name = f"run-{datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}" experiment_config = vars(args) # TensorBoard cannot log Enums, need the raw value experiment_config["lr_scheduler_type"] = experiment_config["lr_scheduler_type"].value accelerator.init_trackers( "Whisper PEFT Fine-Tuning", config=experiment_config, init_kwargs={"wandb": {"name": run_name}} ) # saving and loading checkpoints for resuming training accelerator.register_save_state_pre_hook(save_model_hook) accelerator.register_load_state_pre_hook(load_model_hook) total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps logger.info("***** Running training *****") logger.info(f" Num Epochs = {args.num_train_epochs}") logger.info(f" Instantaneous batch size per device = {args.per_device_train_batch_size}") logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}") logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") logger.info(f" Total optimization steps = {args.max_train_steps}") # Only show the progress bar once on each machine. progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process) global_step = 0 starting_epoch = 0 best_metric = None resume_step = 0 forced_decoder_ids = processor.get_decoder_prompt_ids(language=args.language, task=args.task) # Potentially load in the weights and states from a previous save if args.resume_from_checkpoint: accelerator.load_state(args.resume_from_checkpoint) path = os.path.basename(args.resume_from_checkpoint) training_difference = os.path.splitext(path)[0] global_step = resume_step = int(training_difference.replace("step_", "")) starting_epoch = resume_step // len(train_dataloader) resume_step -= starting_epoch * len(train_dataloader) # We need to adjust the progress bar to the current step progress_bar.update(resume_step) for epoch in range(starting_epoch, args.num_train_epochs): model.train() if args.with_tracking: total_loss = 0 running_loss = 0 for step, batch in enumerate(accelerator.skip_first_batches(train_dataloader, num_batches=resume_step)): with accelerator.accumulate(model): outputs = model(**batch) loss = outputs.loss accelerator.backward(loss) optimizer.step() lr_scheduler.step() # Update the importance of low-rank matrices # and allocate the budget accordingly. # This is only needed for AdaLora. # Note that this requires parameter gradients. # Hence being called before optimizer.zero_grad(). 
if args.use_peft and args.use_adalora: # Handle DDP case where model is wrapped if hasattr(model, "module"): # DDP case peft_model = model.module else: # Non-DDP case peft_model = model # Check if rank_pattern exists before calling update_and_allocate if ( hasattr(peft_model, "peft_config") and peft_model.peft_config["default"].rank_pattern is not None and global_step >= args.tinit # Only start updating after tinit steps ): peft_model.update_and_allocate(global_step) optimizer.zero_grad() global_step += 1 progress_bar.update(1) if args.with_tracking: step_loss = accelerator.reduce(loss.detach().clone()).item() total_loss += step_loss running_loss += step_loss if global_step % args.checkpointing_steps == 0: output_dir = os.path.join(args.output_dir, f"step_{global_step}") accelerator.save_state(output_dir) if global_step % args.logging_steps == 0: if args.with_tracking: accelerator.log({"train/running_loss": running_loss / args.logging_steps}, step=global_step) running_loss = 0 if global_step % args.evaluation_steps == 0: eval_metrics = evaluation_loop( model, eval_dataloader, processor, normalizer, metric, forced_decoder_ids, accelerator ) if args.with_tracking: logger.info(f"Step {global_step} eval metrics: {eval_metrics}") accelerator.log(eval_metrics, step=global_step) if best_metric is None or eval_metrics["eval/wer"] < best_metric: best_metric = eval_metrics["eval/wer"] accelerator.save_state(os.path.join(args.output_dir, "best_checkpoint")) model.train() if global_step >= args.max_train_steps: break if args.with_tracking: train_epoch_loss = total_loss / (step + 1) logger.info(f"Epoch {epoch} train loss: {train_epoch_loss}") accelerator.log({"epoch/train_loss": train_epoch_loss}, step=epoch) if args.push_to_hub and epoch <= args.num_train_epochs - 1: accelerator.wait_for_everyone() unwrapped_model = accelerator.unwrap_model(model) unwrapped_model.save_pretrained(args.output_dir, is_main_process=accelerator.is_main_process) # evaluate the model at the end of training eval_metrics = evaluation_loop( model, eval_dataloader, processor, normalizer, metric, forced_decoder_ids, accelerator ) if args.with_tracking: logger.info(f"Step {global_step} eval metrics: {eval_metrics}") accelerator.log(eval_metrics, step=global_step) if best_metric is None or eval_metrics["eval/wer"] < best_metric: best_metric = eval_metrics["eval/wer"] accelerator.save_state(os.path.join(args.output_dir, "best_checkpoint")) if accelerator.is_main_process: processor.tokenizer.save_pretrained(args.output_dir) api.upload_folder( repo_id=repo_id, folder_path=args.output_dir, commit_message=f"Training in progress epoch {epoch}", run_as_future=True, ) if args.load_best_model: # load the best model accelerator.load_state(os.path.join(args.output_dir, "best_checkpoint")) # Handle DDP case where model is wrapped if hasattr(model, "module"): # DDP case peft_model = model.module else: # Non-DDP case peft_model = model # Only resize if rank_pattern exists if hasattr(peft_model, "peft_config") and peft_model.peft_config["default"].rank_pattern is not None: peft_model.resize_modules_by_rank_pattern(peft_model.peft_config["default"].rank_pattern, "default") eval_metrics = evaluation_loop( model, eval_dataloader, processor, normalizer, metric, forced_decoder_ids, accelerator ) if args.with_tracking: best_metrics = {"best_" + k: v for k, v in eval_metrics.items()} accelerator.log(best_metrics, step=global_step) accelerator.wait_for_everyone() unwrapped_model = accelerator.unwrap_model(model) 
unwrapped_model.save_pretrained(args.output_dir, is_main_process=accelerator.is_main_process) if accelerator.is_main_process: processor.tokenizer.save_pretrained(args.output_dir) if args.push_to_hub: api.upload_folder( repo_id=repo_id, folder_path=args.output_dir, commit_message="End of training", ) with open(os.path.join(args.output_dir, "all_results.json"), "w") as f: eval_metrics.pop("eval_samples") json.dump(eval_metrics, f) if __name__ == "__main__": main()
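The AdaLoRA-specific calls in this script (`total_step`, `tinit`, `update_and_allocate`, `rank_pattern`) assume the Whisper model was wrapped with an `AdaLoraConfig` earlier in the script. The sketch below shows such a setup in minimal form; the model ID, target modules, and all hyperparameter values are illustrative and not necessarily the exact ones used by this example.

```python
from transformers import WhisperForConditionalGeneration
from peft import AdaLoraConfig, get_peft_model

# Illustrative values only; total_step must match the (adjusted) args.max_train_steps above.
adalora_config = AdaLoraConfig(
    init_r=12,        # initial rank of each adapted weight matrix
    target_r=4,       # average target rank after budget allocation
    tinit=200,        # steps before rank updates begin (compared against global_step above)
    tfinal=1000,      # steps of final fine-tuning with the allocated ranks fixed
    deltaT=10,        # interval between two budget reallocations
    total_step=5000,
    lora_alpha=32,
    lora_dropout=0.1,
    target_modules=["q_proj", "v_proj"],
)

base_model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-large-v2")
model = get_peft_model(base_model, adalora_config)
model.print_trainable_parameters()
```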
peft/examples/int8_training/peft_adalora_whisper_large_training.py/0
{ "file_path": "peft/examples/int8_training/peft_adalora_whisper_large_training.py", "repo_id": "peft", "token_count": 14189 }
229
# MiSS: Balancing LoRA Performance and Efficiency with Simple Shard Sharing ## Introduction ([Paper](https://huggingface.co/papers/2409.15371), [code](https://github.com/JL-er/MiSS)) MiSS (Matrix Shard Sharing) is a novel PEFT method that adopts a low-rank structure, requires only a single trainable matrix, and introduces a new update mechanism distinct from LoRA, achieving an excellent balance between performance and efficiency. ## Quick Start ```python import torch from peft import MissConfig, get_peft_model from transformers import AutoTokenizer, AutoModelForCausalLM from trl import SFTConfig, SFTTrainer from datasets import load_dataset model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf", torch_dtype=torch.bfloat16, device_map="auto") tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-hf") tokenizer.pad_token_id = tokenizer.eos_token_id miss_config = MissConfig( r = 64 ) #bat: In this mode, you can enable nonlinear updates across different shards. # miss_config = MissConfig( # r = 64, # init_weights="bat" # ) # mini: In this mode, you can set a smaller rank to use fewer trainable parameters, but it is recommended to keep `out_features % mini_r == 0`. # miss_config = MissConfig( # r = 64, # init_weights="mini", # mini_r = 8 # ) peft_model = get_peft_model(model, miss_config) peft_model.print_trainable_parameters() dataset = load_dataset("imdb", split="train[:1%]") training_args = SFTConfig(dataset_text_field="text", max_seq_length=128) trainer = SFTTrainer( model=peft_model, args=training_args, train_dataset=dataset, processing_class=tokenizer, ) trainer.train() peft_model.save_pretrained("miss-llama-2-7b") ``` To utilize the fine-tuned MiSS modules, simply run the following command: ```python import torch from peft import PeftModel from transformers import AutoModelForCausalLM model = AutoModelForCausalLM.from_pretrained( "meta-llama/Llama-2-7b-hf", torch_dtype=torch.bfloat16, device_map="auto" ) peft_model = PeftModel.from_pretrained(model, "miss-llama-2-7b") ``` ## Advanced Usage ### Fine-tune ```shell #Bat performs better than MiSS, but it uses more memory and is twice as slow. If you want to use the Bat method, you only need to add the parameter init_weights="bat". python miss_finetuning.py \ --base_model_name_or_path meta-llama/Llama-2-7b-hf \ --output_dir output/miss-llama-2-7b-metamath-10k \ --miss_r 64 \ --init_weights True \ --bits bf16 \ --data_path meta-math/MetaMathQA \ --dataset_split train[:100000] \ --dataset_field query response \ --bf16 True \ --num_train_epochs 1 \ --per_device_train_batch_size 2 \ --gradient_accumulation_steps 8 \ --save_strategy "steps" \ --save_steps 1000 \ --save_total_limit 1 \ --logging_steps 1 \ --learning_rate 2e-5 \ --weight_decay 0. \ --warmup_ratio 0.03 \ --tf32 True \ --report_to none ``` # Citation ```bib @misc{kang2025balancingloraperformanceefficiency, title={Balancing LoRA Performance and Efficiency with Simple Shard Sharing}, author={Jiale Kang and Qingyu Yin}, year={2025}, eprint={2409.15371}, archivePrefix={arXiv}, primaryClass={cs.CL}, url={https://arxiv.org/abs/2409.15371}, }
peft/examples/miss_finetuning/README.md/0
{ "file_path": "peft/examples/miss_finetuning/README.md", "repo_id": "peft", "token_count": 1236 }
230
#!/usr/bin/env python3 """ Training script for fine-tuning language models with QALoRA using GPTQ quantization. This script supports cached quantization to avoid repeating expensive quantization processes. """ import argparse import os import torch from datasets import load_dataset from transformers import ( AutoModelForCausalLM, AutoTokenizer, DataCollatorForLanguageModeling, GPTQConfig, Trainer, TrainingArguments, ) from peft import LoraConfig, get_peft_model def load_or_quantize_model( base_model: str, tokenizer, bits: int = 4, cache_dir: str = "./quantized_models" ) -> AutoModelForCausalLM: """ Load a pre-quantized model from cache or quantize and cache a new one. Automatically detects if the model is already GPTQ-quantized. Args: base_model: Model identifier or path tokenizer: Tokenizer for the model bits: Bit-width for quantization (default: 4) cache_dir: Directory to store quantized models Returns: The loaded (quantized) model """ # First, check if the model is already GPTQ-quantized by trying to load it print(f"Checking if {base_model} is already GPTQ-quantized...") try: # Try to load the model and check if it has GPTQ quantization test_model = AutoModelForCausalLM.from_pretrained( base_model, device_map="auto", torch_dtype=torch.float16, trust_remote_code=True, # Some GPTQ models might need this ) # Check if the model has GPTQ quantization attributes has_gptq = False for module in test_model.modules(): if hasattr(module, "qweight") or hasattr(module, "qzeros") or "gptq" in str(type(module)).lower(): has_gptq = True break if has_gptq: print(f"✅ Model {base_model} is already GPTQ-quantized. Using directly.") return test_model else: print(f"Model {base_model} is not GPTQ-quantized. Will quantize it.") # Clean up the test model to free memory del test_model if torch.cuda.is_available(): torch.cuda.empty_cache() elif torch.xpu.is_available(): torch.xpu.empty_cache() except Exception as e: print(f"Could not load model {base_model} directly: {e}") print("Will attempt to quantize it...") # If we get here, the model needs to be quantized os.makedirs(cache_dir, exist_ok=True) model_id = base_model.replace("/", "_").replace("\\", "_") # Handle Windows paths too quantized_model_path = os.path.join(cache_dir, f"{model_id}_gptq_{bits}bit") # Check if we already have a cached quantized version if os.path.exists(quantized_model_path) and os.path.exists(os.path.join(quantized_model_path, "config.json")): print(f"Loading pre-quantized model from cache: {quantized_model_path}") return AutoModelForCausalLM.from_pretrained(quantized_model_path, device_map="auto") print(f"Quantizing model and saving to cache: {quantized_model_path}") # Configure GPTQ for first-time quantization gptq_config = GPTQConfig( bits=bits, dataset="c4", tokenizer=tokenizer, group_size=128, desc_act=False, sym=False, ) # Load and quantize the model model = AutoModelForCausalLM.from_pretrained( base_model, device_map="auto", quantization_config=gptq_config, torch_dtype=torch.float16 ) # Save the quantized model to cache print(f"Saving quantized model to {quantized_model_path}") model.save_pretrained(quantized_model_path) tokenizer.save_pretrained(quantized_model_path) return model def tokenize_and_preprocess(examples, tokenizer, max_length: int = 128): """ Tokenize text data and prepare it for language modeling. 
Args: examples: Dataset examples with 'text' field tokenizer: Tokenizer to use max_length: Maximum sequence length Returns: Processed examples with input_ids and labels """ # Tokenize the text with truncation and padding tokenized_output = tokenizer(examples["text"], truncation=True, padding="max_length", max_length=max_length) # Preprocess labels (set pad tokens to -100 for loss masking) labels = tokenized_output["input_ids"].copy() labels = [[-100 if token == tokenizer.pad_token_id else token for token in seq] for seq in labels] tokenized_output["labels"] = labels return tokenized_output def train_model( base_model: str, data_path: str, data_split: str, output_dir: str, batch_size: int, num_epochs: int, learning_rate: float, cutoff_len: int, use_qalora: bool, eval_step: int, save_step: int, device: str, lora_r: int, lora_alpha: int, lora_dropout: float, lora_target_modules: str, push_to_hub: bool, qalora_group_size: int, bits: int, ) -> None: """ Train a model with QALoRA and GPTQ quantization. Args: base_model: Base model to fine-tune data_path: Dataset path output_dir: Directory to save model outputs batch_size: Training batch size num_epochs: Number of training epochs learning_rate: Learning rate cutoff_len: Maximum sequence length val_set_size: Validation set size use_dora: Whether to use DoRA use_qalora: Whether to use QALoRA quantize: Whether to use quantization eval_step: Steps between evaluations save_step: Steps between saving checkpoints device: Device to use (cuda:0, xpu:0, etc.) lora_r: LoRA rank lora_alpha: LoRA alpha lora_dropout: LoRA dropout rate lora_target_modules: Target modules for LoRA push_to_hub: Whether to push to Hugging Face Hub """ os.environ["TOKENIZERS_PARALLELISM"] = "false" hf_token = os.getenv("HF_TOKEN") device = torch.device(device) print(f"Using device: {device}") # Load tokenizer tokenizer = AutoTokenizer.from_pretrained(base_model, token=hf_token) if tokenizer.pad_token is None: tokenizer.pad_token = tokenizer.eos_token # Load or quantize model model = load_or_quantize_model(base_model, tokenizer, bits=bits) # Configure LoRA target_modules = ( lora_target_modules.split(",") if lora_target_modules else ["q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj"] ) print("use_qalora", use_qalora) lora_config = LoraConfig( task_type="CAUSAL_LM", use_qalora=use_qalora, qalora_group_size=qalora_group_size, r=lora_r, lora_alpha=lora_alpha, target_modules=target_modules, lora_dropout=lora_dropout, bias="none", ) # Get PEFT model with adapters model = get_peft_model(model, lora_config) model.print_trainable_parameters() # Move model to device if not already there if not hasattr(model, "device") or model.device.type != device.type: model = model.to(device) # Load and prepare dataset dataset = load_dataset(data_path, data_split) tokenized_datasets = { "train": dataset["train"].map( lambda x: tokenize_and_preprocess(x, tokenizer, max_length=cutoff_len), batched=True, remove_columns=["text"], load_from_cache_file=True, ), "test": dataset["test"].map( lambda x: tokenize_and_preprocess(x, tokenizer, max_length=cutoff_len), batched=True, remove_columns=["text"], load_from_cache_file=True, ), } # Data collator for language modeling data_collator = DataCollatorForLanguageModeling(tokenizer, mlm=False) # Configure training arguments training_args = TrainingArguments( output_dir=output_dir, num_train_epochs=num_epochs, per_device_train_batch_size=batch_size, per_device_eval_batch_size=batch_size, warmup_steps=100, weight_decay=0.01, 
logging_dir="./logs", logging_steps=eval_step, save_steps=save_step, save_total_limit=2, push_to_hub=push_to_hub, gradient_accumulation_steps=16, fp16=True, learning_rate=learning_rate, hub_token=hf_token, label_names=["labels"], ) # Clear accelerator cache to free memory if torch.cuda.is_available(): torch.cuda.empty_cache() elif torch.xpu.is_available(): torch.xpu.empty_cache() # Initialize trainer trainer = Trainer( model=model, args=training_args, train_dataset=tokenized_datasets["train"], eval_dataset=tokenized_datasets["test"], data_collator=data_collator, ) # Start training print("\nStarting training...") trainer.train() # Save the final model if push_to_hub: trainer.push_to_hub(commit_message="Fine-tuned model with QALoRA") # Always save locally model.save_pretrained(output_dir) tokenizer.save_pretrained(output_dir) print(f"\nTraining complete. Model saved to {output_dir}") if __name__ == "__main__": parser = argparse.ArgumentParser(description="Fine-tune LLMs with QALoRA and GPTQ quantization") # Model and dataset parameters parser.add_argument("--base_model", type=str, default="TheBloke/Llama-2-7b-GPTQ", help="Base model path or name") parser.add_argument( "--data_path", type=str, default="timdettmers/openassistant-guanaco", help="Dataset path or name" ) parser.add_argument("--data_split", type=str, default="", help="Dataset path or name") parser.add_argument( "--output_dir", type=str, default="./qalora_output", help="Output directory for the fine-tuned model" ) parser.add_argument("--bits", type=int, default=4, help="Init quantization bits") # Training parameters parser.add_argument("--batch_size", type=int, default=4, help="Batch size") parser.add_argument("--num_epochs", type=int, default=1, help="Number of training epochs") parser.add_argument("--learning_rate", type=float, default=3e-4, help="Learning rate") parser.add_argument("--cutoff_len", type=int, default=128, help="Max sequence length") # Adapter configuration parser.add_argument("--use_qalora", action="store_true", help="Apply QALoRA") parser.add_argument("--qalora_group_size", type=int, default=32, help="LoRA rank") parser.add_argument("--lora_r", type=int, default=8, help="LoRA rank") parser.add_argument("--lora_alpha", type=int, default=16, help="LoRA alpha") parser.add_argument("--lora_dropout", type=float, default=0.05, help="LoRA dropout rate") parser.add_argument( "--lora_target_modules", type=str, default=None, help="Comma-separated list of target modules for LoRA" ) # Training process options parser.add_argument("--eval_step", type=int, default=100, help="Evaluation step interval") parser.add_argument("--save_step", type=int, default=500, help="Save step interval") parser.add_argument("--device", type=str, default="auto", help="Device to use for training") # Hugging Face Hub options parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the model to Hugging Face Hub") args = parser.parse_args() device = args.device if args.device == "auto": device = torch.accelerator.current_accelerator().type if hasattr(torch, "accelerator") else "cuda" # If use_qalora isn't explicitly set in args but passed to train_model if not args.use_qalora: args.use_qalora = True # Default to True as in the original code train_model( base_model=args.base_model, data_path=args.data_path, data_split=args.data_split, output_dir=args.output_dir, batch_size=args.batch_size, num_epochs=args.num_epochs, learning_rate=args.learning_rate, cutoff_len=args.cutoff_len, use_qalora=args.use_qalora, eval_step=args.eval_step, 
save_step=args.save_step, device=device, lora_r=args.lora_r, lora_alpha=args.lora_alpha, lora_dropout=args.lora_dropout, lora_target_modules=args.lora_target_modules, push_to_hub=args.push_to_hub, qalora_group_size=args.qalora_group_size, bits=args.bits, )
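Once training finishes, the adapter and tokenizer are saved to `--output_dir`. The sketch below shows one plausible way to load them back for inference; the base model ID and output directory simply mirror the argument defaults above and should be replaced with your own values, and loading a GPTQ checkpoint additionally requires a GPTQ backend (for example `optimum` with `gptqmodel`).

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_model_id = "TheBloke/Llama-2-7b-GPTQ"   # matches the --base_model default above
adapter_dir = "./qalora_output"              # matches the --output_dir default above

tokenizer = AutoTokenizer.from_pretrained(adapter_dir)
model = AutoModelForCausalLM.from_pretrained(base_model_id, device_map="auto", torch_dtype=torch.float16)
model = PeftModel.from_pretrained(model, adapter_dir)
model.eval()

prompt = "### Human: What is QALoRA?### Assistant:"
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
with torch.no_grad():
    output = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```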
peft/examples/qalora_finetuning/qalora_gptq_finetuning.py/0
{ "file_path": "peft/examples/qalora_finetuning/qalora_gptq_finetuning.py", "repo_id": "peft", "token_count": 5177 }
231
<jupyter_start><jupyter_text>Using VB-LoRA for sequence classification In this example, we fine-tune Roberta on a sequence classification task using VB-LoRA.This notebook is adapted from `examples/sequence_classification/VeRA.ipynb`. Imports<jupyter_code>import torch from torch.optim import AdamW from torch.utils.data import DataLoader from peft import ( get_peft_model, VBLoRAConfig, PeftType, ) import evaluate from datasets import load_dataset from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup from tqdm import tqdm<jupyter_output><empty_output><jupyter_text>Parameters<jupyter_code>batch_size = 32 model_name_or_path = "roberta-large" task = "mrpc" peft_type = PeftType.VBLORA device = torch.accelerator.current_accelerator().type if hasattr(torch, "accelerator") else "cuda" num_epochs = 20 rank = 4 max_length = 128 num_vectors = 90 vector_length = 256 torch.manual_seed(0) peft_config = VBLoRAConfig( task_type="SEQ_CLS", r=rank, topk=2, target_modules=['key', 'value', 'query', 'output.dense', 'intermediate.dense'], num_vectors=num_vectors, vector_length=vector_length, save_only_topk_weights=True, # Set to True to reduce storage space. Note that the saved parameters cannot be used to resume training from checkpoints. vblora_dropout=0., ) head_lr = 4e-3 vector_bank_lr = 1e-3 logits_lr = 1e-2<jupyter_output><empty_output><jupyter_text>Loading data<jupyter_code>if any(k in model_name_or_path for k in ("gpt", "opt", "bloom")): padding_side = "left" else: padding_side = "right" tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, padding_side=padding_side) if getattr(tokenizer, "pad_token_id") is None: tokenizer.pad_token_id = tokenizer.eos_token_id datasets = load_dataset("glue", task) metric = evaluate.load("glue", task) def tokenize_function(examples): # max_length=None => use the model max length (it's actually the default) outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=max_length) return outputs tokenized_datasets = datasets.map( tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library tokenized_datasets = tokenized_datasets.rename_column("label", "labels") def collate_fn(examples): return tokenizer.pad(examples, padding="longest", return_tensors="pt") # Instantiate dataloaders. 
train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size) eval_dataloader = DataLoader( tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size )<jupyter_output><empty_output><jupyter_text>Preparing the VB-LoRA model<jupyter_code>model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path, return_dict=True, max_length=None) model = get_peft_model(model, peft_config) model.print_trainable_parameters() model.print_savable_parameters() from transformers.pytorch_utils import ALL_LAYERNORM_LAYERS from transformers.trainer_pt_utils import get_parameter_names decay_parameters = get_parameter_names(model, ALL_LAYERNORM_LAYERS) decay_parameters = [name for name in decay_parameters if "bias" not in name] vector_bank_parameters = [name for name, _ in model.named_parameters() if "vector_bank" in name] logits_parameters = [name for name, _ in model.named_parameters() if "logits" in name ] optimizer_grouped_parameters = [ { "params": [p for n, p in model.named_parameters() if n in decay_parameters and \ n not in logits_parameters and n not in vector_bank_parameters], "weight_decay": 0.1, "lr": head_lr, }, { "params": [p for n, p in model.named_parameters() if n not in decay_parameters and \ n not in logits_parameters and n not in vector_bank_parameters], "weight_decay": 0.0, "lr": head_lr, }, { "params": [p for n, p in model.named_parameters() if n in vector_bank_parameters], "lr": vector_bank_lr, "weight_decay": 0.0, }, { "params": [p for n, p in model.named_parameters() if n in logits_parameters], "lr": logits_lr, "weight_decay": 0.0, }, ] optimizer = AdamW(optimizer_grouped_parameters) lr_scheduler = get_linear_schedule_with_warmup( optimizer=optimizer, num_warmup_steps=0.06 * (len(train_dataloader) * num_epochs), num_training_steps=(len(train_dataloader) * num_epochs), )<jupyter_output><empty_output><jupyter_text>Training<jupyter_code>model.to(device) for epoch in range(num_epochs): model.train() for step, batch in enumerate(tqdm(train_dataloader)): batch.to(device) outputs = model(**batch) loss = outputs.loss loss.backward() optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(tqdm(eval_dataloader)): batch.to(device) with torch.no_grad(): outputs = model(**batch) predictions = outputs.logits.argmax(dim=-1) predictions, references = predictions, batch["labels"] metric.add_batch( predictions=predictions, references=references, ) eval_metric = metric.compute() print(f"epoch {epoch}:", eval_metric)<jupyter_output>0%| | 0/115 [00:00<?, ?it/s]You're using a RobertaTokenizerFast tokenizer. Please note that with a fast tokenizer, using the `__call__` method is faster than using a method to encode the text followed by a call to the `pad` method to get a padded encoding. 100%|██████████| 115/115 [00:34<00:00, 3.33it/s] 100%|██████████| 13/13 [00:01<00:00, 7.84it/s]<jupyter_text>Share adapters on the 🤗 Hub<jupyter_code>account_id = ... 
# your Hugging Face Hub account ID model.push_to_hub(f"{account_id}/roberta-large-peft-vblora")<jupyter_output><empty_output><jupyter_text>Load adapters from the HubYou can also directly load adapters from the Hub using the commands below:<jupyter_code>import torch from peft import PeftModel, PeftConfig from transformers import AutoTokenizer peft_model_id = f"{account_id}/roberta-large-peft-vblora" config = PeftConfig.from_pretrained(peft_model_id) inference_model = AutoModelForSequenceClassification.from_pretrained(config.base_model_name_or_path) tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path) # Load the model inference_model = PeftModel.from_pretrained(inference_model, peft_model_id) inference_model.to(device) inference_model.eval() for step, batch in enumerate(tqdm(eval_dataloader)): batch.to(device) with torch.no_grad(): outputs = inference_model(**batch) predictions = outputs.logits.argmax(dim=-1) predictions, references = predictions, batch["labels"] metric.add_batch( predictions=predictions, references=references, ) eval_metric = metric.compute() print(eval_metric)<jupyter_output>0%| | 0/13 [00:00<?, ?it/s]You're using a RobertaTokenizerFast tokenizer. Please note that with a fast tokenizer, using the `__call__` method is faster than using a method to encode the text followed by a call to the `pad` method to get a padded encoding. 100%|██████████| 13/13 [00:01<00:00, 7.81it/s]
peft/examples/sequence_classification/VBLoRA.ipynb/0
{ "file_path": "peft/examples/sequence_classification/VBLoRA.ipynb", "repo_id": "peft", "token_count": 2895 }
232
accelerate launch --config_file "configs/fsdp_config.yaml" train.py \ --seed 100 \ --model_name_or_path "hugging-quants/Meta-Llama-3.1-8B-Instruct-GPTQ-INT4" \ --dataset_name "smangrul/ultrachat-10k-chatml" \ --chat_template_format "chatml" \ --add_special_tokens False \ --append_concat_token False \ --splits "train,test" \ --max_seq_len 2048 \ --num_train_epochs 1 \ --logging_steps 5 \ --log_level "info" \ --logging_strategy "steps" \ --eval_strategy "epoch" \ --save_strategy "epoch" \ --bf16 True \ --packing True \ --learning_rate 1e-4 \ --lr_scheduler_type "cosine" \ --weight_decay 1e-4 \ --warmup_ratio 0.0 \ --max_grad_norm 1.0 \ --output_dir "llama3-8B-gptq-sft-lora-fsdp" \ --per_device_train_batch_size 8 \ --per_device_eval_batch_size 8 \ --gradient_accumulation_steps 4 \ --gradient_checkpointing True \ --use_reentrant False \ --dataset_text_field "content" \ --use_flash_attn True \ --use_peft_lora True \ --lora_r 8 \ --lora_alpha 16 \ --lora_dropout 0.1 \ --lora_target_modules "q_proj,k_proj,v_proj,o_proj,up_proj,gate_proj" \ --use_4bit_quantization False
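For reference, the LoRA-related flags in this command (`--lora_r 8`, `--lora_alpha 16`, `--lora_dropout 0.1`, `--lora_target_modules ...`) roughly correspond to a PEFT configuration like the sketch below; the exact mapping depends on how `train.py` constructs its config.

```python
from peft import LoraConfig

# Approximate Python equivalent of the LoRA CLI flags above.
peft_config = LoraConfig(
    r=8,
    lora_alpha=16,
    lora_dropout=0.1,
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj", "up_proj", "gate_proj"],
    task_type="CAUSAL_LM",
)
```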
peft/examples/sft/run_peft_fsdp_gptq.sh/0
{ "file_path": "peft/examples/sft/run_peft_fsdp_gptq.sh", "repo_id": "peft", "token_count": 469 }
233
from mistralrs import ChatCompletionRequest, Runner, Which

runner = Runner(
    which=Which.XLora(
        tok_model_id=None,  # Automatically determined from the ordering file
        model_id=...,  # Model ID of the base model (local path or HF model ID)
        xlora_model_id=...,  # X-LoRA model ID (local path or HF model ID)
        order=...,  # Ordering file to ensure compatibility with PEFT
        tgt_non_granular_index=3,  # Only generate scalings for the first 3 decoding tokens, and then use the last generated one
    )
)

res = runner.send_chat_completion_request(
    ChatCompletionRequest(
        model="mistral",
        messages=[{"role": "user", "content": "Tell me a story about 2 low rank matrices."}],
        max_tokens=256,
        presence_penalty=1.0,
        top_p=0.1,
        temperature=0.5,
    )
)
print(res.choices[0].message.content)
print(res.usage)

peft/examples/xlora/xlora_inference_mistralrs.py/0
{ "file_path": "peft/examples/xlora/xlora_inference_mistralrs.py", "repo_id": "peft", "token_count": 356 }
234
{ "auto_mapping": null, "base_model_name_or_path": null, "inference_mode": false, "num_attention_heads": 24, "num_layers": 28, "num_transformer_submodules": 1, "num_virtual_tokens": 200, "peft_type": "PROMPT_TUNING", "prompt_tuning_init": "RANDOM", "prompt_tuning_init_text": null, "revision": null, "task_type": "CAUSAL_LM", "token_dim": 3072, "tokenizer_kwargs": null, "tokenizer_name_or_path": null }
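This adapter configuration can also be produced programmatically. The sketch below is an approximate equivalent; fields such as `token_dim`, `num_layers`, and `num_attention_heads` are normally filled in from the base model when `get_peft_model` is called, so only the explicitly chosen values are passed here.

```python
from peft import PromptTuningConfig

# Approximate recreation of the adapter_config.json above.
config = PromptTuningConfig(
    task_type="CAUSAL_LM",
    num_virtual_tokens=200,
    prompt_tuning_init="RANDOM",
)
config.save_pretrained("experiments/prompt_tuning/llama-3.2-3B-default")
```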
peft/method_comparison/MetaMathQA/experiments/prompt_tuning/llama-3.2-3B-default/adapter_config.json/0
{ "file_path": "peft/method_comparison/MetaMathQA/experiments/prompt_tuning/llama-3.2-3B-default/adapter_config.json", "repo_id": "peft", "token_count": 190 }
235
--- title: PEFT Method Comparison sdk: gradio app_file: app.py pinned: false emoji: ⚖️ --- # Comparison of PEFT Methods The goal of this project is to provide replicable experiments that produce outcomes allowing us to compare different PEFT methods with one another. This gives you more information to make an informed decision about which methods best fit your use case and what trade-offs to expect. Visit our [Gradio Space](https://huggingface.co/spaces/peft-internal-testing/PEFT-method-comparison) to check the results. ## Community Contributions We envision the PEFT method comparison project as an ongoing endeavor with heavy involvement from the community. As maintainers, it is impossible for us to know all the perfect hyperparameters for each method or to predict all the use cases that PEFT users may have. As a consequence, community contributions are very welcome. Below, we outline all the ways you can contribute to this project. ### Creating New Experiments Creating a new experiment requires setting up a new PEFT configuration for us to test. This will result in one more data point being added to the total comparison. Working on this is especially relevant if: 1. You are the author of a paper whose method is introduced in PEFT, or worked on the PEFT integration, and know what hyperparameters work best. 2. You have experience with a specific method and want to share your knowledge with the community. Of course, you can contribute even without meeting these criteria. Please follow the instructions below. #### How to Add New Experiments Start by navigating to one of the existing experiment folders, e.g. `peft/method_comparison/MetaMathQA`, if your experiment involves using the [MetaMathQA dataset](https://huggingface.co/datasets/meta-math/MetaMathQA). There, create a new directory inside the `experiments/<method-name>` folder using a descriptive name. For example, if you want to test LoRA with rank 123 using Llama-3.2 3B as the base model, you could name the folder `experiments/lora/llama-3.2-3B-rank123`. Inside this directory, you will find a default configuration file called `default_training_params.json`, which contains the default parameters used in the `run.py` training script. Create a new JSON file containing all the parameters you want to modify compared to the defaults, and save it as `training_params.json` in the newly created folder. If you are satisfied with all the default training parameters, you can skip this step. Finally, you need to create a PEFT configuration file for the PEFT method you want to add. This should be a JSON file called `adapter_config.json`, placed in the same directory. Below is an example of how this could look: ```python from peft import LoraConfig config = LoraConfig(r=123) config.save_pretrained("experiments/lora/llama-3.2-3B-rank123/") ``` Once you've created the configuration files for your experiment, please [create a PR on PEFT](https://github.com/huggingface/peft/pulls). After it is reviewed and merged, we will run it on our hardware to ensure that the results are comparable. Of course, it is best if you run the experiment at least once on your hardware to verify that the proposed settings work well. #### Considerations When Adding New Experiments When adding a new experiment, please consider the following points: 1. Avoid changing too many training parameters at once, as this would make it difficult to compare results with existing ones. 
For example, if all existing results were created with 5000 training steps but your result uses 10000 steps, it would be unclear whether an improvement in the test score is due to the PEFT method itself or simply due to longer training. Similarly, using a completely different base model, especially if it is significantly more capable, does not contribute to a fair comparison. 2. Avoid suggesting configurations that are very close to existing ones. For example, if there is already an experiment with LoRA and rank 123, do not add an experiment with LoRA and rank 124. 3. Experiments for less-tested methods are more valuable than additional experiments for widely tested methods. 4. Do not edit existing experiments, always create new ones. 5. If you found hyper parameters that work especially well with a given method but are not trivial to find out, consider updating the PEFT documentation of that method so that other users can benefit from your findings. ### Updating the Training Script We provide a training script that includes features typically useful for improving training outcomes, such as AMP support, a cosine learning rate schedule, etc. However, there is always room for improvement. For example, at the time of writing, the script does not support gradient accumulation. Therefore, PRs that extend the training script are welcome. #### How to Update the Training Script Follow the same process as when contributing to PEFT in general (see the [contribution guidelines](https://huggingface.co/docs/peft/developer_guides/contributing)). If the same training script is used across multiple datasets, please ensure that all relevant scripts are updated accordingly. #### Considerations When Updating the Training Script 1. Updates should be backward-compatible. By default, any new features should be disabled to ensure that existing results remain valid. For example, if you add gradient accumulation, ensure it is disabled by default so that new experiments must opt in. 2. Before adding a bug fix that could invalidate existing results, consider whether the trade-off is worthwhile. If we already have many experimental results, rerunning all of them can be expensive. If the bug fix is not critical, it may not be worth invalidating previous results. However, if you discover a significant bug that could meaningfully impact outcomes, it should be addressed. 3. Avoid unnecessary complexity. While we could add support for DeepSpeed, FSDP, etc., doing so would add significant complexity, exclude users with limited hardware, and is unlikely to alter the relative performance of different PEFT methods. 4. Minimize reliance on specific training frameworks. For example, we deliberately avoid using the `Trainer` class from transformers or PyTorch Lightning. This ensures transparency, making it easier to understand the training process and replicate results over time. If a training framework were used, we would have to pin the version or risk future incompatibilities. ### Adding a New Dataset Adding a new dataset increases the breadth and usefulness of the PEFT method comparison. The goal is not necessarily to outperform benchmarks or replicate paper results, but to fairly compare different PEFT methods in a way that is useful for PEFT users. If this involves replicating an experiment from a paper, that is great, but it is not a requirement. 
#### How to Add a New Dataset

The easiest way to add support for a new dataset is to copy an existing setup, such as `method_comparison/MetaMathQA`, rename it, and modify `data.py`, as well as any other necessary parts of the code. Ideally, as much existing code as possible should be reused. The general folder structure and experiment logging format should remain consistent.

After adding the dataset, ensure it functions correctly and produces meaningful results by running at least one experimental setup, such as using LoRA with default settings.

#### Considerations When Adding a New Dataset

1. Before beginning, it is best to open an [issue on PEFT](https://github.com/huggingface/peft/issues) to share your plans. This allows for early feedback and prevents wasted effort on impractical ideas.
2. The most valuable new datasets are those that test capabilities different from the ones already covered. Bonus points if the task is similar to what users may face in the real world. Task ideas that would be great to add:
   - A task involving both language and image modalities.
   - An image generation task (like stable diffusion).
   - A task involving audio (like whisper).
   - A task that requires knowledge preservation (checked, for instance, via an auxiliary test set).
   - Learning something completely new (e.g. a new language).
   - A reinforcement learning task (e.g. using [trl](https://github.com/huggingface/trl)).
3. Training should be reasonably fast. Running dozens of experiments is impractical if each one takes multiple days and incurs high costs. Ideally, training should take a few hours at most on high-end consumer hardware.
4. The chosen base model should not be too large, to avoid VRAM constraints. Moreover, if the base model is too powerful, there is little room for improvement through further fine-tuning.
5. Test scores should be informative and have a broad range:
   - Besides loss, there should ideally be at least one additional metric, such as accuracy.
   - Comparisons are not meaningful if all methods score near 0% or near 100%. The dataset should yield a range of scores to facilitate meaningful differentiation between methods.
6. The dataset should be publicly available and have a track record as a useful dataset. The license should permit the intended usage.

## Result dashboard

For convenience, we included a [Gradio](https://www.gradio.app/) app that shows the results of the experiments. It lets you filter by task and base model and shows the experiment results for this selection. Give it a try [here](https://huggingface.co/spaces/peft-internal-testing/PEFT-method-comparison).

### Local deployment

This app requires additional packages to be installed; please install the packages listed in `requirements-app.txt`, e.g. via:

```sh
python -m pip install -r requirements-app.txt
```

To launch the demo, run:

```sh
python app.py
```
peft/method_comparison/README.md/0
{ "file_path": "peft/method_comparison/README.md", "repo_id": "peft", "token_count": 2232 }
236
# Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations from typing import TYPE_CHECKING, Any, Optional import torch from .utils import PeftType if TYPE_CHECKING: from .config import PeftConfig from .tuners.tuners_utils import BaseTuner # these will be filled by the register_peft_method function PEFT_TYPE_TO_CONFIG_MAPPING: dict[PeftType, type[PeftConfig]] = {} PEFT_TYPE_TO_TUNER_MAPPING: dict[PeftType, type[BaseTuner]] = {} PEFT_TYPE_TO_MIXED_MODEL_MAPPING: dict[PeftType, type[BaseTuner]] = {} PEFT_TYPE_TO_PREFIX_MAPPING: dict[PeftType, str] = {} def get_peft_config(config_dict: dict[str, Any]) -> PeftConfig: """ Returns a Peft config object from a dictionary. Args: config_dict (`Dict[str, Any]`): Dictionary containing the configuration parameters. """ return PEFT_TYPE_TO_CONFIG_MAPPING[config_dict["peft_type"]](**config_dict) def inject_adapter_in_model( peft_config: PeftConfig, model: torch.nn.Module, adapter_name: str = "default", low_cpu_mem_usage: bool = False, state_dict: Optional[dict[str, torch.Tensor]] = None, ) -> torch.nn.Module: r""" A simple API to create and inject adapter in-place into a model. Currently the API does not support prompt learning methods and adaption prompt. Make sure to have the correct `target_names` set in the `peft_config` object. The API calls `get_peft_model` under the hood but would be restricted only to non-prompt learning methods. Args: peft_config (`PeftConfig`): Configuration object containing the parameters of the Peft model. model (`torch.nn.Module`): The input model where the adapter will be injected. adapter_name (`str`, `optional`, defaults to `"default"`): The name of the adapter to be injected, if not provided, the default adapter name is used ("default"). low_cpu_mem_usage (`bool`, `optional`, defaults to `False`): Create empty adapter weights on meta device. Useful to speed up the loading process. state_dict (`dict`, *optional*, defaults to `None`) If a state_dict is passed here, the adapters will be injected based on the entries of the state_dict. This can be useful when the exact `target_modules` of the PEFT method is unknown, for instance because the checkpoint was created without meta data. Note that the values from the state_dict are not used, only the keys are used to determine the correct layers that should be adapted. """ if peft_config.is_prompt_learning or peft_config.is_adaption_prompt: raise ValueError("`create_and_replace` does not support prompt learning and adaption prompt yet.") if peft_config.peft_type not in PEFT_TYPE_TO_TUNER_MAPPING.keys(): raise ValueError( f"`inject_adapter_in_model` does not support {peft_config.peft_type} yet. Please use `get_peft_model`." ) tuner_cls = PEFT_TYPE_TO_TUNER_MAPPING[peft_config.peft_type] # By instantiating a peft model we are injecting randomly initialized LoRA layers into the model's modules. 
peft_model = tuner_cls( model, peft_config, adapter_name=adapter_name, low_cpu_mem_usage=low_cpu_mem_usage, state_dict=state_dict ) return peft_model.model
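A minimal usage sketch of `inject_adapter_in_model` is shown below, using a toy module so it runs without downloading a model; the layer names and LoRA hyperparameters are illustrative.

```python
import torch
from peft import LoraConfig, inject_adapter_in_model


class ToyModel(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.linear = torch.nn.Linear(10, 10)
        self.lm_head = torch.nn.Linear(10, 10)

    def forward(self, x):
        return self.lm_head(self.linear(x))


lora_config = LoraConfig(r=8, lora_alpha=16, target_modules=["linear"])
model = inject_adapter_in_model(lora_config, ToyModel())

# The targeted layer is now a LoRA layer; only the injected parameters are new.
print([name for name, _ in model.named_modules() if "lora" in name])
```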
peft/src/peft/mapping.py/0
{ "file_path": "peft/src/peft/mapping.py", "repo_id": "peft", "token_count": 1345 }
237
# Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import importlib import math import warnings from typing import Any, Optional, Union import torch import torch.nn as nn import torch.nn.init as init from peft.tuners.tuners_utils import BaseTunerLayer, check_adapters_to_merge from peft.utils import transpose from peft.utils.integrations import gather_params_ctx from .layer import LoraLayer class LoraParallelLinear(nn.Module, LoraLayer): """ When the target layer parallel_linear is RowParallelLinear, in order to keep the input and output shapes consistent, we need to split the lora matrix A into rows, and the lora_B at this time should be a complete linear layer; In the same way, when the target layer is ColumnParallelLinear, we perform column segmentation on lora_B, while lora_A is still a complete linear layer. """ def __init__( self, base_layer, adapter_name: str, backend, r: int = 0, lora_alpha: int = 1, lora_dropout: float = 0.0, fan_in_fan_out: bool = False, is_target_conv_1d_layer: bool = False, init_lora_weights: Union[bool, str] = True, use_rslora: bool = False, use_dora: bool = False, lora_bias: bool = False, **kwargs, ): if lora_bias: raise ValueError(f"{self.__class__.__name__} does not support lora_bias yet, set it to False") super().__init__() LoraLayer.__init__(self, base_layer=base_layer, **kwargs) if use_dora: raise ValueError(f"{self.__class__.__name__} does not support DoRA yet, please set it to False") self.backend = backend self.is_parallel_a = isinstance(base_layer, backend.RowParallelLinear) self.fan_in_fan_out = fan_in_fan_out self._active_adapter = adapter_name megatron_config = kwargs["megatron_config"] parallel_linear_kwargs = {"megatron_config": megatron_config} init_method = init.xavier_normal_ if hasattr(megatron_config, "init_method"): init_method = megatron_config.init_method input_is_parallel = True gather_output = False if self.is_parallel_a: input_is_parallel = base_layer.input_is_parallel else: gather_output = base_layer.gather_output self.update_layer( adapter_name, r, lora_alpha=lora_alpha, lora_dropout=lora_dropout, init_lora_weights=init_lora_weights, use_rslora=use_rslora, use_dora=use_dora, init_method=init_method, input_is_parallel=input_is_parallel, gather_output=gather_output, **parallel_linear_kwargs, ) if is_target_conv_1d_layer: raise ValueError( f"{self.__class__.__name__} does not support target_conv_1d_layer yet, please set it to False" ) self.is_target_conv_1d_layer = False def update_layer( self, adapter_name, r, lora_alpha, lora_dropout, init_lora_weights, use_rslora, use_dora=False, init_method=init.xavier_normal_, input_is_parallel=True, gather_output=False, **parallel_linear_kwargs, ): # collect the kwargs kwargs = locals().copy() del kwargs["self"] if r <= 0: raise ValueError(f"`r` should be a positive integer value but the value passed is {r}") self.r[adapter_name] = r self.lora_alpha[adapter_name] = lora_alpha if lora_dropout > 0.0: lora_dropout_layer = 
nn.Dropout(p=lora_dropout) else: lora_dropout_layer = nn.Identity() self.lora_dropout[adapter_name] = lora_dropout_layer megatron_config = parallel_linear_kwargs["megatron_config"] # lora needs to be forced to upgrade to 32-bit precision, otherwise it will overflow megatron_config.params_dtype = torch.float32 if self.is_parallel_a: lora_a = self.backend.RowParallelLinear( input_size=self.in_features, output_size=r, bias=False, input_is_parallel=input_is_parallel, skip_bias_add=True, init_method=init_method, config=megatron_config, ) lora_b = nn.Linear(in_features=r, out_features=self.out_features, bias=False, dtype=torch.float32) else: lora_a = nn.Linear(in_features=self.in_features, out_features=r, bias=False, dtype=torch.float32) lora_b = self.backend.ColumnParallelLinear( input_size=r, output_size=self.out_features, bias=False, gather_output=gather_output, init_method=init_method, config=megatron_config, ) self.lora_A[adapter_name] = lora_a self.lora_B[adapter_name] = lora_b if use_rslora: self.scaling[adapter_name] = lora_alpha / math.sqrt(r) else: self.scaling[adapter_name] = lora_alpha / r self.use_dora[adapter_name] = use_dora # for inits that require access to the base weight, use gather_param_ctx so that the weight is gathered when using DeepSpeed if isinstance(init_lora_weights, str) and init_lora_weights.startswith("pissa"): with gather_params_ctx(self.get_base_layer().weight): self.pissa_init(adapter_name, init_lora_weights) elif isinstance(init_lora_weights, str) and init_lora_weights.startswith("corda"): with gather_params_ctx(self.get_base_layer().weight): self.corda_init(adapter_name, init_lora_weights) elif isinstance(init_lora_weights, str) and init_lora_weights.lower() == "olora": with gather_params_ctx(self.get_base_layer().weight): self.olora_init(adapter_name) elif init_lora_weights == "loftq": with gather_params_ctx(self.get_base_layer().weight): self.loftq_init(adapter_name) elif init_lora_weights: self.reset_lora_parameters(adapter_name, init_lora_weights) # call this before dora_init self._move_adapter_to_device_of_base_layer(adapter_name) if adapter_name in self.lora_variant: self.lora_variant[adapter_name].init(self, **kwargs) self.set_adapter(self.active_adapters) def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any): self._check_forward_args(x, *args, **kwargs) adapter_names = kwargs.pop("adapter_names", None) # If weight is used for matrix multiplication here, the final aggregation operation of the original # parallel_linear layer will be missing, so we need to directly call its forward function to obtain the # output of the original parallel_linear layer. 
if self.disable_adapters: if self.merged: self.unmerge() result, bias = self.base_layer(x, *args, **kwargs) elif adapter_names is not None: raise ValueError(f"{self.__class__.__name__} does not support mixed_batch_forward yet.") elif self.merged: result, bias = self.base_layer(x, *args, **kwargs) else: result, bias = self.base_layer(x, *args, **kwargs) torch_result_dtype = result.dtype for active_adapter in self.active_adapters: if active_adapter not in self.lora_A.keys(): continue lora_A = self.lora_A[active_adapter] lora_B = self.lora_B[active_adapter] dropout = self.lora_dropout[active_adapter] scaling = self.scaling[active_adapter] x = self._cast_input_dtype(x, lora_A.weight.dtype) result = result + lora_B(lora_A(dropout(x))) * scaling result = result.to(torch_result_dtype) return result, bias def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None: """ Merge the active adapter weights into the base weights Args: safe_merge (`bool`, *optional*): If True, the merge operation will be performed in a copy of the original weights and check for NaNs before merging the weights. This is useful if you want to check if the merge operation will produce NaNs. Defaults to `False`. adapter_names (`list[str]`, *optional*): The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults to `None`. """ adapter_names = check_adapters_to_merge(self, adapter_names) if not adapter_names: # no adapter to merge return for active_adapter in adapter_names: if active_adapter in self.lora_A.keys(): base_layer = self.get_base_layer() if safe_merge: # Note that safe_merge will be slower than the normal merge # because of the copy operation. orig_weights = base_layer.weight.data.clone() delta_weight = self.get_delta_weight(active_adapter) orig_weights = orig_weights + delta_weight if not torch.isfinite(orig_weights).all(): raise ValueError( f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken" ) base_layer.weight.data = orig_weights else: delta_weight = self.get_delta_weight(active_adapter) base_layer.weight.data = base_layer.weight.data + delta_weight self.merged_adapters.append(active_adapter) def unmerge(self) -> None: """ This method unmerges all merged adapter layers from the base weights. """ if not self.merged: warnings.warn("Already unmerged. Nothing to do.") return while len(self.merged_adapters) > 0: active_adapter = self.merged_adapters.pop() if active_adapter in self.lora_A.keys(): weight = self.get_base_layer().weight delta_weight = self.get_delta_weight(active_adapter) weight.data -= delta_weight def get_delta_weight(self, adapter) -> torch.Tensor: """ Compute the delta weight for the given adapter. Args: adapter (str): The name of the adapter for which the delta weight should be computed. """ device = self.lora_B[adapter].weight.device dtype = self.lora_B[adapter].weight.dtype # In case users wants to merge the adapter weights that are in # (b)float16 while being on CPU, we need to cast the weights to float32, perform the merge and then cast back to # (b)float16 because some CPUs have slow bf16/fp16 matmuls. 
cast_to_fp32 = device.type == "cpu" and (dtype == torch.float16 or dtype == torch.bfloat16) weight_A = self.lora_A[adapter].weight weight_B = self.lora_B[adapter].weight if cast_to_fp32: weight_A = weight_A.float() weight_B = weight_B.float() output_tensor = transpose(weight_B @ weight_A, self.fan_in_fan_out) * self.scaling[adapter] if cast_to_fp32: output_tensor = output_tensor.to(dtype=dtype) # cast back the weights self.lora_A[adapter].weight.data = weight_A.to(dtype) self.lora_B[adapter].weight.data = weight_B.to(dtype) return output_tensor def __repr__(self) -> str: rep = super().__repr__() return "lora." + rep def dispatch_megatron( target: torch.nn.Module, adapter_name: str, lora_config, **kwargs: Any, ) -> Optional[torch.nn.Module]: new_module = None if isinstance(target, BaseTunerLayer): target_base_layer = target.get_base_layer() else: target_base_layer = target if lora_config.megatron_config: megatron_core = importlib.import_module(lora_config.megatron_core) else: megatron_core = None if megatron_core and isinstance( target_base_layer, (megatron_core.tensor_parallel.ColumnParallelLinear, megatron_core.tensor_parallel.RowParallelLinear), ): megatron_kwargs = kwargs.copy() megatron_config = lora_config.megatron_config if isinstance(megatron_config, dict): transformer_config_class = megatron_core.transformer.transformer_config.TransformerConfig megatron_config = transformer_config_class(**lora_config.megatron_config) megatron_kwargs["megatron_config"] = megatron_config if megatron_kwargs["fan_in_fan_out"]: warnings.warn( "fan_in_fan_out is set to True but the target module is `ColumnParallelLinear` " "or `RowParallelLinear`. " "Setting fan_in_fan_out to False." ) megatron_kwargs["fan_in_fan_out"] = lora_config.fan_in_fan_out = False new_module = LoraParallelLinear( base_layer=target, adapter_name=adapter_name, backend=megatron_core.tensor_parallel, **megatron_kwargs ) return new_module
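Using this code path requires a `LoraConfig` whose `megatron_config` and `megatron_core` fields are set, as checked in `dispatch_megatron` above. The sketch below is illustrative only: the target module name is a placeholder for whatever `ColumnParallelLinear`/`RowParallelLinear` layers your Megatron model actually contains, and the dict entries must correspond to valid `TransformerConfig` fields for your model.

```python
from peft import LoraConfig

# Placeholder module name and illustrative TransformerConfig fields; adjust to your Megatron model.
config = LoraConfig(
    r=8,
    lora_alpha=16,
    target_modules=["query_key_value"],
    megatron_config={"num_layers": 24, "hidden_size": 1024, "num_attention_heads": 16},
    megatron_core="megatron.core",
)
```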
peft/src/peft/tuners/lora/tp_layer.py/0
{ "file_path": "peft/src/peft/tuners/lora/tp_layer.py", "repo_id": "peft", "token_count": 6508 }
238
# Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations from dataclasses import dataclass, field from typing import Literal, Optional, Union from peft.config import PeftConfig from peft.utils import PeftType @dataclass class OFTConfig(PeftConfig): """ This is the configuration class to store the configuration of a [`OFTModel`]. Args: r (`int`): OFT rank, number of OFT blocks per injected layer. oft_block_size (`int`): OFT block size across different layers. module_dropout (`float`): The multiplicative dropout probability, by setting OFT blocks to identity during training, similar to the dropout layer in LoRA. target_modules (`Optional[Union[list[str], str]]`): The names of the modules to apply the adapter to. If this is specified, only the modules with the specified names will be replaced. When passing a string, a regex match will be performed. When passing a list of strings, either an exact match will be performed or it is checked if the name of the module ends with any of the passed strings. If this is specified as 'all-linear', then all linear modules are chosen, excluding the output layer. If this is not specified, modules will be chosen according to the model architecture. If the architecture is not known, an error will be raised -- in this case, you should specify the target modules manually. fan_in_fan_out (`bool`): Set this to True if the layer to replace stores weight like (fan_in, fan_out). bias (`str`): Bias type for OFT. Can be 'none', 'all' or 'oft_only'. If 'all' or 'oft_only', the corresponding biases will be updated during training. Be aware that this means that, even when disabling the adapters, the model will not produce the same output as the base model would have without adaptation. exclude_modules (`Optional[Union[List[str], str]]`): The names of the modules to not apply the adapter. When passing a string, a regex match will be performed. When passing a list of strings, either an exact match will be performed or it is checked if the name of the module ends with any of the passed strings. init_weights (`bool`): Whether to perform initialization of OFT weights. layers_to_transform (`Union[List[int], int]`): The layer indices to transform. If a list of ints is passed, it will apply the adapter to the layer indices that are specified in this list. If a single integer is passed, it will apply the transformations on the layer at this index. layers_pattern (`Optional[Union[List[str], str]]`): The layer pattern name, used only if `layers_to_transform` is different from `None`. This should target the `nn.ModuleList` of the model, which is often called `'layers'` or `'h'`. modules_to_save (`List[str]`): List of modules apart from adapter layers to be set as trainable and saved in the final checkpoint. coft (`bool`): Whether to use the constrained variant of OFT or not, off by default. eps (`float`): The control strength of COFT. The freedom of rotation. Only has an effect if `coft` is set to True. 
block_share (`bool`): Whether to share the OFT parameters between blocks or not. This is `False` by default. """ r: int = field(default=0, metadata={"help": "OFT rank, number of OFT blocks per injected layer."}) oft_block_size: int = field( default=32, metadata={ "help": "OFT block size across different layers.", "note": "You can only specify either r or oft_block_size, but not both simultaneously, because r x oft_block_size = layer dimension.", }, ) module_dropout: float = field( default=0.0, metadata={ "help": "OFT multiplicative dropout, randomly setting blocks of OFT to be identity matrix, similar to the dropout layer in LoRA." }, ) target_modules: Optional[Union[list[str], str]] = field( default=None, metadata={ "help": "List of module names or regex expression of the module names to replace with OFT." "For example, ['q', 'v'] or '.*decoder.*(SelfAttention|EncDecAttention).*(q|v)$' " "This can also be a wildcard 'all-linear' which matches all linear/Conv1D layers except the output layer." }, ) fan_in_fan_out: bool = field( default=False, metadata={"help": "Set this to True if the layer to replace stores weight like (fan_in, fan_out)"}, ) bias: Literal["none", "all", "oft_only"] = field( default="none", metadata={"help": "Bias type for OFT. Can be 'none', 'all' or 'oft_only'"} ) exclude_modules: Optional[Union[list[str], str]] = field( default=None, metadata={"help": "List of module names or regex expression of the module names to exclude from OFT."}, ) init_weights: bool = field( default=True, metadata={ "help": ( "Whether to initialize the weights of the OFT layers with their default initialization. Don't change " "this setting, except if you know exactly what you're doing." ), }, ) layers_to_transform: Optional[Union[list[int], int]] = field( default=None, metadata={ "help": "The layer indexes to transform, is this argument is specified, PEFT will transform only the layers indexes that are specified inside this list. If a single integer is passed, PEFT will transform only the layer at this index." }, ) layers_pattern: Optional[Union[list[str], str]] = field( default=None, metadata={ "help": "The layer pattern name, used only if `layers_to_transform` is different to None and if the layer pattern is not in the common layers pattern. " "This should target the `nn.ModuleList` of the model, which is often called `'layers'` or `'h'`." }, ) modules_to_save: Optional[list[str]] = field( default=None, metadata={ "help": "List of modules apart from OFT layers to be set as trainable and saved in the final checkpoint. " "For example, in Sequence Classification or Token Classification tasks, " "the final layer `classifier/score` are randomly initialized and as such need to be trainable and saved." }, ) coft: bool = field( default=False, metadata={"help": "Whether to use the constrained variant of OFT or not."}, ) eps: float = field( default=6e-5, metadata={ "help": "The control strength of COFT. The freedom of rotation. Only has an effect if `coft` is set to True." }, ) block_share: bool = field( default=False, metadata={"help": "Whether to share the OFT parameters between blocks or not."}, ) use_cayley_neumann: bool = field( default=True, metadata={ "help": "Whether to use the Cayley-Neumann Formulation of OFT or not. Set to True to improve computational efficiency but comes at costs of bigger approximation error for orthogonality." }, ) num_cayley_neumann_terms: int = field( default=5, metadata={ "help": "Number of Cayley-Neumann terms to use. 
Higher number results in less approximation error for orthogonality." }, ) def __post_init__(self): super().__post_init__() self.peft_type = PeftType.OFT self.target_modules = ( set(self.target_modules) if isinstance(self.target_modules, list) else self.target_modules ) self.exclude_modules = ( set(self.exclude_modules) if isinstance(self.exclude_modules, list) else self.exclude_modules ) # check for layers_to_transform and layers_pattern if self.layers_pattern and not self.layers_to_transform: raise ValueError("When `layers_pattern` is specified, `layers_to_transform` must also be specified. ") if self.r == 0 and self.oft_block_size == 0: raise ValueError( f"Either `r` or `oft_block_size` must be non-zero. Currently, r = {self.r} and oft_block_size = {self.oft_block_size}." ) if not (self.r != 0) ^ (self.oft_block_size != 0): raise ValueError( f"You can only specify either r ({self.r}) or oft_block_size ({self.oft_block_size}), but not both simultaneously, because r x oft_block_size == in_features." ) @classmethod def check_kwargs(cls, **kwargs): r""" Check if the kwargs are valid for the configuration. Args: kwargs (additional keyword arguments, *optional*): Additional keyword arguments passed along to the child class initialization. """ if "oft_block_size" not in kwargs: raise ValueError( "OFT has been updated since PEFT 0.14.0. Your trained adapter weights are incompatible " "with the latest version of OFT. Please retrain your adapter weights with newer PEFT versions. " "Alternatively, downgrade PEFT to version 0.13.0 to use the old adapter weights." ) return super().check_kwargs(**kwargs)
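A minimal sketch of using this configuration is shown below; the base model and target module names are illustrative. Note that `oft_block_size` must divide the dimension of the adapted layers, and only one of `r` and `oft_block_size` may be set, as enforced in `__post_init__`.

```python
from transformers import AutoModelForCausalLM
from peft import OFTConfig, get_peft_model

# Illustrative setup: opt-125m has hidden size 768, which is divisible by oft_block_size=32.
config = OFTConfig(
    oft_block_size=32,
    target_modules=["q_proj", "v_proj"],
    module_dropout=0.0,
)
base_model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m")
model = get_peft_model(base_model, config)
model.print_trainable_parameters()
```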
peft/src/peft/tuners/oft/config.py/0
{ "file_path": "peft/src/peft/tuners/oft/config.py", "repo_id": "peft", "token_count": 3754 }
239
# Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass, field from peft.config import PromptLearningConfig from peft.utils import PeftType @dataclass class PrefixTuningConfig(PromptLearningConfig): """ This is the configuration class to store the configuration of a [`PrefixEncoder`]. Args: encoder_hidden_size (`int`): The hidden size of the prompt encoder. prefix_projection (`bool`): Whether to project the prefix embeddings. """ encoder_hidden_size: int = field( default=None, metadata={"help": "The hidden size of the encoder"}, ) prefix_projection: bool = field( default=False, metadata={"help": "Whether to project the prefix tokens"}, ) def __post_init__(self): super().__post_init__() self.peft_type = PeftType.PREFIX_TUNING
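# --- Illustrative usage sketch (editorial addition, not part of the original file) ---
# A hedged example of configuring prefix tuning for a seq2seq model. The model id and the
# hyperparameter values are modeled on the test files elsewhere in this dump; encoder_hidden_size
# is only used when prefix_projection=True, where the prefix embeddings are passed through a small
# projection MLP of that hidden size.
if __name__ == "__main__":
    from transformers import AutoModelForSeq2SeqLM

    from peft import PrefixTuningConfig, TaskType, get_peft_model

    model = AutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-BartForConditionalGeneration")
    config = PrefixTuningConfig(
        task_type=TaskType.SEQ_2_SEQ_LM,
        num_virtual_tokens=10,
        prefix_projection=True,
        encoder_hidden_size=32,
    )
    model = get_peft_model(model, config)
    model.print_trainable_parameters()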
peft/src/peft/tuners/prefix_tuning/config.py/0
{ "file_path": "peft/src/peft/tuners/prefix_tuning/config.py", "repo_id": "peft", "token_count": 463 }
240
# Copyright 2025-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import warnings from dataclasses import dataclass, field from typing import Literal, Optional, Union from peft.config import PeftConfig from peft.utils import PeftType from .mask_functions import random_mask @dataclass class ShiraConfig(PeftConfig): """ This is the configuration class to store the configuration of a [`ShiraModel`]. Args: r (`int`, *optional*, defaults to `32`): For a given target module, the number of SHiRA parameters is computed as r(m+n), where the original tensor dimensions are m x n. This means the number of SHiRA parameters is the same as that for a LoRA adapter. SHiRA is a high rank adapter. Setting this r parameter does not restrict the rank to this value. mask_type (`str`, defaults to `random`): Type of mask function. Defaults to a random sparse mask. An optional user-defined mask_fn to compute the mask value can also be supplied by instantiating `config = ShiraConfig(...)` and then setting `config.mask_fn = <your custom mask function>`. For a pretrained weight with shape m x n, the custom mask function must return only one mask (shape: m x n) which must be binary 0 or 1 with num_shira_parameters = r(m + n) for linear layers. Device and dtype of mask must be same as base layer's weight's device and dtype. Please see mask_functions.py for more details and to see the default random sparse mask implementation. random_seed (`int`, *optional*, defaults to `None`): random seed for the torch generator for random_mask. target_modules (`Union[List[str], str]`): List of module names or regex expression of the module names to replace with SHiRA. For example, ['q', 'v'] or '.*decoder.*(SelfAttention|EncDecAttention).*(q|v)$'. Only linear layers are supported. fan_in_fan_out (`bool`): Set this to True if the layer to replace stores weight like (fan_in, fan_out). For example, gpt-2 uses `Conv1D` which stores weights like (fan_in, fan_out) and hence this should be set to `True`. init_weights (`bool`, defaults to `True`): Initialize SHiRA weight to have zero values. If set to False, SHiRA weights are initialized to randn values instead of zeros and this is used only for testing. modules_to_save (`List[str]`): List of modules apart from SHiRA layers to be set as trainable and saved in the final checkpoint. """ r: int = field( default=32, metadata={ "help": ( "For a given target module, the number of SHiRA parameters is computed as r(m+n), where the original " "tensor dimensions are m x n. This means the number of SHiRA parameters is the same as that for a LoRA adapter. " "SHiRA is a high rank adapter. Setting this r parameter does not restrict the rank to this value." ) }, ) mask_type: Literal["random"] = field( default="random", metadata={ "help": ( "Type of mask function. Defaults to a random sparse mask. 
" "An optional user-defined mask_fn to compute the mask value can also be supplied by instantiating `config = ShiraConfig(...)` and then setting " "`config.mask_fn = <your custom mask function>`. For a pretrained weight with shape m x n, the custom mask function must return only one mask (shape: m x n) " "which must be binary 0 or 1 with num_shira_parameters = r(m + n) for linear layers. Device and dtype of mask must be same as base layer's weight's device and dtype. " "Please see mask_functions.py for more details and to see the default random sparse mask implementation." ) }, ) random_seed: Optional[int] = field( default=None, metadata={"help": "random seed for the torch generator for random_mask"} ) target_modules: Optional[Union[list[str], str]] = field( default=None, metadata={ "help": ( "List of module names or regex expression of the module names to replace with SHiRA." "For example, ['q', 'v'] or '.*decoder.*(SelfAttention|EncDecAttention).*(q|v)$'. " "Only linear layers are supported." ) }, ) fan_in_fan_out: bool = field( default=False, metadata={"help": "Set this to True if the layer to replace stores weight like (fan_in, fan_out)"}, ) init_weights: bool = field( default=True, metadata={ "help": "Initialize SHiRA weight to have zero values. If set to False, SHiRA weights are initialized to randn values instead of zeros and this is used only for testing." }, ) modules_to_save: Optional[list[str]] = field( default=None, metadata={ "help": ( "List of modules apart from SHiRA layers to be set as trainable and saved in the final checkpoint. For" " example, in Sequence Classification or Token Classification tasks, the final layer" " `classifier/score` are randomly initialized and as such need to be trainable and saved." ) }, ) def __post_init__(self): super().__post_init__() self.peft_type = PeftType.SHIRA self.target_modules = ( set(self.target_modules) if isinstance(self.target_modules, list) else self.target_modules ) if self.mask_type == "random": self.mask_fn = random_mask else: if not self.inference_mode: warnings.warn( f"Argument {self.mask_type=} is not recognized, please supply your own masking function by calling `config.mask_fn = my_mask_fn`." ) self.mask_fn = None
peft/src/peft/tuners/shira/config.py/0
{ "file_path": "peft/src/peft/tuners/shira/config.py", "repo_id": "peft", "token_count": 2491 }
241
# Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import warnings from typing import Optional import torch import torch.nn as nn import torch.nn.functional as F from transformers.pytorch_utils import Conv1D from peft.tuners.tuners_utils import BaseTunerLayer, check_adapters_to_merge from peft.utils.other import transpose from .._buffer_dict import BufferDict class VeraLayer(BaseTunerLayer): # List all names of layers that may contain adapter weights adapter_layer_names = ("vera_lambda_b", "vera_lambda_d") other_param_names = ("vera_A", "vera_B") def __init__(self, base_layer: nn.Module, **kwargs): self.base_layer = base_layer self.r = {} self.vera_dropout = nn.ModuleDict({}) # For storing vector scale self.vera_lambda_b = nn.ParameterDict({}) self.vera_lambda_d = nn.ParameterDict({}) # Stores a reference to the vera_A/B BufferDict. # Set to `None` otherwise to avoid computation with random weights self.vera_A: Optional[BufferDict] = None self.vera_B: Optional[BufferDict] = None # Mark the weight as unmerged self._disable_adapters = False self.merged_adapters = [] base_layer = self.get_base_layer() if isinstance(base_layer, nn.Linear): in_features, out_features = base_layer.in_features, base_layer.out_features elif isinstance(base_layer, Conv1D): in_features, out_features = ( base_layer.weight.ds_shape if hasattr(base_layer.weight, "ds_shape") else base_layer.weight.shape ) self.in_features = in_features self.out_features = out_features self.kwargs = kwargs @property def merged(self) -> bool: return bool(self.merged_adapters) def update_layer( self, adapter_name, vera_A: BufferDict, vera_B: BufferDict, r, vera_dropout, init_weights, d_initial: float = 0.1, ): if r <= 0: raise ValueError(f"`r` should be a positive integer value but the value passed is {r}") self.r[adapter_name] = r if vera_dropout > 0.0: vera_dropout_layer = nn.Dropout(p=vera_dropout) else: vera_dropout_layer = nn.Identity() self.vera_dropout.update(nn.ModuleDict({adapter_name: vera_dropout_layer})) # Actual trainable parameters self.vera_lambda_b[adapter_name] = nn.Parameter(torch.ones(self.out_features), requires_grad=True) self.vera_lambda_d[adapter_name] = nn.Parameter(torch.randn(r), requires_grad=True) # non trainable references to vera_A/B buffers self.vera_A = vera_A self.vera_B = vera_B if adapter_name not in vera_A: # This means that this is not the first VeRA adapter. We have to add an entry in the dict for this adapter. if len(self.vera_A) < 1: raise ValueError( "The `vera_A` and `vera_B` buffers are empty. This should not happen. Please report this issue." ) # we can take any of the existing adapter's parameters, as they should all be identical vera_A_param = list(self.vera_A.values())[0] vera_B_param = list(self.vera_B.values())[0] error_tmpl = ( "{} has a size of {} but {} or greater is required; this probably happened because an additional VeRA " "adapter was added after the first one with incompatible shapes." 
) # check input size if vera_A_param.shape[1] < self.in_features: raise ValueError(error_tmpl.format("vera_A", vera_A_param.shape[1], self.in_features)) # check output size if vera_B_param.shape[0] < self.out_features: raise ValueError(error_tmpl.format("vera_B", vera_B_param.shape[0], self.out_features)) # check r error_tmpl = ( "{} has a size of {} but {} or greater is required; this probably happened because an additional VeRA " "adapter with a lower rank was added after the first one; loading the adapters " "in reverse order may solve this." ) if vera_A_param.shape[0] < self.r[adapter_name]: raise ValueError(error_tmpl.format("vera_A", vera_A_param.shape[0], self.r[adapter_name])) if vera_B_param.shape[1] < self.r[adapter_name]: raise ValueError(error_tmpl.format("vera_B", vera_B_param.shape[1], self.r[adapter_name])) self.vera_A[adapter_name] = vera_A_param self.vera_B[adapter_name] = vera_B_param if init_weights: self.reset_vera_parameters(adapter_name, d_initial=d_initial) self._move_adapter_to_device_of_base_layer(adapter_name) self.set_adapter(self.active_adapters) def reset_vera_parameters(self, adapter_name, d_initial: float = 0.1): if adapter_name in self.vera_lambda_d.keys(): with torch.no_grad(): nn.init.zeros_(self.vera_lambda_d[adapter_name]).fill_(d_initial) nn.init.zeros_(self.vera_lambda_b[adapter_name]) class Linear(nn.Linear, VeraLayer): # Vera implemented in a dense layer def __init__( self, base_layer, vera_A: BufferDict, vera_B: BufferDict, adapter_name: str, r: int = 0, vera_dropout: float = 0.0, fan_in_fan_out: bool = False, # Set this to True if the layer to replace stores weight like (fan_in, fan_out) is_target_conv_1d_layer: bool = False, init_weights: bool = True, d_initial: float = 0.1, **kwargs, ) -> None: # this gets the init from nn.Linear's super perspective, i.e. nn.Module.__init__, which should always be called super(nn.Linear, self).__init__() VeraLayer.__init__(self, base_layer, **kwargs) self.fan_in_fan_out = fan_in_fan_out self._active_adapter = adapter_name self.update_layer(adapter_name, vera_A, vera_B, r, vera_dropout, init_weights, d_initial=d_initial) self.is_target_conv_1d_layer = is_target_conv_1d_layer def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None: """ Merge the active adapter weights into the base weights Args: safe_merge (`bool`, *optional*): If True, the merge operation will be performed in a copy of the original weights and check for NaNs before merging the weights. This is useful if you want to check if the merge operation will produce NaNs. Defaults to `False`. adapter_names (`List[str]`, *optional*): The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults to `None`. """ adapter_names = check_adapters_to_merge(self, adapter_names) if not adapter_names: # no adapter to merge return for active_adapter in adapter_names: if active_adapter in self.vera_lambda_d.keys(): base_layer = self.get_base_layer() if safe_merge: # Note that safe_merge will be slower than the normal merge # because of the copy operation. orig_weights = base_layer.weight.data.clone() orig_weights += self.get_delta_weight(active_adapter) if not torch.isfinite(orig_weights).all(): raise ValueError( f"NaNs detected in the merged weights. 
The adapter {active_adapter} seems to be broken" ) base_layer.weight.data = orig_weights else: base_layer.weight.data += self.get_delta_weight(active_adapter) self.merged_adapters.append(active_adapter) def unmerge(self) -> None: if not self.merged: warnings.warn("Already unmerged. Nothing to do.") return while len(self.merged_adapters) > 0: active_adapter = self.merged_adapters.pop() if active_adapter in self.vera_lambda_d.keys(): self.get_base_layer().weight.data -= self.get_delta_weight(active_adapter) def get_delta_weight(self, adapter) -> torch.Tensor: """ Compute the delta weight for the given adapter. Args: adapter (str): The name of the adapter for which the delta weight should be computed. """ vera_A = self.vera_A[adapter] vera_B = self.vera_B[adapter] device = vera_B.device dtype = vera_B.dtype # In case users wants to merge the adapter weights that are in # (b)float16 while being on CPU, we need to cast the weights to float32, perform the merge and then cast back to # (b)float16 because some CPUs have slow bf16/fp16 matmuls. cast_to_fp32 = device.type == "cpu" and (dtype == torch.float16 or dtype == torch.bfloat16) lambda_d = self.vera_lambda_d[adapter] lambda_b = self.vera_lambda_b[adapter] if cast_to_fp32: vera_A = vera_A.float() vera_B = vera_B.float() lambda_d = lambda_d.float() lambda_b = lambda_b.float() sliced_A = vera_A[:, : self.in_features].to(lambda_d.device) sliced_B = vera_B[: self.out_features, :].to(lambda_d.device) lambda_b = lambda_b.unsqueeze(-1) lambda_d = lambda_d.unsqueeze(-1) output_tensor = transpose((lambda_b * sliced_B) @ (lambda_d * sliced_A), self.fan_in_fan_out) if cast_to_fp32: output_tensor = output_tensor.to(dtype=dtype) return output_tensor def forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor: previous_dtype = x.dtype if self.disable_adapters: if self.merged: self.unmerge() result = self.base_layer(x, *args, **kwargs) elif self.merged: result = self.base_layer(x, *args, **kwargs) else: result = self.base_layer(x, *args, **kwargs) for active_adapter in self.active_adapters: if active_adapter not in self.vera_lambda_d.keys(): continue lambda_d = self.vera_lambda_d[active_adapter] lambda_b = self.vera_lambda_b[active_adapter] vera_A = self.vera_A[active_adapter] vera_B = self.vera_B[active_adapter] # As adapted layers may have different shapes and VeRA contains a single shared pair of A and B matrices, # we initialize these matrices with the largest required size for each dimension. # During the forward pass, required submatrices are sliced out from the shared vera_A and vera_B. sliced_A = vera_A[:, : self.in_features].to(x.device) sliced_B = vera_B[: self.out_features, :].to(x.device) dropout = self.vera_dropout[active_adapter] x = x.to(lambda_d.dtype) result = result + lambda_b * F.linear(lambda_d * F.linear(dropout(x), sliced_A), sliced_B) result = result.to(previous_dtype) return result def __repr__(self) -> str: rep = super().__repr__() return "vera." + rep
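# --- Illustrative sketch (editorial addition, not part of the original file) ---
# A hedged, self-contained numerical check of the update computed by get_delta_weight/forward
# above: the shared projections vera_A (r x max_in) and vera_B (max_out x r) are sliced to the
# layer's shape and scaled elementwise by the per-layer vectors lambda_d (length r) and lambda_b
# (length out_features). All sizes are made up; dropout is omitted; lambda_b is random here only
# so that the comparison is non-trivial (the layer itself initialises it to zeros and lambda_d to
# d_initial).
if __name__ == "__main__":
    import torch
    import torch.nn.functional as F

    r, in_features, out_features = 4, 8, 6
    vera_A = torch.randn(r, in_features)   # shared across layers, sliced per layer
    vera_B = torch.randn(out_features, r)
    lambda_d = torch.full((r,), 0.1)
    lambda_b = torch.randn(out_features)

    # Equivalent of get_delta_weight (fan_in_fan_out=False): shape (out_features, in_features)
    delta_w = (lambda_b.unsqueeze(-1) * vera_B) @ (lambda_d.unsqueeze(-1) * vera_A)

    # Equivalent of the adapter term added in forward()
    x = torch.randn(2, in_features)
    adapter_out = lambda_b * F.linear(lambda_d * F.linear(x, vera_A), vera_B)

    assert torch.allclose(adapter_out, x @ delta_w.T, atol=1e-5)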
peft/src/peft/tuners/vera/layer.py/0
{ "file_path": "peft/src/peft/tuners/vera/layer.py", "repo_id": "peft", "token_count": 5541 }
242
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations

import os
import platform
import re
import warnings
from typing import Optional

import huggingface_hub
import torch
from huggingface_hub import file_exists, hf_hub_download
from huggingface_hub.errors import EntryNotFoundError, LocalEntryNotFoundError
from safetensors.torch import load_file as safe_load_file
from transformers.utils import http_user_agent

from peft.mapping import PEFT_TYPE_TO_PREFIX_MAPPING

from .constants import INCLUDE_LINEAR_LAYERS_SHORTHAND
from .other import (
    EMBEDDING_LAYER_NAMES,
    SAFETENSORS_WEIGHTS_NAME,
    WEIGHTS_NAME,
    AuxiliaryTrainingWrapper,
    check_file_exists_on_hf_hub,
    infer_device,
    match_target_against_key,
)
from .peft_types import PeftType


def has_valid_embedding_base_layer(layer):
    """Check if the layer has an embedding base layer"""
    return hasattr(layer, "base_layer") and isinstance(layer.base_layer, (torch.nn.Linear, torch.nn.Embedding))


def get_embedding_layer_name(model, layer, is_embedding_in_target_modules):
    """Get the name of the embedding module for a given layer."""
    for name, module in model.named_modules():
        if (not is_embedding_in_target_modules and module == layer) or module == getattr(layer, "base_layer", None):
            return name
    return None


def get_peft_model_state_dict(
    model, state_dict=None, adapter_name="default", unwrap_compiled=False, save_embedding_layers="auto"
):
    """
    Get the state dict of the Peft model.

    Args:
        model ([`PeftModel`]): The Peft model. When using torch.nn.DistributedDataParallel, DeepSpeed or FSDP, the
            model should be the underlying model/unwrapped model (i.e. model.module).
        state_dict (`dict`, *optional*, defaults to `None`):
            The state dict of the model. If not provided, the state dict of the passed model will be used.
        adapter_name (`str`, *optional*, defaults to `"default"`):
            The name of the adapter whose state dict should be returned.
        unwrap_compiled (`bool`, *optional*, defaults to `False`):
            Whether to unwrap the model if torch.compile was used.
        save_embedding_layers (`Union[bool, str]`, *optional*, defaults to `"auto"`):
            If `True`, save the embedding layers in addition to adapter weights. If `auto`, checks the common
            embedding layers `peft.utils.other.EMBEDDING_LAYER_NAMES` in config's `target_modules` when available
            and sets the boolean flag based on that. This only works for 🤗 transformers models. 
""" if unwrap_compiled: model = getattr(model, "_orig_mod", model) config = model.peft_config[adapter_name] if state_dict is None: state_dict = model.state_dict() # TUNER SPECIFIC CODE if config.peft_type in (PeftType.LORA, PeftType.ADALORA): # to_return = lora_state_dict(model, bias=model.peft_config.bias) # adapted from `https://github.com/microsoft/LoRA/blob/main/loralib/utils.py` # to be used directly with the state dict which is necessary when using DeepSpeed or FSDP bias = config.bias if bias == "none": to_return = {k: state_dict[k] for k in state_dict if "lora_" in k} elif bias == "all": to_return = {k: state_dict[k] for k in state_dict if "lora_" in k or "bias" in k} elif bias == "lora_only": to_return = {} for k in state_dict: if "lora_" in k: to_return[k] = state_dict[k] bias_name = k.split("lora_")[0] + "bias" if bias_name in state_dict: to_return[bias_name] = state_dict[bias_name] else: raise NotImplementedError to_return = {k: v for k, v in to_return.items() if (("lora_" in k and adapter_name in k) or ("bias" in k))} if config.peft_type == PeftType.ADALORA: rank_pattern = config.rank_pattern if rank_pattern is not None: rank_pattern = {k.replace(f".{adapter_name}", ""): v for k, v in rank_pattern.items()} config.rank_pattern = rank_pattern to_return = model.resize_state_dict_by_rank_pattern(rank_pattern, to_return, adapter_name) if config.use_dora: # Here we take care of a refactor of DoRA which changed lora_magnitude_vector from a ParameterDict to a # ModuleDict with a DoraLayer instance. The old parameter is now the "weight" attribute of that layer. Since # we want the state_dict format not to change, we remove the "weight" part. new_dora_suffix = f"lora_magnitude_vector.{adapter_name}.weight" def renamed_dora_weights(k): if k.endswith(new_dora_suffix): k = k[:-7] # remove ".weight" return k to_return = {renamed_dora_weights(k): v for k, v in to_return.items()} elif config.peft_type == PeftType.BOFT: bias = config.bias if bias == "none": to_return = {k: state_dict[k] for k in state_dict if "boft_" in k} elif bias == "all": to_return = {k: state_dict[k] for k in state_dict if "boft_" in k or "bias" in k} elif bias == "boft_only": to_return = {} for k in state_dict: if "boft_" in k: to_return[k] = state_dict[k] bias_name = k.split("boft_")[0] + "bias" if bias_name in state_dict: to_return[bias_name] = state_dict[bias_name] else: raise NotImplementedError elif config.peft_type == PeftType.ADAPTION_PROMPT: to_return = {k: state_dict[k] for k in state_dict if k.split(".")[-1].startswith("adaption_")} elif config.is_prompt_learning: to_return = {} if config.peft_type == PeftType.MULTITASK_PROMPT_TUNING: to_return["prefix_task_cols"] = model.prompt_encoder[adapter_name].prefix_task_cols to_return["prefix_task_rows"] = model.prompt_encoder[adapter_name].prefix_task_rows prompt_embeddings = model.prompt_encoder[adapter_name].embedding.weight else: if config.inference_mode: prompt_embeddings = model.prompt_encoder[adapter_name].embedding.weight else: prompt_embeddings = model.get_prompt_embedding_to_save(adapter_name) to_return["prompt_embeddings"] = prompt_embeddings elif config.peft_type == PeftType.SHIRA: shira_prefix = PEFT_TYPE_TO_PREFIX_MAPPING[config.peft_type] to_return = {k: state_dict[k] for k in state_dict if shira_prefix in k} if platform.system() == "Windows": warnings.warn( "Windows has issues saving integers into safetensors. Hence, we convert shira_indices to float32 " "before saving on Windows OS. The shira_indices will always be converted to integers when loading." 
) for name, module in model.named_modules(): if hasattr(module, "shira_indices"): for k, v in module.shira_indices.items(): # Windows has some issues with saving integers into safetensors. Tests fail with some kind of # PermissionError. This results in failed tests, so we are converting indices to float32 before # saving and then converting them back to int when loading. This is happening only for Windows, # not for Linux and Mac-OS. to_return[f"{name}.shira_indices.{k}"] = ( v.to(torch.float32) if platform.system() == "Windows" else v ) elif config.peft_type == PeftType.VERA: vera_prefix = PEFT_TYPE_TO_PREFIX_MAPPING[config.peft_type] to_return = {k: state_dict[k] for k in state_dict if vera_prefix in k} if config.save_projection: # TODO: adding vera_A and vera_B to `self.get_base_layer` would # make name to match here difficult to predict. if f"base_model.vera_A.{adapter_name}" not in state_dict: raise ValueError( "Model was initialised to not save vera_A and vera_B but config now specifies to save projection!" " Set `config.save_projection` to `False`." ) to_return["base_model.vera_A." + adapter_name] = state_dict["base_model.vera_A." + adapter_name] to_return["base_model.vera_B." + adapter_name] = state_dict["base_model.vera_B." + adapter_name] elif config.peft_type == PeftType.XLORA: to_return = {k: state_dict[k] for k in state_dict if "internal_xlora_classifier" in k} elif config.peft_type == PeftType.VBLORA: to_return = {} # choose the most efficient dtype for indices if config.num_vectors < 2**8: indices_dtype = torch.uint8 elif config.num_vectors < 2**15: indices_dtype = torch.int16 elif config.num_vectors < 2**31: indices_dtype = torch.int32 else: indices_dtype = torch.int64 if config.save_only_topk_weights: # in save_only_topk_weights mode, we save topk_indices and topk_weights for parameter efficiency for k in state_dict: if "vblora_logits" in k: logits, indices = state_dict[k].topk(config.topk) to_return.update({k + "_topk_indices": indices.to(dtype=indices_dtype)}) to_return.update({k + "_topk_weights": torch.softmax(logits, dim=-1)[:, :, :-1].contiguous()}) else: to_return = {k: state_dict[k] for k in state_dict if "vblora_logits" in k} to_return["base_model.vblora_vector_bank." + adapter_name] = state_dict[ "base_model.vblora_vector_bank." + adapter_name ] elif config.peft_type in list(PeftType): prefix = PEFT_TYPE_TO_PREFIX_MAPPING[config.peft_type] to_return = {k: state_dict[k] for k in state_dict if prefix in k} else: raise ValueError(f"Unknown PEFT type passed: {config.peft_type}") # ADDITIONAL TRAINING MODULES / MODULES_TO_SAVE for name, module in model.named_modules(): if isinstance(module, AuxiliaryTrainingWrapper): if name.startswith("_fsdp_wrapped_module."): # If FSDP is used, the state_dict is from the unwrapped model, which will result in a key mismatch if we # don't remove the FSDP-specific prefix name = name.removeprefix("_fsdp_wrapped_module.") # Compute the module-relative state dict to make it easier for the adapter to fetch the appropriate # keys that the module thinks need to be saved. We cannot rely on `.state_dict()` internally of the # module since accelerators like DeepSpeed require special handling which is done for the model # state dict from above but most likely not in the module itself. See #2450. 
            module_state_dict = {
                k.removeprefix(f"{name}."): v for k, v in state_dict.items() if k.startswith(f"{name}.")
            }

            to_return.update(
                {f"{name}.{k}": v for k, v in module.adapter_state_dict(adapter_name, module_state_dict).items()}
            )

    # DEAL WITH EMBEDDINGS
    #
    # save_embedding_layers="auto" needs to check the following logic:
    #
    # - when vocab size was NOT changed, embeddings should be saved only when targeted
    #   but not when
    #   - using PeftType.TRAINABLE_TOKENS
    #   - LoRA using trainable_token_indices (since their goal is to be space-efficient)
    #   but
    # - when vocab size was changed, embeddings should be saved automatically regardless to cover this
    #   scenario: 1) fine-tune embedding, 2) resize embedding, 3) train with trainable tokens
    #
    embedding_is_targeted = False
    if hasattr(config, "target_modules"):
        if isinstance(config.target_modules, str) and (config.target_modules != INCLUDE_LINEAR_LAYERS_SHORTHAND):
            # `model` could be a PeftModel or something else like transformers/diffusers/..., in which case unwrapping is
            # not needed.
            _model = model.get_base_model() if hasattr(model, "get_base_model") else model
            embedding_is_targeted = any(
                match_target_against_key(config.target_modules, k)
                for k, _ in _model.named_modules()
                if any(re.match(rf"(.*\.)?{e}$", k) for e in EMBEDDING_LAYER_NAMES)
            )
        elif config.target_modules:
            embedding_is_targeted = any(k in config.target_modules for k in EMBEDDING_LAYER_NAMES)

    using_trainable_tokens = (
        config.peft_type == PeftType.TRAINABLE_TOKENS or getattr(config, "trainable_token_indices", None) is not None
    )

    if save_embedding_layers == "auto" and embedding_is_targeted and not using_trainable_tokens:
        warnings.warn("Setting `save_embedding_layers` to `True` as embedding layers found in `target_modules`.")
        save_embedding_layers = True
    elif save_embedding_layers == "auto":
        vocab_size = getattr(getattr(model, "config", None), "vocab_size", None)
        model_id = getattr(config, "base_model_name_or_path", None)

        # For some models e.g. diffusers the text config file is stored in a subfolder
        # we need to make sure we can download that config.
        has_base_config = False

        # ensure that this check is not performed in HF offline mode, see #1452
        if model_id is not None:
            local_config_exists = os.path.exists(os.path.join(model_id, "config.json"))
            exists = local_config_exists or check_file_exists_on_hf_hub(model_id, "config.json")
            if exists is None:
                # check failed, could not determine if it exists or not
                warnings.warn(
                    f"Could not find a config file in {model_id} - will assume that the vocabulary was not modified."
                )
                has_base_config = False
            else:
                has_base_config = exists

        # check if the vocab size of the base model is different from the vocab size of the finetuned model
        if (
            vocab_size
            and model_id
            and has_base_config
            and (vocab_size != model.config.__class__.from_pretrained(model_id).vocab_size)
        ):
            warnings.warn(
                "Setting `save_embedding_layers` to `True` as the embedding layer has been resized during finetuning."
            )
            save_embedding_layers = True
        else:
            save_embedding_layers = False

    if save_embedding_layers and hasattr(model, "get_input_embeddings"):
        for layer in [model.get_input_embeddings(), model.get_output_embeddings()]:
            # Either the layer is not targeted, then it must have been resized and needs saving. Or it is targeted and
            # therefore has a valid base layer, then we'll save it as well. 
if not embedding_is_targeted or has_valid_embedding_base_layer(layer): embedding_module_name = get_embedding_layer_name(model, layer, embedding_is_targeted) if embedding_module_name: to_return.update({k: v for k, v in state_dict.items() if embedding_module_name in k}) elif save_embedding_layers: warnings.warn("Could not identify embedding layer(s) because the model is not a 🤗 transformers model.") # REMOVE ADAPTER NAME to_return = {k.replace(f".{adapter_name}", ""): v for k, v in to_return.items()} return to_return def _find_mismatched_keys( model: torch.nn.Module, peft_model_state_dict: dict[str, torch.Tensor], ignore_mismatched_sizes: bool = False ) -> tuple[dict[str, torch.Tensor], list[tuple[str, tuple[int, ...], tuple[int, ...]]]]: if not ignore_mismatched_sizes: return peft_model_state_dict, [] mismatched = [] state_dict = model.state_dict() for key, tensor in peft_model_state_dict.items(): if key not in state_dict: continue # see https://github.com/huggingface/transformers/blob/09f9f566de83eef1f13ee83b5a1bbeebde5c80c1/src/transformers/modeling_utils.py#L3858-L3864 if (state_dict[key].shape[-1] == 1) and (state_dict[key].numel() * 2 == tensor.numel()): # This skips size mismatches for 4-bit weights. Two 4-bit values share an 8-bit container, causing size # differences. Without matching with module type or parameter type it seems like a practical way to detect # valid 4bit weights. continue if state_dict[key].shape != tensor.shape: mismatched.append((key, tensor.shape, state_dict[key].shape)) for key, _, _ in mismatched: del peft_model_state_dict[key] return peft_model_state_dict, mismatched def _insert_adapter_name_into_state_dict( state_dict: dict[str, torch.Tensor], adapter_name: str, parameter_prefix: str ) -> dict[str, torch.Tensor]: """Utility function to remap the state_dict keys to fit the PEFT model by inserting the adapter name.""" peft_model_state_dict = {} for key, val in state_dict.items(): if parameter_prefix in key: suffix = key.split(parameter_prefix)[1] if "." in suffix: suffix_to_replace = ".".join(suffix.split(".")[1:]) key = key.replace(suffix_to_replace, f"{adapter_name}.{suffix_to_replace}") else: key = f"{key}.{adapter_name}" peft_model_state_dict[key] = val else: peft_model_state_dict[key] = val return peft_model_state_dict def set_peft_model_state_dict( model, peft_model_state_dict, adapter_name="default", ignore_mismatched_sizes: bool = False, low_cpu_mem_usage: bool = False, ): """ Set the state dict of the Peft model. Args: model ([`PeftModel`]): The Peft model. peft_model_state_dict (`dict`): The state dict of the Peft model. adapter_name (`str`, *optional*, defaults to `"default"`): The name of the adapter whose state dict should be set. ignore_mismatched_sizes (`bool`, *optional*, defaults to `False`): Whether to ignore mismatched in the state dict. low_cpu_mem_usage (`bool`, `optional`, defaults to `False`): This argument must be `True` if the `model` was loaded with adapter weights on the meta device, e.g. after calling `inject_adapter_in_model` with `low_cpu_mem_usage=True`. Otherwise, leave it as `False`. """ config = model.peft_config[adapter_name] state_dict = peft_model_state_dict # handle auxiliary training wrappers such as ModulesToSaveWrapper and TrainableTokensWrapper by getting each of # them and translating saved state dict key (which does not include the adapter name) to loaded state dict key # (which includes the adapter name). 
for name, module in model.named_modules(): if isinstance(module, AuxiliaryTrainingWrapper): # Not every module has a 1:1 mapping. ModulesToSaveWrapper, for example, removes the # `modules_to_save.{adapter_name}.` prefix. This prefix must be restored when loading the model from the # saved state dict which is why we fetch a load key map from the wrapper. key_map = module.adapter_state_dict_load_map(adapter_name) if name.startswith("_fsdp_wrapped_module."): # If FSDP is used, the state_dict is from the unwrapped model, which will result in a key mismatch if we # don't remove the FSDP-specific prefix name = name.removeprefix("_fsdp_wrapped_module.") for k in key_map: lookup_key = f"{name}.{k}" store_key = f"{name}.{key_map[k]}" state_dict[store_key] = peft_model_state_dict[lookup_key] # delete the old key from the previous `state_dict = peft_model_state_dict` statement. del state_dict[lookup_key] if config.is_prompt_learning or config.peft_type == PeftType.ADAPTION_PROMPT: peft_model_state_dict = state_dict elif config.peft_type == PeftType.XLORA: peft_model_state_dict = state_dict elif config.peft_type in PEFT_TYPE_TO_PREFIX_MAPPING: peft_model_state_dict = {} parameter_prefix = PEFT_TYPE_TO_PREFIX_MAPPING[config.peft_type] if config.peft_type == PeftType.VBLORA and config.save_only_topk_weights: num_vectors, _ = model.vblora_vector_bank[adapter_name].shape state_dict_keys = list(state_dict.keys()) for k in state_dict_keys: # in save_only_topk_weights mode, only topk_indices and topk_weights are saved # note that topk_indices and topk_weights serve as an efficient representation of the logits # so we need to recover the logits from the topk_indices and topk_weights if "_topk_indices" in k: v = state_dict[k].to(torch.long) original_key = k.replace("_topk_indices", "") # find the corresponding topk_weights from the state_dict topk_weights = state_dict[k.replace("_topk_indices", "_topk_weights")] # as we only save the first k-1 topk_weights, here we recover the last one topk_weights = torch.cat([topk_weights, 1 - topk_weights.sum(-1, keepdim=True)], dim=-1) # convert the weights to logits topk_logits = torch.log(topk_weights) matrix = ( torch.zeros([*(topk_logits.shape[:-1]), num_vectors]) .fill_(float("-inf")) .to(topk_logits.device) .scatter(-1, v, topk_logits) ) # add logits to the state_dict state_dict[original_key] = matrix # delete the topk_indices and topk_weights from the state_dict del state_dict[k] del state_dict[k.replace("_topk_indices", "_topk_weights")] peft_model_state_dict = _insert_adapter_name_into_state_dict( state_dict, adapter_name=adapter_name, parameter_prefix=parameter_prefix ) if config.peft_type == PeftType.ADALORA: rank_pattern = config.rank_pattern if rank_pattern is not None: model.resize_modules_by_rank_pattern(rank_pattern, adapter_name) elif config.peft_type == PeftType.SHIRA: if platform.system() == "Windows": warnings.warn( "Windows has issues saving integers into safetensors. Hence, we had converted shira_indices " "to float32 before saving on Windows OS. The shira_indices will always be converted to integers " "when loading." ) for name, module in model.named_modules(): if hasattr(module, "shira_indices"): # for k, v in module.shira_indices.items(): if f"{name}.shira_indices.{adapter_name}" in peft_model_state_dict: shira_indices_values = peft_model_state_dict.pop(f"{name}.shira_indices.{adapter_name}") # Convert shira_indices to int in case they were saved on a Windows OS and are being loaded # on a Linux or a Mac-OS system. 
If they were saved in Linux or Mac-OS, they are already # integers and the following will not affect anything. module.shira_indices[adapter_name] = shira_indices_values.to(torch.int) elif config.peft_type == PeftType.VERA: if config.save_projection and "base_model.vera_A" not in peft_model_state_dict: raise ValueError( "Specified to load vera_A and vera_B from state dictionary however they were not present!" ) elif not config.save_projection and "base_model.vera_A" in peft_model_state_dict: warnings.warn( "Specified to not load vera_A and vera_B from state dictionary however they are present in state" " dictionary! Consider using them to ensure checkpoint loading is correct on all platforms using" " `peft_config.save_projection = True`" ) elif not config.save_projection: # and no vera_A in state dictionary warnings.warn( "Specified to not load vera_A and vera_B from state dictionary. This means we will be relying on" " PRNG initialisation to restore these projections using `config.projection_prng_key`, which may" " not be accurate on all system configurations." ) elif config.peft_type == PeftType.LORA: # Here we take care of a refactor of DoRA which changed lora_magnitude_vector from a ParameterDict to a # ModuleDict with a DoraLayer instance. The old parameter is now the "weight" attribute of that layer. old_dora_suffix = f"lora_magnitude_vector.{adapter_name}" def renamed_dora_weights(k): if k.endswith(old_dora_suffix): k = k + ".weight" return k peft_model_state_dict = {renamed_dora_weights(k): v for k, v in peft_model_state_dict.items()} elif config.peft_type == PeftType.OFT: if any(".oft_r." in key for key in peft_model_state_dict): raise ValueError( "Trying to load old OFT checkpoint, which is no longer supported. Please install PEFT <= v0.15.2 to load it or train a new OFT adapter." ) else: raise NotImplementedError peft_model_state_dict, mismatched_keys = _find_mismatched_keys( model, peft_model_state_dict, ignore_mismatched_sizes=ignore_mismatched_sizes ) if low_cpu_mem_usage: load_result = model.load_state_dict(peft_model_state_dict, strict=False, assign=True) # ensure that the correct device is set for module in model.modules(): if hasattr(module, "_move_adapter_to_device_of_base_layer"): module._move_adapter_to_device_of_base_layer(adapter_name) else: load_result = model.load_state_dict(peft_model_state_dict, strict=False) if config.is_prompt_learning: model.prompt_encoder[adapter_name].embedding.load_state_dict( {"weight": peft_model_state_dict["prompt_embeddings"]}, strict=True ) if config.peft_type == PeftType.MULTITASK_PROMPT_TUNING: model.prompt_encoder[adapter_name].load_state_dict(peft_model_state_dict, strict=False) if mismatched_keys: # see https://github.com/huggingface/transformers/blob/09f9f566de83eef1f13ee83b5a1bbeebde5c80c1/src/transformers/modeling_utils.py#L4039 mismatched_warning = "\n".join( [ f"- {key}: found shape {shape1} in the checkpoint and {shape2} in the model instantiated" for key, shape1, shape2 in mismatched_keys ] ) msg = ( f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint " f"and are being ignored because you passed `ignore_mismatched_sizes=True`: {mismatched_warning}." ) warnings.warn(msg) return load_result # TODO: remove this function, use vanilla torch.load as soon as torch < 2.6.0 is no longer supported def torch_load(*args, weights_only=True, **kwargs): """Call torch.load and handle weights_only. Defaults to weights_only=True to anticipate upcoming switch on the PyTorch side. 
""" return torch.load(*args, weights_only=weights_only, **kwargs) def load_peft_weights( model_id: str, device: Optional[str] = None, key_mapping: Optional[dict[str, str]] = None, **hf_hub_download_kwargs ) -> dict: r""" A helper method to load the PEFT weights from the HuggingFace Hub or locally Args: model_id (`str`): The local path to the adapter weights or the name of the adapter to load from the HuggingFace Hub. device (`str`): The device to load the weights onto. key_mapping (dict, *optional*, defaults to None) Extra mapping of PEFT `state_dict` keys applied before loading the `state_dict`. When this mapping is applied, the PEFT-specific `"base_model.model"` prefix is removed beforehand and the adapter name (e.g. `"default"`) is not inserted yet. Only pass this argument if you know what you're doing. hf_hub_download_kwargs (`dict`): Additional arguments to pass to the `hf_hub_download` method when loading from the HuggingFace Hub. """ path = ( os.path.join(model_id, hf_hub_download_kwargs["subfolder"]) if hf_hub_download_kwargs.get("subfolder", None) is not None else model_id ) if device is None: device = infer_device() def get_hub_filename(use_safetensors=True): weights_name = SAFETENSORS_WEIGHTS_NAME if use_safetensors else WEIGHTS_NAME return ( os.path.join(hf_hub_download_kwargs["subfolder"], weights_name) if hf_hub_download_kwargs.get("subfolder", None) is not None else weights_name ) if "user_agent" not in hf_hub_download_kwargs: hf_hub_download_kwargs["user_agent"] = http_user_agent() if os.path.exists(os.path.join(path, SAFETENSORS_WEIGHTS_NAME)): filename = os.path.join(path, SAFETENSORS_WEIGHTS_NAME) use_safetensors = True elif os.path.exists(os.path.join(path, WEIGHTS_NAME)): filename = os.path.join(path, WEIGHTS_NAME) use_safetensors = False elif huggingface_hub.constants.HF_HUB_OFFLINE: # if in offline mode, check if we can find the adapter file locally hub_filename = get_hub_filename(use_safetensors=True) hf_hub_download_kwargs.pop("local_files_only", None) try: filename = hf_hub_download(model_id, hub_filename, local_files_only=True, **hf_hub_download_kwargs) use_safetensors = True except LocalEntryNotFoundError: # Could not find safetensors, try pickle. If this also fails, it's fine to let the error be raised here, as # it means that the user tried to load a non-cached model in offline mode. hub_filename = get_hub_filename(use_safetensors=False) filename = hf_hub_download(model_id, hub_filename, local_files_only=True, **hf_hub_download_kwargs) use_safetensors = False else: token = hf_hub_download_kwargs.get("token", None) if token is None: token = hf_hub_download_kwargs.get("use_auth_token", None) hub_filename = get_hub_filename(use_safetensors=True) has_remote_safetensors_file = file_exists( repo_id=model_id, filename=hub_filename, revision=hf_hub_download_kwargs.get("revision", None), repo_type=hf_hub_download_kwargs.get("repo_type", None), token=token, ) use_safetensors = has_remote_safetensors_file if has_remote_safetensors_file: # Priority 1: load safetensors weights filename = hf_hub_download( model_id, SAFETENSORS_WEIGHTS_NAME, **hf_hub_download_kwargs, ) else: try: filename = hf_hub_download(model_id, WEIGHTS_NAME, **hf_hub_download_kwargs) except EntryNotFoundError: raise ValueError( f"Can't find weights for {model_id} in {model_id} or in the Hugging Face Hub. " f"Please check that the file {WEIGHTS_NAME} or {SAFETENSORS_WEIGHTS_NAME} is present at {model_id}." 
) if use_safetensors: if hasattr(torch.backends, "mps") and (device == torch.device("mps")): adapters_weights = safe_load_file(filename, device="cpu") else: adapters_weights = safe_load_file(filename, device=device) else: adapters_weights = torch_load(filename, map_location=torch.device(device)) if not key_mapping: remapped_adapters_weights = adapters_weights else: # See discussion in https://github.com/huggingface/transformers/pull/38627 # Remap adapter weight names according to the provided key_mapping. remapped_adapters_weights = {} for key, val in adapters_weights.items(): if key.startswith("base_model.model."): prefix = "base_model.model." elif key.startswith("base_model."): prefix = "base_model." else: raise ValueError( "An error occurred while trying to load a PEFT state_dict with key_mapping. This should not " "happen. Please open an issue on https://github.com/huggingface/peft/issues and report the error." ) key = key.removeprefix(prefix) # the key map assumes that there is no prefix for pattern, replacement in key_mapping.items(): key_new, n_replace = re.subn(pattern, replacement, key) # Early exit of the loop if n_replace > 0: key = key_new break key_with_prefix = f"{prefix}{key}" remapped_adapters_weights[key_with_prefix] = val return remapped_adapters_weights
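# --- Illustrative usage sketch (editorial addition, not part of the original file) ---
# A hedged round-trip example for the main entry points above: extract the adapter-only state dict
# with get_peft_model_state_dict and restore it with set_peft_model_state_dict. The model id and
# LoRA settings are placeholders modeled on the test files in this dump.
if __name__ == "__main__":
    from transformers import AutoModelForCausalLM

    from peft import LoraConfig, get_peft_model
    from peft.utils.save_and_load import get_peft_model_state_dict, set_peft_model_state_dict

    base = AutoModelForCausalLM.from_pretrained("trl-internal-testing/tiny-random-LlamaForCausalLM")
    model = get_peft_model(base, LoraConfig(r=8, target_modules=["q_proj", "v_proj"]))

    # Adapter-only weights; the adapter name ("default") is stripped from the keys on the way out.
    adapter_sd = get_peft_model_state_dict(model)
    assert all("lora_" in k for k in adapter_sd)

    # Loading re-inserts the adapter name; with the default bias="none" nothing should be left over.
    load_result = set_peft_model_state_dict(model, adapter_sd, adapter_name="default")
    assert not load_result.unexpected_keys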
peft/src/peft/utils/save_and_load.py/0
{ "file_path": "peft/src/peft/utils/save_and_load.py", "repo_id": "peft", "token_count": 15268 }
243
# Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tempfile import pytest import torch from transformers import AutoModelForSeq2SeqLM, AutoModelForTokenClassification from peft import ( AdaLoraConfig, BOFTConfig, BoneConfig, C3AConfig, FourierFTConfig, HRAConfig, IA3Config, LoraConfig, MissConfig, OFTConfig, PrefixTuningConfig, PromptEncoderConfig, PromptTuningConfig, RoadConfig, ShiraConfig, TaskType, VBLoRAConfig, VeraConfig, get_peft_model, ) from .testing_common import PeftCommonTester from .testing_utils import set_init_weights_false PEFT_ENCODER_DECODER_MODELS_TO_TEST = [ "ybelkada/tiny-random-T5ForConditionalGeneration-calibrated", "hf-internal-testing/tiny-random-BartForConditionalGeneration", ] # TODO Missing from this list are LoKr, LoHa, LN Tuning, add them ALL_CONFIGS = [ ( AdaLoraConfig, { "target_modules": None, "total_step": 1, "task_type": "SEQ_2_SEQ_LM", }, ), ( BOFTConfig, { "target_modules": None, "task_type": "SEQ_2_SEQ_LM", }, ), ( BoneConfig, { "target_modules": None, "r": 2, "task_type": "SEQ_2_SEQ_LM", }, ), ( MissConfig, { "target_modules": None, "r": 2, "task_type": "SEQ_2_SEQ_LM", }, ), ( FourierFTConfig, { "n_frequency": 10, "target_modules": None, "task_type": "SEQ_2_SEQ_LM", }, ), ( HRAConfig, { "target_modules": None, "task_type": "SEQ_2_SEQ_LM", }, ), ( IA3Config, { "target_modules": None, "feedforward_modules": None, "task_type": "SEQ_2_SEQ_LM", }, ), ( LoraConfig, { "r": 8, "lora_alpha": 32, "target_modules": None, "lora_dropout": 0.05, "bias": "none", "task_type": "SEQ_2_SEQ_LM", }, ), ( LoraConfig, { "r": 8, "lora_alpha": 32, "target_modules": None, "lora_dropout": 0.05, "bias": "none", "trainable_token_indices": [0, 1, 3], "task_type": "SEQ_2_SEQ_LM", }, ), ( OFTConfig, { "target_modules": None, "task_type": "SEQ_2_SEQ_LM", }, ), ( PrefixTuningConfig, { "num_virtual_tokens": 10, "task_type": "SEQ_2_SEQ_LM", }, ), ( PromptEncoderConfig, { "num_virtual_tokens": 10, "encoder_hidden_size": 32, "task_type": "SEQ_2_SEQ_LM", }, ), ( PromptTuningConfig, { "num_virtual_tokens": 10, "task_type": "SEQ_2_SEQ_LM", }, ), ( RoadConfig, { "task_type": "SEQ_2_SEQ_LM", "variant": "road_1", "group_size": 2, }, ), ( ShiraConfig, { "r": 1, "task_type": "SEQ_2_SEQ_LM", "target_modules": None, "init_weights": False, }, ), ( VBLoRAConfig, { "target_modules": None, "vblora_dropout": 0.05, "vector_length": 1, "num_vectors": 2, "task_type": "SEQ_2_SEQ_LM", }, ), ( VeraConfig, { "r": 8, "target_modules": None, "vera_dropout": 0.05, "projection_prng_key": 0xFF, "d_initial": 0.1, "save_projection": True, "bias": "none", "task_type": "SEQ_2_SEQ_LM", }, ), ( C3AConfig, { "task_type": "SEQ_2_SEQ_LM", "block_size": 1, "target_modules": None, }, ), ] class TestEncoderDecoderModels(PeftCommonTester): transformers_class = AutoModelForSeq2SeqLM def skipTest(self, reason=""): # for backwards compatibility with unittest style test classes pytest.skip(reason) def prepare_inputs_for_testing(self): input_ids = torch.tensor([[1, 1, 1], [1, 2, 
1]]).to(self.torch_device) decoder_input_ids = torch.tensor([[1, 1, 1], [1, 2, 1]]).to(self.torch_device) attention_mask = torch.tensor([[1, 1, 1], [1, 0, 1]]).to(self.torch_device) input_dict = { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, } return input_dict @pytest.mark.parametrize("model_id", PEFT_ENCODER_DECODER_MODELS_TO_TEST) @pytest.mark.parametrize("config_cls,config_kwargs", ALL_CONFIGS) def test_attributes_parametrized(self, model_id, config_cls, config_kwargs): self._test_model_attr(model_id, config_cls, config_kwargs) @pytest.mark.parametrize("model_id", PEFT_ENCODER_DECODER_MODELS_TO_TEST) @pytest.mark.parametrize("config_cls,config_kwargs", ALL_CONFIGS) def test_adapter_name(self, model_id, config_cls, config_kwargs): self._test_adapter_name(model_id, config_cls, config_kwargs) @pytest.mark.parametrize("model_id", PEFT_ENCODER_DECODER_MODELS_TO_TEST) @pytest.mark.parametrize("config_cls,config_kwargs", ALL_CONFIGS) def test_prepare_for_training_parametrized(self, model_id, config_cls, config_kwargs): self._test_prepare_for_training(model_id, config_cls, config_kwargs) @pytest.mark.parametrize("model_id", PEFT_ENCODER_DECODER_MODELS_TO_TEST) @pytest.mark.parametrize("config_cls,config_kwargs", ALL_CONFIGS) def test_save_pretrained(self, model_id, config_cls, config_kwargs): self._test_save_pretrained(model_id, config_cls, config_kwargs) @pytest.mark.parametrize("model_id", PEFT_ENCODER_DECODER_MODELS_TO_TEST) @pytest.mark.parametrize("config_cls,config_kwargs", ALL_CONFIGS) def test_save_pretrained_pickle(self, model_id, config_cls, config_kwargs): self._test_save_pretrained(model_id, config_cls, config_kwargs, safe_serialization=False) @pytest.mark.parametrize("model_id", PEFT_ENCODER_DECODER_MODELS_TO_TEST) @pytest.mark.parametrize("config_cls,config_kwargs", ALL_CONFIGS) def test_save_pretrained_selected_adapters(self, model_id, config_cls, config_kwargs): self._test_save_pretrained_selected_adapters(model_id, config_cls, config_kwargs) @pytest.mark.parametrize("model_id", PEFT_ENCODER_DECODER_MODELS_TO_TEST) @pytest.mark.parametrize("config_cls,config_kwargs", ALL_CONFIGS) def test_save_pretrained_selected_adapters_pickle(self, model_id, config_cls, config_kwargs): self._test_save_pretrained_selected_adapters(model_id, config_cls, config_kwargs, safe_serialization=False) def test_load_model_low_cpu_mem_usage(self): # Using the first model with LoraConfig and an empty config_kwargs. 
self._test_load_model_low_cpu_mem_usage(PEFT_ENCODER_DECODER_MODELS_TO_TEST[0], LoraConfig, {}) @pytest.mark.parametrize("model_id", PEFT_ENCODER_DECODER_MODELS_TO_TEST) @pytest.mark.parametrize("config_cls,config_kwargs", ALL_CONFIGS) def test_from_pretrained_config_construction(self, model_id, config_cls, config_kwargs): self._test_from_pretrained_config_construction(model_id, config_cls, config_kwargs) @pytest.mark.parametrize("model_id", PEFT_ENCODER_DECODER_MODELS_TO_TEST) @pytest.mark.parametrize("config_cls,config_kwargs", ALL_CONFIGS) def test_merge_layers(self, model_id, config_cls, config_kwargs): config_kwargs = set_init_weights_false(config_cls, config_kwargs) self._test_merge_layers(model_id, config_cls, config_kwargs) @pytest.mark.parametrize("model_id", PEFT_ENCODER_DECODER_MODELS_TO_TEST) @pytest.mark.parametrize("config_cls,config_kwargs", ALL_CONFIGS) def test_mixed_adapter_batches(self, model_id, config_cls, config_kwargs): config_kwargs = set_init_weights_false(config_cls, config_kwargs) self._test_mixed_adapter_batches(model_id, config_cls, config_kwargs) @pytest.mark.parametrize("model_id", PEFT_ENCODER_DECODER_MODELS_TO_TEST) @pytest.mark.parametrize("config_cls,config_kwargs", ALL_CONFIGS) def test_generate_with_mixed_adapter_batches(self, model_id, config_cls, config_kwargs): config_kwargs = set_init_weights_false(config_cls, config_kwargs) self._test_generate_with_mixed_adapter_batches_and_beam_search(model_id, config_cls, config_kwargs) @pytest.mark.parametrize("model_id", PEFT_ENCODER_DECODER_MODELS_TO_TEST) @pytest.mark.parametrize("config_cls,config_kwargs", ALL_CONFIGS) def test_generate(self, model_id, config_cls, config_kwargs): self._test_generate(model_id, config_cls, config_kwargs) @pytest.mark.parametrize("model_id", PEFT_ENCODER_DECODER_MODELS_TO_TEST) @pytest.mark.parametrize("config_cls,config_kwargs", ALL_CONFIGS) def test_generate_pos_args(self, model_id, config_cls, config_kwargs): self._test_generate_pos_args(model_id, config_cls, config_kwargs, raises_err=True) @pytest.mark.parametrize("model_id", PEFT_ENCODER_DECODER_MODELS_TO_TEST) @pytest.mark.parametrize("config_cls,config_kwargs", ALL_CONFIGS) def test_generate_half_prec(self, model_id, config_cls, config_kwargs): self._test_generate_half_prec(model_id, config_cls, config_kwargs) @pytest.mark.parametrize("model_id", PEFT_ENCODER_DECODER_MODELS_TO_TEST) @pytest.mark.parametrize("config_cls,config_kwargs", ALL_CONFIGS) def test_prefix_tuning_half_prec_conversion(self, model_id, config_cls, config_kwargs): self._test_prefix_tuning_half_prec_conversion(model_id, config_cls, config_kwargs) @pytest.mark.parametrize("model_id", PEFT_ENCODER_DECODER_MODELS_TO_TEST) @pytest.mark.parametrize("config_cls,config_kwargs", ALL_CONFIGS) def test_training_encoder_decoders(self, model_id, config_cls, config_kwargs): self._test_training(model_id, config_cls, config_kwargs) @pytest.mark.parametrize("model_id", PEFT_ENCODER_DECODER_MODELS_TO_TEST) @pytest.mark.parametrize("config_cls,config_kwargs", ALL_CONFIGS) def test_training_encoder_decoders_layer_indexing(self, model_id, config_cls, config_kwargs): self._test_training_layer_indexing(model_id, config_cls, config_kwargs) @pytest.mark.parametrize("model_id", PEFT_ENCODER_DECODER_MODELS_TO_TEST) @pytest.mark.parametrize("config_cls,config_kwargs", ALL_CONFIGS) def test_training_encoder_decoders_gradient_checkpointing(self, model_id, config_cls, config_kwargs): self._test_training_gradient_checkpointing(model_id, config_cls, config_kwargs) 
@pytest.mark.parametrize("model_id", PEFT_ENCODER_DECODER_MODELS_TO_TEST) @pytest.mark.parametrize("config_cls,config_kwargs", ALL_CONFIGS) def test_inference_safetensors(self, model_id, config_cls, config_kwargs): self._test_inference_safetensors(model_id, config_cls, config_kwargs) @pytest.mark.parametrize("model_id", PEFT_ENCODER_DECODER_MODELS_TO_TEST) @pytest.mark.parametrize("config_cls,config_kwargs", ALL_CONFIGS) def test_peft_model_device_map(self, model_id, config_cls, config_kwargs): self._test_peft_model_device_map(model_id, config_cls, config_kwargs) @pytest.mark.parametrize("model_id", PEFT_ENCODER_DECODER_MODELS_TO_TEST) @pytest.mark.parametrize("config_cls,config_kwargs", ALL_CONFIGS) def test_delete_adapter(self, model_id, config_cls, config_kwargs): self._test_delete_adapter(model_id, config_cls, config_kwargs) @pytest.mark.parametrize("model_id", PEFT_ENCODER_DECODER_MODELS_TO_TEST) @pytest.mark.parametrize("config_cls,config_kwargs", ALL_CONFIGS) def test_delete_inactive_adapter(self, model_id, config_cls, config_kwargs): self._test_delete_inactive_adapter(model_id, config_cls, config_kwargs) @pytest.mark.parametrize("model_id", PEFT_ENCODER_DECODER_MODELS_TO_TEST) @pytest.mark.parametrize("config_cls,config_kwargs", ALL_CONFIGS) def test_adding_multiple_adapters_with_bias_raises(self, model_id, config_cls, config_kwargs): self._test_adding_multiple_adapters_with_bias_raises(model_id, config_cls, config_kwargs) @pytest.mark.parametrize("model_id", PEFT_ENCODER_DECODER_MODELS_TO_TEST) @pytest.mark.parametrize("config_cls,config_kwargs", ALL_CONFIGS) def test_unload_adapter(self, model_id, config_cls, config_kwargs): config_kwargs = set_init_weights_false(config_cls, config_kwargs) self._test_unload_adapter(model_id, config_cls, config_kwargs) @pytest.mark.parametrize("model_id", PEFT_ENCODER_DECODER_MODELS_TO_TEST) @pytest.mark.parametrize("config_cls,config_kwargs", ALL_CONFIGS) def test_weighted_combination_of_adapters(self, model_id, config_cls, config_kwargs): config_kwargs = set_init_weights_false(config_cls, config_kwargs) self._test_weighted_combination_of_adapters(model_id, config_cls, config_kwargs) @pytest.mark.parametrize("model_id", PEFT_ENCODER_DECODER_MODELS_TO_TEST) @pytest.mark.parametrize("config_cls,config_kwargs", ALL_CONFIGS) def test_training_prompt_learning_tasks(self, model_id, config_cls, config_kwargs): self._test_training_prompt_learning_tasks(model_id, config_cls, config_kwargs) @pytest.mark.parametrize("model_id", PEFT_ENCODER_DECODER_MODELS_TO_TEST) @pytest.mark.parametrize("config_cls,config_kwargs", ALL_CONFIGS) def test_disable_adapter(self, model_id, config_cls, config_kwargs): config_kwargs = set_init_weights_false(config_cls, config_kwargs) self._test_disable_adapter(model_id, config_cls, config_kwargs) def test_active_adapters_prompt_learning(self): model = AutoModelForSeq2SeqLM.from_pretrained( "hf-internal-testing/tiny-random-BartForConditionalGeneration" ).to(self.torch_device) # any prompt learning method would work here config = PromptEncoderConfig(task_type=TaskType.SEQ_2_SEQ_LM, num_virtual_tokens=10) model = get_peft_model(model, config) assert model.active_adapters == ["default"] def test_save_shared_tensors(self): model_id = "hf-internal-testing/tiny-random-RobertaModel" peft_config = LoraConfig( task_type=TaskType.TOKEN_CLS, inference_mode=False, r=16, lora_alpha=16, lora_dropout=0.1, bias="all", ) model = AutoModelForTokenClassification.from_pretrained(model_id, num_labels=11) model = get_peft_model(model, peft_config) with 
tempfile.TemporaryDirectory() as tmp_dir: # This should work fine model.save_pretrained(tmp_dir, safe_serialization=True)
peft/tests/test_encoder_decoder_models.py/0
{ "file_path": "peft/tests/test_encoder_decoder_models.py", "repo_id": "peft", "token_count": 7431 }
244
# Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import tempfile import pytest import torch from torch.testing import assert_close from transformers import AutoModelForCausalLM from peft import get_peft_model from peft.peft_model import PeftModel from peft.tuners.multitask_prompt_tuning import MultitaskPromptTuningConfig, MultitaskPromptTuningInit from peft.utils import infer_device from peft.utils.other import WEIGHTS_NAME, prepare_model_for_kbit_training from peft.utils.save_and_load import get_peft_model_state_dict MODELS_TO_TEST = [ "trl-internal-testing/tiny-random-LlamaForCausalLM", ] class TestMultiTaskPromptTuning: """ Tests for the MultiTaskPromptTuning model. """ @pytest.fixture def config(cls) -> MultitaskPromptTuningConfig: return MultitaskPromptTuningConfig( task_type="CAUSAL_LM", num_virtual_tokens=50, num_tasks=3, prompt_tuning_init_text=( "classify the following into either positive or negative, or entailment, neutral or contradiction:" ), ) transformers_class = AutoModelForCausalLM torch_device = infer_device() @pytest.mark.parametrize("model_id", MODELS_TO_TEST) def test_prepare_for_training(self, model_id, config): model = AutoModelForCausalLM.from_pretrained(model_id) model = get_peft_model(model, config) model = model.to(self.torch_device) dummy_input = torch.LongTensor([[1, 1, 1]]).to(self.torch_device) dummy_output = model.get_input_embeddings()(dummy_input) assert not dummy_output.requires_grad @pytest.mark.parametrize("model_id", MODELS_TO_TEST) def test_prepare_for_int8_training(self, model_id, config): model = AutoModelForCausalLM.from_pretrained(model_id) model = prepare_model_for_kbit_training(model) model = model.to(self.torch_device) for param in model.parameters(): assert not param.requires_grad model = get_peft_model(model, config) # For backward compatibility if hasattr(model, "enable_input_require_grads"): model.enable_input_require_grads() else: def make_inputs_require_grad(module, input, output): output.requires_grad_(True) model.get_input_embeddings().register_forward_hook(make_inputs_require_grad) dummy_input = torch.LongTensor([[1, 1, 1]]).to(self.torch_device) dummy_output = model.get_input_embeddings()(dummy_input) assert dummy_output.requires_grad @pytest.mark.parametrize("model_id", MODELS_TO_TEST) def test_save_pretrained(self, model_id, config): seed = 420 torch.manual_seed(seed) model = AutoModelForCausalLM.from_pretrained(model_id) model = get_peft_model(model, config) model = model.to(self.torch_device) with tempfile.TemporaryDirectory() as tmp_dirname: model.save_pretrained(tmp_dirname) torch.manual_seed(seed) model_from_pretrained = AutoModelForCausalLM.from_pretrained(model_id) model_from_pretrained = PeftModel.from_pretrained(model_from_pretrained, tmp_dirname) # check if the state dicts are equal state_dict = get_peft_model_state_dict(model) state_dict_from_pretrained = get_peft_model_state_dict(model_from_pretrained) # check if same keys assert state_dict.keys() == 
state_dict_from_pretrained.keys() # Check that the number of saved parameters is 4 -- 2 layers of (tokens and gate). assert len(state_dict) == 3 # check if tensors equal for key in state_dict.keys(): assert torch.allclose( state_dict[key].to(self.torch_device), state_dict_from_pretrained[key].to(self.torch_device) ) # check if `adapter_model.safetensors` is present assert os.path.exists(os.path.join(tmp_dirname, "adapter_model.safetensors")) # check if `adapter_config.json` is present assert os.path.exists(os.path.join(tmp_dirname, "adapter_config.json")) # check if `pytorch_model.bin` is not present assert not os.path.exists(os.path.join(tmp_dirname, "pytorch_model.bin")) # check if `config.json` is not present assert not os.path.exists(os.path.join(tmp_dirname, "config.json")) @pytest.mark.parametrize("model_id", MODELS_TO_TEST) def test_save_pretrained_regression(self, model_id, config): seed = 420 torch.manual_seed(seed) model = AutoModelForCausalLM.from_pretrained(model_id) model = get_peft_model(model, config) model = model.to(self.torch_device) with tempfile.TemporaryDirectory() as tmp_dirname: model.save_pretrained(tmp_dirname, safe_serialization=False) torch.manual_seed(seed) model_from_pretrained = AutoModelForCausalLM.from_pretrained(model_id) model_from_pretrained = PeftModel.from_pretrained(model_from_pretrained, tmp_dirname) # check if the state dicts are equal state_dict = get_peft_model_state_dict(model) state_dict_from_pretrained = get_peft_model_state_dict(model_from_pretrained) # check if same keys assert state_dict.keys() == state_dict_from_pretrained.keys() # Check that the number of saved parameters is 4 -- 2 layers of (tokens and gate). assert len(state_dict) == 3 # check if tensors equal for key in state_dict.keys(): assert torch.allclose( state_dict[key].to(self.torch_device), state_dict_from_pretrained[key].to(self.torch_device) ) # check if `adapter_model.bin` is present for regression assert os.path.exists(os.path.join(tmp_dirname, "adapter_model.bin")) # check if `adapter_config.json` is present assert os.path.exists(os.path.join(tmp_dirname, "adapter_config.json")) # check if `pytorch_model.bin` is not present assert not os.path.exists(os.path.join(tmp_dirname, "pytorch_model.bin")) # check if `config.json` is not present assert not os.path.exists(os.path.join(tmp_dirname, "config.json")) @pytest.mark.parametrize("model_id", MODELS_TO_TEST) def test_generate(self, model_id, config): model = AutoModelForCausalLM.from_pretrained(model_id) model = get_peft_model(model, config) model = model.to(self.torch_device) input_ids = torch.LongTensor([[1, 1, 1], [2, 1, 2]]).to(self.torch_device) attention_mask = torch.LongTensor([[1, 1, 1], [1, 0, 1]]).to(self.torch_device) task_ids = torch.LongTensor([1, 2]).to(self.torch_device) # check if `generate` works _ = model.generate(input_ids=input_ids, attention_mask=attention_mask, task_ids=task_ids) # check if `generate` works if positional arguments are passed _ = model.generate(input_ids, attention_mask=attention_mask, task_ids=task_ids) @pytest.mark.parametrize("model_id", MODELS_TO_TEST) def test_use_cache(self, model_id, config): """Test that MultiTaskPromptTuning works when Llama config use_cache=True.""" torch.manual_seed(0) input_ids = torch.LongTensor([[1, 1, 1], [2, 1, 2]]).to(self.torch_device) task_ids = torch.LongTensor([1, 2]).to(self.torch_device) original = AutoModelForCausalLM.from_pretrained(model_id) mpt = get_peft_model(original, config) mpt = mpt.to(self.torch_device) expected = 
mpt.generate(input_ids=input_ids, max_length=8, task_ids=task_ids) # Set use_cache = True and generate output again. mpt.base_model.config.use_cache = True actual = mpt.generate(input_ids=input_ids, max_length=8, task_ids=task_ids) assert_close(expected, actual, rtol=0, atol=0) @pytest.mark.parametrize("model_id", MODELS_TO_TEST) def test_bf16_inference(self, model_id, config): """Test that MultiTaskPromptTuning works when Llama using a half-precision model.""" input_ids = torch.LongTensor([[1, 1, 1], [2, 1, 2]]).to(self.torch_device) task_ids = torch.tensor([1, 2]).to(self.torch_device) original = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16) mpt = get_peft_model(original, config) mpt = mpt.to(self.torch_device) _ = mpt.generate(input_ids=input_ids, task_ids=task_ids) @pytest.mark.parametrize("model_id", MODELS_TO_TEST) def test_generate_text_with_random_init(self, model_id, config) -> None: torch.manual_seed(0) model = AutoModelForCausalLM.from_pretrained(model_id) config.prompt_tuning_init = MultitaskPromptTuningInit.RANDOM model = get_peft_model(model, config) model = model.to(self.torch_device) input_ids = torch.LongTensor([[1, 1, 1], [2, 1, 2]]).to(self.torch_device) attention_mask = torch.LongTensor([[1, 1, 1], [1, 0, 1]]).to(self.torch_device) task_ids = torch.LongTensor([0]).to(self.torch_device) # check if `generate` works _ = model.generate(input_ids=input_ids, attention_mask=attention_mask, task_ids=task_ids) with pytest.raises(ValueError): # check if `generate` raises an error if task_ids are not passed _ = model.generate(input_ids, attention_mask=attention_mask) @pytest.mark.parametrize( "prompt_tuning_init", [ MultitaskPromptTuningInit.AVERAGE_SOURCE_TASKS, MultitaskPromptTuningInit.EXACT_SOURCE_TASK, MultitaskPromptTuningInit.ONLY_SOURCE_SHARED, ], ) @pytest.mark.parametrize("model_id", MODELS_TO_TEST) def test_generate_text_with_other_init(self, prompt_tuning_init, model_id, config) -> None: # This test is flaky, hence fixing the seed. The reason is somehow related to: # https://github.com/huggingface/transformers/blob/e786844425b6b1112c76513d66217ce2fe6aea41/src/transformers/generation/utils.py#L2691 # When an EOS token is generated, the loop is exited and the pytest.raises at the bottom is not triggered # because `forward` of the PEFT model, which should raise the error, is never called. 
torch.manual_seed(42) # seed 43 fails with transformers v4.42.3 and torch v2.3.1 with tempfile.TemporaryDirectory() as tmp_dirname: model = AutoModelForCausalLM.from_pretrained(model_id) model = get_peft_model(model, config) model.save_pretrained(tmp_dirname, safe_serialization=False) # bc torch.load is used config = MultitaskPromptTuningConfig( task_type="CAUSAL_LM", num_virtual_tokens=50, num_tasks=1, prompt_tuning_init_text=( "classify the following into either positive or negative, or entailment, neutral or contradiction:" ), prompt_tuning_init=prompt_tuning_init, prompt_tuning_init_state_dict_path=os.path.join(tmp_dirname, WEIGHTS_NAME), ) model = AutoModelForCausalLM.from_pretrained(model_id) model = get_peft_model(model, config) model = model.to(self.torch_device) input_ids = torch.LongTensor([[1, 1, 1], [2, 1, 2]]).to(self.torch_device) attention_mask = torch.LongTensor([[1, 1, 1], [1, 0, 1]]).to(self.torch_device) task_ids = torch.LongTensor([0]).to(self.torch_device) # check if `generate` works _ = model.generate(input_ids=input_ids, attention_mask=attention_mask, task_ids=task_ids) with pytest.raises(ValueError, match="task_ids cannot be None"): # check if `generate` raises an error if task_ids are not passed _ = model.generate(input_ids, attention_mask=attention_mask)
peft/tests/test_multitask_prompt_tuning.py/0
{ "file_path": "peft/tests/test_multitask_prompt_tuning.py", "repo_id": "peft", "token_count": 5520 }
245
# Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import unittest from contextlib import contextmanager from functools import lru_cache, wraps from unittest import mock import numpy as np import pytest import torch from accelerate.test_utils.testing import get_backend from datasets import load_dataset from peft import ( AdaLoraConfig, IA3Config, LNTuningConfig, LoraConfig, PromptLearningConfig, VBLoRAConfig, ) from peft.import_utils import ( is_aqlm_available, is_auto_awq_available, is_auto_gptq_available, is_eetq_available, is_gptqmodel_available, is_hqq_available, is_optimum_available, is_torchao_available, ) # Globally shared model cache used by `hub_online_once`. _HUB_MODEL_ACCESSES = {} torch_device, device_count, memory_allocated_func = get_backend() def require_non_cpu(test_case): """ Decorator marking a test that requires a hardware accelerator backend. These tests are skipped when there are no hardware accelerator available. """ return unittest.skipUnless(torch_device != "cpu", "test requires a hardware accelerator")(test_case) def require_non_xpu(test_case): """ Decorator marking a test that should be skipped for XPU. """ return unittest.skipUnless(torch_device != "xpu", "test requires a non-XPU")(test_case) def require_torch_gpu(test_case): """ Decorator marking a test that requires a GPU. Will be skipped when no GPU is available. """ if not torch.cuda.is_available(): return unittest.skip("test requires GPU")(test_case) else: return test_case def require_torch_multi_gpu(test_case): """ Decorator marking a test that requires multiple GPUs. Will be skipped when less than 2 GPUs are available. """ if not torch.cuda.is_available() or torch.cuda.device_count() < 2: return unittest.skip("test requires multiple GPUs")(test_case) else: return test_case def require_torch_multi_accelerator(test_case): """ Decorator marking a test that requires multiple hardware accelerators. These tests are skipped on a machine without multiple accelerators. """ return unittest.skipUnless( torch_device != "cpu" and device_count > 1, "test requires multiple hardware accelerators" )(test_case) def require_bitsandbytes(test_case): """ Decorator marking a test that requires the bitsandbytes library. Will be skipped when the library is not installed. """ try: import bitsandbytes # noqa: F401 test_case = pytest.mark.bitsandbytes(test_case) except ImportError: test_case = pytest.mark.skip(reason="test requires bitsandbytes")(test_case) return test_case def require_auto_gptq(test_case): """ Decorator marking a test that requires auto-gptq. These tests are skipped when auto-gptq isn't installed. """ return unittest.skipUnless(is_gptqmodel_available() or is_auto_gptq_available(), "test requires auto-gptq")( test_case ) def require_gptqmodel(test_case): """ Decorator marking a test that requires gptqmodel. These tests are skipped when gptqmodel isn't installed. 
""" return unittest.skipUnless(is_gptqmodel_available(), "test requires gptqmodel")(test_case) def require_aqlm(test_case): """ Decorator marking a test that requires aqlm. These tests are skipped when aqlm isn't installed. """ return unittest.skipUnless(is_aqlm_available(), "test requires aqlm")(test_case) def require_hqq(test_case): """ Decorator marking a test that requires aqlm. These tests are skipped when aqlm isn't installed. """ return unittest.skipUnless(is_hqq_available(), "test requires hqq")(test_case) def require_auto_awq(test_case): """ Decorator marking a test that requires auto-awq. These tests are skipped when auto-awq isn't installed. """ return unittest.skipUnless(is_auto_awq_available(), "test requires auto-awq")(test_case) def require_eetq(test_case): """ Decorator marking a test that requires eetq. These tests are skipped when eetq isn't installed. """ return unittest.skipUnless(is_eetq_available(), "test requires eetq")(test_case) def require_optimum(test_case): """ Decorator marking a test that requires optimum. These tests are skipped when optimum isn't installed. """ return unittest.skipUnless(is_optimum_available(), "test requires optimum")(test_case) def require_torchao(test_case): """ Decorator marking a test that requires torchao. These tests are skipped when torchao isn't installed. """ return unittest.skipUnless(is_torchao_available(), "test requires torchao")(test_case) def require_deterministic_for_xpu(test_case): @wraps(test_case) def wrapper(*args, **kwargs): if torch_device == "xpu": original_state = torch.are_deterministic_algorithms_enabled() try: torch.use_deterministic_algorithms(True) return test_case(*args, **kwargs) finally: torch.use_deterministic_algorithms(original_state) else: return test_case(*args, **kwargs) return wrapper @contextmanager def temp_seed(seed: int): """Temporarily set the random seed. This works for python numpy, pytorch.""" np_state = np.random.get_state() np.random.seed(seed) torch_state = torch.random.get_rng_state() torch.random.manual_seed(seed) if torch.cuda.is_available(): torch_cuda_states = torch.cuda.get_rng_state_all() torch.cuda.manual_seed_all(seed) try: yield finally: np.random.set_state(np_state) torch.random.set_rng_state(torch_state) if torch.cuda.is_available(): torch.cuda.set_rng_state_all(torch_cuda_states) def get_state_dict(model, unwrap_compiled=True): """ Get the state dict of a model. If the model is compiled, unwrap it first. 
""" if unwrap_compiled: model = getattr(model, "_orig_mod", model) return model.state_dict() @lru_cache def load_dataset_english_quotes(): # can't use pytest fixtures for now because of unittest style tests data = load_dataset("ybelkada/english_quotes_copy") return data @lru_cache def load_cat_image(): # can't use pytest fixtures for now because of unittest style tests dataset = load_dataset("huggingface/cats-image", trust_remote_code=True) image = dataset["test"]["image"][0] return image def set_init_weights_false(config_cls, kwargs): # helper function that sets the config kwargs such that the model is *not* initialized as an identity transform kwargs = kwargs.copy() if issubclass(config_cls, PromptLearningConfig): return kwargs if config_cls in (LNTuningConfig, VBLoRAConfig): return kwargs if config_cls in (LoraConfig, AdaLoraConfig): kwargs["init_lora_weights"] = False elif config_cls == IA3Config: kwargs["init_ia3_weights"] = False else: kwargs["init_weights"] = False return kwargs @contextmanager def hub_online_once(model_id: str): """Set env[HF_HUB_OFFLINE]=1 (and patch transformers/hugging_face_hub to think that it was always that way) for model ids that were already to avoid contacting the hub twice for the same model id in the context. The global variable `_HUB_MODEL_ACCESSES` tracks the number of hits per model id between `hub_online_once` calls. The reason for doing a context manager and not patching specific methods (e.g., `from_pretrained`) is that there are a lot of places (`PeftConfig.from_pretrained`, `get_peft_state_dict`, `load_adapter`, ...) that possibly communicate with the hub to download files / check versions / etc. Note that using this context manager can cause problems when used in code sections that access different resources. Example: ``` def test_something(model_id, config_kwargs): with hub_online_once(model_id): model = ...from_pretrained(model_id) self.do_something_specific_with_model(model) ``` It is assumed that `do_something_specific_with_model` is an absract method that is implement by several tests. Imagine the first test simply does `model.generate([1,2,3])`. The second call from another test suite however uses a tokenizer (`AutoTokenizer.from_pretrained(model_id)`) - this will fail since the first pass was online but didn't use the tokenizer and we're now in offline mode and cannot fetch the tokenizer. The recommended workaround is to extend the cache key (`model_id` passed to `hub_online_once` in this case) by something in case the tokenizer is used, so that these tests don't share a cache pool with the tests that don't use a tokenizer. It is best to avoid using this context manager in *yield* fixtures (normal fixtures are fine) as this is equivalent to wrapping the whole test in the context manager without explicitly writing it out, leading to unexpected `HF_HUB_OFFLINE` behavior in the test body. """ global _HUB_MODEL_ACCESSES override = {} try: if model_id in _HUB_MODEL_ACCESSES: override = {"HF_HUB_OFFLINE": "1"} _HUB_MODEL_ACCESSES[model_id] += 1 else: if model_id not in _HUB_MODEL_ACCESSES: _HUB_MODEL_ACCESSES[model_id] = 0 with ( # strictly speaking it is not necessary to set the environment variable since most code that's out there # is evaluating it at import time and we'd have to reload the modules for it to take effect. It's # probably still a good idea to have it if there's some dynamic code that checks it. 
mock.patch.dict(os.environ, override), mock.patch("huggingface_hub.constants.HF_HUB_OFFLINE", override.get("HF_HUB_OFFLINE", False) == "1"), mock.patch("transformers.utils.hub._is_offline_mode", override.get("HF_HUB_OFFLINE", False) == "1"), ): yield except Exception: # in case of an error we have to assume that we didn't access the model properly from the hub # for the first time, so the next call cannot be considered cached. if _HUB_MODEL_ACCESSES.get(model_id) == 0: del _HUB_MODEL_ACCESSES[model_id] raise
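# Illustrative sketch (not part of the original utilities): following the recommendation in the
# `hub_online_once` docstring above, a test that also loads a tokenizer can extend the cache key
# so it does not share an offline-cache pool with tests that only load the model, e.g.:
#
#     with hub_online_once(model_id + "_with_tokenizer"):
#         model = AutoModelForCausalLM.from_pretrained(model_id)   # hypothetical test body
#         tokenizer = AutoTokenizer.from_pretrained(model_id)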
peft/tests/testing_utils.py/0
{ "file_path": "peft/tests/testing_utils.py", "repo_id": "peft", "token_count": 3986 }
246
#!/usr/bin/env python3 """ Model Benchmark Script An inference and train step benchmark script for timm models. Hacked together by Ross Wightman (https://github.com/rwightman) """ import argparse import csv import json import logging import time from collections import OrderedDict from contextlib import suppress from functools import partial import torch import torch.nn as nn import torch.nn.parallel from timm.data import resolve_data_config from timm.layers import set_fast_norm from timm.models import create_model, is_model, list_models from timm.optim import create_optimizer_v2 from timm.utils import setup_default_logging, set_jit_fuser, decay_batch_step, check_batch_size_retry, ParseKwargs,\ reparameterize_model has_apex = False try: from apex import amp has_apex = True except ImportError: pass try: from deepspeed.profiling.flops_profiler import get_model_profile has_deepspeed_profiling = True except ImportError as e: has_deepspeed_profiling = False try: from fvcore.nn import FlopCountAnalysis, flop_count_str, ActivationCountAnalysis has_fvcore_profiling = True except ImportError as e: FlopCountAnalysis = None has_fvcore_profiling = False try: from functorch.compile import memory_efficient_fusion has_functorch = True except ImportError as e: has_functorch = False has_compile = hasattr(torch, 'compile') if torch.cuda.is_available(): torch.backends.cuda.matmul.allow_tf32 = True torch.backends.cudnn.benchmark = True _logger = logging.getLogger('validate') parser = argparse.ArgumentParser(description='PyTorch Benchmark') # benchmark specific args parser.add_argument('--model-list', metavar='NAME', default='', help='txt file based list of model names to benchmark') parser.add_argument('--bench', default='both', type=str, help="Benchmark mode. One of 'inference', 'train', 'both'. Defaults to 'both'") parser.add_argument('--detail', action='store_true', default=False, help='Provide train fwd/bwd/opt breakdown detail if True. Defaults to False') parser.add_argument('--no-retry', action='store_true', default=False, help='Do not decay batch size and retry on error.') parser.add_argument('--results-file', default='', type=str, help='Output csv file for validation results (summary)') parser.add_argument('--results-format', default='csv', type=str, help='Format for results file one of (csv, json) (default: csv).') parser.add_argument('--num-warm-iter', default=10, type=int, help='Number of warmup iterations (default: 10)') parser.add_argument('--num-bench-iter', default=40, type=int, help='Number of benchmark iterations (default: 40)') parser.add_argument('--device', default='cuda', type=str, help="device to run benchmark on") # common inference / train args parser.add_argument('--model', '-m', metavar='NAME', default='resnet50', help='model architecture (default: resnet50)') parser.add_argument('-b', '--batch-size', default=256, type=int, metavar='N', help='mini-batch size (default: 256)') parser.add_argument('--img-size', default=None, type=int, metavar='N', help='Input image dimension, uses model default if empty') parser.add_argument('--input-size', default=None, nargs=3, type=int, metavar='N', help='Input all image dimensions (d h w, e.g. 
--input-size 3 224 224), uses model default if empty') parser.add_argument('--use-train-size', action='store_true', default=False, help='Run inference at train size, not test-input-size if it exists.') parser.add_argument('--num-classes', type=int, default=None, help='Number classes in dataset') parser.add_argument('--gp', default=None, type=str, metavar='POOL', help='Global pool type, one of (fast, avg, max, avgmax, avgmaxc). Model default if None.') parser.add_argument('--channels-last', action='store_true', default=False, help='Use channels_last memory layout') parser.add_argument('--grad-checkpointing', action='store_true', default=False, help='Enable gradient checkpointing through model blocks/stages') parser.add_argument('--amp', action='store_true', default=False, help='use PyTorch Native AMP for mixed precision training. Overrides --precision arg.') parser.add_argument('--amp-dtype', default='float16', type=str, help='lower precision AMP dtype (default: float16). Overrides --precision arg if args.amp True.') parser.add_argument('--precision', default='float32', type=str, help='Numeric precision. One of (amp, float32, float16, bfloat16, tf32)') parser.add_argument('--fuser', default='', type=str, help="Select jit fuser. One of ('', 'te', 'old', 'nvfuser')") parser.add_argument('--fast-norm', default=False, action='store_true', help='enable experimental fast-norm') parser.add_argument('--reparam', default=False, action='store_true', help='Reparameterize model') parser.add_argument('--model-kwargs', nargs='*', default={}, action=ParseKwargs) parser.add_argument('--torchcompile-mode', type=str, default=None, help="torch.compile mode (default: None).") # codegen (model compilation) options scripting_group = parser.add_mutually_exclusive_group() scripting_group.add_argument('--torchscript', dest='torchscript', action='store_true', help='convert model torchscript for inference') scripting_group.add_argument('--torchcompile', nargs='?', type=str, default=None, const='inductor', help="Enable compilation w/ specified backend (default: inductor).") scripting_group.add_argument('--aot-autograd', default=False, action='store_true', help="Enable AOT Autograd optimization.") # train optimizer parameters parser.add_argument('--opt', default='sgd', type=str, metavar='OPTIMIZER', help='Optimizer (default: "sgd"') parser.add_argument('--opt-eps', default=None, type=float, metavar='EPSILON', help='Optimizer Epsilon (default: None, use opt default)') parser.add_argument('--opt-betas', default=None, type=float, nargs='+', metavar='BETA', help='Optimizer Betas (default: None, use opt default)') parser.add_argument('--momentum', type=float, default=0.9, metavar='M', help='Optimizer momentum (default: 0.9)') parser.add_argument('--weight-decay', type=float, default=0.0001, help='weight decay (default: 0.0001)') parser.add_argument('--clip-grad', type=float, default=None, metavar='NORM', help='Clip gradient norm (default: None, no clipping)') parser.add_argument('--clip-mode', type=str, default='norm', help='Gradient clipping mode. 
One of ("norm", "value", "agc")') # model regularization / loss params that impact model or loss fn parser.add_argument('--smoothing', type=float, default=0.1, help='Label smoothing (default: 0.1)') parser.add_argument('--drop', type=float, default=0.0, metavar='PCT', help='Dropout rate (default: 0.)') parser.add_argument('--drop-path', type=float, default=None, metavar='PCT', help='Drop path rate (default: None)') parser.add_argument('--drop-block', type=float, default=None, metavar='PCT', help='Drop block rate (default: None)') def timestamp(sync=False): return time.perf_counter() def cuda_timestamp(sync=False, device=None): if sync: torch.cuda.synchronize(device=device) return time.perf_counter() def count_params(model: nn.Module): return sum([m.numel() for m in model.parameters()]) def resolve_precision(precision: str): assert precision in ('amp', 'amp_bfloat16', 'float16', 'bfloat16', 'float32') amp_dtype = None # amp disabled model_dtype = torch.float32 data_dtype = torch.float32 if precision == 'amp': amp_dtype = torch.float16 elif precision == 'amp_bfloat16': amp_dtype = torch.bfloat16 elif precision == 'float16': model_dtype = torch.float16 data_dtype = torch.float16 elif precision == 'bfloat16': model_dtype = torch.bfloat16 data_dtype = torch.bfloat16 return amp_dtype, model_dtype, data_dtype def profile_deepspeed(model, input_size=(3, 224, 224), batch_size=1, detailed=False): _, macs, _ = get_model_profile( model=model, input_shape=(batch_size,) + input_size, # input shape/resolution print_profile=detailed, # prints the model graph with the measured profile attached to each module detailed=detailed, # print the detailed profile warm_up=10, # the number of warm-ups before measuring the time of each module as_string=False, # print raw numbers (e.g. 1000) or as human-readable strings (e.g. 1k) output_file=None, # path to the output file. If None, the profiler prints to stdout. 
ignore_modules=None) # the list of modules to ignore in the profiling return macs, 0 # no activation count in DS def profile_fvcore(model, input_size=(3, 224, 224), batch_size=1, detailed=False, force_cpu=False): if force_cpu: model = model.to('cpu') device, dtype = next(model.parameters()).device, next(model.parameters()).dtype example_input = torch.ones((batch_size,) + input_size, device=device, dtype=dtype) fca = FlopCountAnalysis(model, example_input) aca = ActivationCountAnalysis(model, example_input) if detailed: fcs = flop_count_str(fca) print(fcs) return fca.total(), aca.total() class BenchmarkRunner: def __init__( self, model_name, detail=False, device='cuda', torchscript=False, torchcompile=None, torchcompile_mode=None, aot_autograd=False, reparam=False, precision='float32', fuser='', num_warm_iter=10, num_bench_iter=50, use_train_size=False, **kwargs ): self.model_name = model_name self.detail = detail self.device = device self.amp_dtype, self.model_dtype, self.data_dtype = resolve_precision(precision) self.channels_last = kwargs.pop('channels_last', False) if self.amp_dtype is not None: self.amp_autocast = partial(torch.amp.autocast, device_type=device, dtype=self.amp_dtype) else: self.amp_autocast = suppress if fuser: set_jit_fuser(fuser) self.model = create_model( model_name, num_classes=kwargs.pop('num_classes', None), in_chans=3, global_pool=kwargs.pop('gp', 'fast'), scriptable=torchscript, drop_rate=kwargs.pop('drop', 0.), drop_path_rate=kwargs.pop('drop_path', None), drop_block_rate=kwargs.pop('drop_block', None), **kwargs.pop('model_kwargs', {}), ) if reparam: self.model = reparameterize_model(self.model) self.model.to( device=self.device, dtype=self.model_dtype, memory_format=torch.channels_last if self.channels_last else None, ) self.num_classes = self.model.num_classes self.param_count = count_params(self.model) _logger.info('Model %s created, param count: %d' % (model_name, self.param_count)) data_config = resolve_data_config(kwargs, model=self.model, use_test_size=not use_train_size) self.input_size = data_config['input_size'] self.batch_size = kwargs.pop('batch_size', 256) self.compiled = False if torchscript: self.model = torch.jit.script(self.model) self.compiled = True elif torchcompile: assert has_compile, 'A version of torch w/ torch.compile() is required, possibly a nightly.' 
torch._dynamo.reset() self.model = torch.compile(self.model, backend=torchcompile, mode=torchcompile_mode) self.compiled = True elif aot_autograd: assert has_functorch, "functorch is needed for --aot-autograd" self.model = memory_efficient_fusion(self.model) self.compiled = True self.example_inputs = None self.num_warm_iter = num_warm_iter self.num_bench_iter = num_bench_iter self.log_freq = num_bench_iter // 5 if 'cuda' in self.device: self.time_fn = partial(cuda_timestamp, device=self.device) else: self.time_fn = timestamp def _init_input(self): self.example_inputs = torch.randn( (self.batch_size,) + self.input_size, device=self.device, dtype=self.data_dtype) if self.channels_last: self.example_inputs = self.example_inputs.contiguous(memory_format=torch.channels_last) class InferenceBenchmarkRunner(BenchmarkRunner): def __init__( self, model_name, device='cuda', torchscript=False, **kwargs ): super().__init__(model_name=model_name, device=device, torchscript=torchscript, **kwargs) self.model.eval() def run(self): def _step(): t_step_start = self.time_fn() with self.amp_autocast(): output = self.model(self.example_inputs) t_step_end = self.time_fn(True) return t_step_end - t_step_start _logger.info( f'Running inference benchmark on {self.model_name} for {self.num_bench_iter} steps w/ ' f'input size {self.input_size} and batch size {self.batch_size}.') with torch.inference_mode(): self._init_input() for _ in range(self.num_warm_iter): _step() total_step = 0. num_samples = 0 t_run_start = self.time_fn() for i in range(self.num_bench_iter): delta_fwd = _step() total_step += delta_fwd num_samples += self.batch_size num_steps = i + 1 if num_steps % self.log_freq == 0: _logger.info( f"Infer [{num_steps}/{self.num_bench_iter}]." f" {num_samples / total_step:0.2f} samples/sec." f" {1000 * total_step / num_steps:0.3f} ms/step.") t_run_end = self.time_fn(True) t_run_elapsed = t_run_end - t_run_start results = dict( samples_per_sec=round(num_samples / t_run_elapsed, 2), step_time=round(1000 * total_step / self.num_bench_iter, 3), batch_size=self.batch_size, img_size=self.input_size[-1], param_count=round(self.param_count / 1e6, 2), ) retries = 0 if self.compiled else 2 # skip profiling if model is scripted while retries: retries -= 1 try: if has_deepspeed_profiling: macs, _ = profile_deepspeed(self.model, self.input_size) results['gmacs'] = round(macs / 1e9, 2) elif has_fvcore_profiling: macs, activations = profile_fvcore(self.model, self.input_size, force_cpu=not retries) results['gmacs'] = round(macs / 1e9, 2) results['macts'] = round(activations / 1e6, 2) except RuntimeError as e: pass _logger.info( f"Inference benchmark of {self.model_name} done. " f"{results['samples_per_sec']:.2f} samples/sec, {results['step_time']:.2f} ms/step") return results class TrainBenchmarkRunner(BenchmarkRunner): def __init__( self, model_name, device='cuda', torchscript=False, **kwargs ): super().__init__(model_name=model_name, device=device, torchscript=torchscript, **kwargs) self.model.train() self.loss = nn.CrossEntropyLoss().to(self.device) self.target_shape = tuple() self.optimizer = create_optimizer_v2( self.model, opt=kwargs.pop('opt', 'sgd'), lr=kwargs.pop('lr', 1e-4)) if kwargs.pop('grad_checkpointing', False): self.model.set_grad_checkpointing() def _gen_target(self, batch_size): return torch.empty( (batch_size,) + self.target_shape, device=self.device, dtype=torch.long).random_(self.num_classes) def run(self): def _step(detail=False): self.optimizer.zero_grad() # can this be ignored? 
t_start = self.time_fn() t_fwd_end = t_start t_bwd_end = t_start with self.amp_autocast(): output = self.model(self.example_inputs) if isinstance(output, tuple): output = output[0] if detail: t_fwd_end = self.time_fn(True) target = self._gen_target(output.shape[0]) self.loss(output, target).backward() if detail: t_bwd_end = self.time_fn(True) self.optimizer.step() t_end = self.time_fn(True) if detail: delta_fwd = t_fwd_end - t_start delta_bwd = t_bwd_end - t_fwd_end delta_opt = t_end - t_bwd_end return delta_fwd, delta_bwd, delta_opt else: delta_step = t_end - t_start return delta_step _logger.info( f'Running train benchmark on {self.model_name} for {self.num_bench_iter} steps w/ ' f'input size {self.input_size} and batch size {self.batch_size}.') self._init_input() for _ in range(self.num_warm_iter): _step() t_run_start = self.time_fn() if self.detail: total_fwd = 0. total_bwd = 0. total_opt = 0. num_samples = 0 for i in range(self.num_bench_iter): delta_fwd, delta_bwd, delta_opt = _step(True) num_samples += self.batch_size total_fwd += delta_fwd total_bwd += delta_bwd total_opt += delta_opt num_steps = (i + 1) if num_steps % self.log_freq == 0: total_step = total_fwd + total_bwd + total_opt _logger.info( f"Train [{num_steps}/{self.num_bench_iter}]." f" {num_samples / total_step:0.2f} samples/sec." f" {1000 * total_fwd / num_steps:0.3f} ms/step fwd," f" {1000 * total_bwd / num_steps:0.3f} ms/step bwd," f" {1000 * total_opt / num_steps:0.3f} ms/step opt." ) total_step = total_fwd + total_bwd + total_opt t_run_elapsed = self.time_fn() - t_run_start results = dict( samples_per_sec=round(num_samples / t_run_elapsed, 2), step_time=round(1000 * total_step / self.num_bench_iter, 3), fwd_time=round(1000 * total_fwd / self.num_bench_iter, 3), bwd_time=round(1000 * total_bwd / self.num_bench_iter, 3), opt_time=round(1000 * total_opt / self.num_bench_iter, 3), batch_size=self.batch_size, img_size=self.input_size[-1], param_count=round(self.param_count / 1e6, 2), ) else: total_step = 0. num_samples = 0 for i in range(self.num_bench_iter): delta_step = _step(False) num_samples += self.batch_size total_step += delta_step num_steps = (i + 1) if num_steps % self.log_freq == 0: _logger.info( f"Train [{num_steps}/{self.num_bench_iter}]." f" {num_samples / total_step:0.2f} samples/sec." f" {1000 * total_step / num_steps:0.3f} ms/step.") t_run_elapsed = self.time_fn() - t_run_start results = dict( samples_per_sec=round(num_samples / t_run_elapsed, 2), step_time=round(1000 * total_step / self.num_bench_iter, 3), batch_size=self.batch_size, img_size=self.input_size[-1], param_count=round(self.param_count / 1e6, 2), ) _logger.info( f"Train benchmark of {self.model_name} done. " f"{results['samples_per_sec']:.2f} samples/sec, {results['step_time']:.2f} ms/sample") return results class ProfileRunner(BenchmarkRunner): def __init__(self, model_name, device='cuda', profiler='', **kwargs): super().__init__(model_name=model_name, device=device, **kwargs) if not profiler: if has_deepspeed_profiling: profiler = 'deepspeed' elif has_fvcore_profiling: profiler = 'fvcore' assert profiler, "One of deepspeed or fvcore needs to be installed for profiling to work." 
self.profiler = profiler self.model.eval() def run(self): _logger.info( f'Running profiler on {self.model_name} w/ ' f'input size {self.input_size} and batch size {self.batch_size}.') macs = 0 activations = 0 if self.profiler == 'deepspeed': macs, _ = profile_deepspeed(self.model, self.input_size, batch_size=self.batch_size, detailed=True) elif self.profiler == 'fvcore': macs, activations = profile_fvcore(self.model, self.input_size, batch_size=self.batch_size, detailed=True) results = dict( gmacs=round(macs / 1e9, 2), macts=round(activations / 1e6, 2), batch_size=self.batch_size, img_size=self.input_size[-1], param_count=round(self.param_count / 1e6, 2), ) _logger.info( f"Profile of {self.model_name} done. " f"{results['gmacs']:.2f} GMACs, {results['param_count']:.2f} M params.") return results def _try_run( model_name, bench_fn, bench_kwargs, initial_batch_size, no_batch_size_retry=False ): batch_size = initial_batch_size results = dict() error_str = 'Unknown' while batch_size: try: torch.cuda.empty_cache() bench = bench_fn(model_name=model_name, batch_size=batch_size, **bench_kwargs) results = bench.run() return results except RuntimeError as e: error_str = str(e) _logger.error(f'"{error_str}" while running benchmark.') if not check_batch_size_retry(error_str): _logger.error(f'Unrecoverable error encountered while benchmarking {model_name}, skipping.') break if no_batch_size_retry: break batch_size = decay_batch_step(batch_size) _logger.warning(f'Reducing batch size to {batch_size} for retry.') results['error'] = error_str return results def benchmark(args): if args.amp: _logger.warning("Overriding precision to 'amp' since --amp flag set.") args.precision = 'amp' if args.amp_dtype == 'float16' else '_'.join(['amp', args.amp_dtype]) _logger.info(f'Benchmarking in {args.precision} precision. ' f'{"NHWC" if args.channels_last else "NCHW"} layout. 
' f'torchscript {"enabled" if args.torchscript else "disabled"}') bench_kwargs = vars(args).copy() bench_kwargs.pop('amp') model = bench_kwargs.pop('model') batch_size = bench_kwargs.pop('batch_size') bench_fns = (InferenceBenchmarkRunner,) prefixes = ('infer',) if args.bench == 'both': bench_fns = ( InferenceBenchmarkRunner, TrainBenchmarkRunner ) prefixes = ('infer', 'train') elif args.bench == 'train': bench_fns = TrainBenchmarkRunner, prefixes = 'train', elif args.bench.startswith('profile'): # specific profiler used if included in bench mode string, otherwise default to deepspeed, fallback to fvcore if 'deepspeed' in args.bench: assert has_deepspeed_profiling, "deepspeed must be installed to use deepspeed flop counter" bench_kwargs['profiler'] = 'deepspeed' elif 'fvcore' in args.bench: assert has_fvcore_profiling, "fvcore must be installed to use fvcore flop counter" bench_kwargs['profiler'] = 'fvcore' bench_fns = ProfileRunner, batch_size = 1 model_results = OrderedDict(model=model) for prefix, bench_fn in zip(prefixes, bench_fns): run_results = _try_run( model, bench_fn, bench_kwargs=bench_kwargs, initial_batch_size=batch_size, no_batch_size_retry=args.no_retry, ) if prefix and 'error' not in run_results: run_results = {'_'.join([prefix, k]): v for k, v in run_results.items()} model_results.update(run_results) if 'error' in run_results: break if 'error' not in model_results: param_count = model_results.pop('infer_param_count', model_results.pop('train_param_count', 0)) model_results.setdefault('param_count', param_count) model_results.pop('train_param_count', 0) return model_results def main(): setup_default_logging() args = parser.parse_args() model_cfgs = [] model_names = [] if args.fast_norm: set_fast_norm() if args.model_list: args.model = '' with open(args.model_list) as f: model_names = [line.rstrip() for line in f] model_cfgs = [(n, None) for n in model_names] elif args.model == 'all': # validate all models in a list of names with pretrained checkpoints args.pretrained = True model_names = list_models(pretrained=True, exclude_filters=['*in21k']) model_cfgs = [(n, None) for n in model_names] elif not is_model(args.model): # model name doesn't exist, try as wildcard filter model_names = list_models(args.model) model_cfgs = [(n, None) for n in model_names] if len(model_cfgs): _logger.info('Running bulk validation on these pretrained models: {}'.format(', '.join(model_names))) results = [] try: for m, _ in model_cfgs: if not m: continue args.model = m r = benchmark(args) if r: results.append(r) time.sleep(10) except KeyboardInterrupt as e: pass sort_key = 'infer_samples_per_sec' if 'train' in args.bench: sort_key = 'train_samples_per_sec' elif 'profile' in args.bench: sort_key = 'infer_gmacs' results = filter(lambda x: sort_key in x, results) results = sorted(results, key=lambda x: x[sort_key], reverse=True) else: results = benchmark(args) if args.results_file: write_results(args.results_file, results, format=args.results_format) # output results in JSON to stdout w/ delimiter for runner script print(f'--result\n{json.dumps(results, indent=4)}') def write_results(results_file, results, format='csv'): with open(results_file, mode='w') as cf: if format == 'json': json.dump(results, cf, indent=4) else: if not isinstance(results, (list, tuple)): results = [results] if not results: return dw = csv.DictWriter(cf, fieldnames=results[0].keys()) dw.writeheader() for r in results: dw.writerow(r) cf.flush() if __name__ == '__main__': main()
pytorch-image-models/benchmark.py/0
{ "file_path": "pytorch-image-models/benchmark.py", "repo_id": "pytorch-image-models", "token_count": 13297 }
247
# Big Transfer (BiT) **Big Transfer (BiT)** is a type of pretraining recipe that pre-trains on a large supervised source dataset, and fine-tunes the weights on the target task. Models are trained on the JFT-300M dataset. The finetuned models contained in this collection are finetuned on ImageNet. ## How do I use this model on an image? To load a pretrained model: ```py >>> import timm >>> model = timm.create_model('resnetv2_101x1_bitm', pretrained=True) >>> model.eval() ``` To load and preprocess the image: ```py >>> import urllib >>> from PIL import Image >>> from timm.data import resolve_data_config >>> from timm.data.transforms_factory import create_transform >>> config = resolve_data_config({}, model=model) >>> transform = create_transform(**config) >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") >>> urllib.request.urlretrieve(url, filename) >>> img = Image.open(filename).convert('RGB') >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension ``` To get the model predictions: ```py >>> import torch >>> with torch.inference_mode(): ... out = model(tensor) >>> probabilities = torch.nn.functional.softmax(out[0], dim=0) >>> print(probabilities.shape) >>> # prints: torch.Size([1000]) ``` To get the top-5 predictions class names: ```py >>> # Get imagenet class mappings >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") >>> urllib.request.urlretrieve(url, filename) >>> with open("imagenet_classes.txt", "r") as f: ... categories = [s.strip() for s in f.readlines()] >>> # Print top categories per image >>> top5_prob, top5_catid = torch.topk(probabilities, 5) >>> for i in range(top5_prob.size(0)): ... print(categories[top5_catid[i]], top5_prob[i].item()) >>> # prints class names and probabilities like: >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] ``` Replace the model name with the variant you want to use, e.g. `resnetv2_101x1_bitm`. You can find the IDs in the model summaries at the top of this page. To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use. ## How do I finetune this model? You can finetune any of the pre-trained models just by changing the classifier (the last layer). ```py >>> model = timm.create_model('resnetv2_101x1_bitm', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) ``` To finetune on your own dataset, you have to write a training loop or adapt [timm's training script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. ## How do I train this model? You can follow the [timm recipe scripts](../training_script) for training a new model afresh. 
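If you would rather not adapt the full training script, a bare-bones fine-tuning loop can look like the sketch below. This is only an illustration: `train_loader` and `NUM_FINETUNE_CLASSES` are placeholders for your own dataloader and class count, and the hyperparameters shown are not the recipe used for the BiT checkpoints.

```py
>>> import timm
>>> import torch
>>> model = timm.create_model('resnetv2_101x1_bitm', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
>>> model.train()
>>> optimizer = torch.optim.SGD(model.parameters(), lr=0.003, momentum=0.9)
>>> criterion = torch.nn.CrossEntropyLoss()
>>> for images, targets in train_loader:  # train_loader: your own DataLoader of (image, label) batches
...     optimizer.zero_grad()
...     loss = criterion(model(images), targets)
...     loss.backward()
...     optimizer.step()
```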
## Citation ```BibTeX @misc{kolesnikov2020big, title={Big Transfer (BiT): General Visual Representation Learning}, author={Alexander Kolesnikov and Lucas Beyer and Xiaohua Zhai and Joan Puigcerver and Jessica Yung and Sylvain Gelly and Neil Houlsby}, year={2020}, eprint={1912.11370}, archivePrefix={arXiv}, primaryClass={cs.CV} } ``` <!-- Type: model-index Collections: - Name: Big Transfer Paper: Title: 'Big Transfer (BiT): General Visual Representation Learning' URL: https://paperswithcode.com/paper/large-scale-learning-of-general-visual Models: - Name: resnetv2_101x1_bitm In Collection: Big Transfer Metadata: FLOPs: 5330896 Parameters: 44540000 File Size: 178256468 Architecture: - 1x1 Convolution - Bottleneck Residual Block - Convolution - Global Average Pooling - Group Normalization - Max Pooling - ReLU - Residual Block - Residual Connection - Softmax - Weight Standardization Tasks: - Image Classification Training Techniques: - Mixup - SGD with Momentum - Weight Decay Training Data: - ImageNet - JFT-300M Training Resources: Cloud TPUv3-512 ID: resnetv2_101x1_bitm LR: 0.03 Epochs: 90 Layers: 101 Crop Pct: '1.0' Momentum: 0.9 Batch Size: 4096 Image Size: '480' Weight Decay: 0.0001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/b9843f954b0457af2db4f9dea41a8538f51f5d78/timm/models/resnetv2.py#L444 Weights: https://storage.googleapis.com/bit_models/BiT-M-R101x1-ILSVRC2012.npz Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 82.21% Top 5 Accuracy: 96.47% - Name: resnetv2_101x3_bitm In Collection: Big Transfer Metadata: FLOPs: 15988688 Parameters: 387930000 File Size: 1551830100 Architecture: - 1x1 Convolution - Bottleneck Residual Block - Convolution - Global Average Pooling - Group Normalization - Max Pooling - ReLU - Residual Block - Residual Connection - Softmax - Weight Standardization Tasks: - Image Classification Training Techniques: - Mixup - SGD with Momentum - Weight Decay Training Data: - ImageNet - JFT-300M Training Resources: Cloud TPUv3-512 ID: resnetv2_101x3_bitm LR: 0.03 Epochs: 90 Layers: 101 Crop Pct: '1.0' Momentum: 0.9 Batch Size: 4096 Image Size: '480' Weight Decay: 0.0001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/b9843f954b0457af2db4f9dea41a8538f51f5d78/timm/models/resnetv2.py#L451 Weights: https://storage.googleapis.com/bit_models/BiT-M-R101x3-ILSVRC2012.npz Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 84.38% Top 5 Accuracy: 97.37% - Name: resnetv2_152x2_bitm In Collection: Big Transfer Metadata: FLOPs: 10659792 Parameters: 236340000 File Size: 945476668 Architecture: - 1x1 Convolution - Bottleneck Residual Block - Convolution - Global Average Pooling - Group Normalization - Max Pooling - ReLU - Residual Block - Residual Connection - Softmax - Weight Standardization Tasks: - Image Classification Training Techniques: - Mixup - SGD with Momentum - Weight Decay Training Data: - ImageNet - JFT-300M ID: resnetv2_152x2_bitm Crop Pct: '1.0' Image Size: '480' Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/b9843f954b0457af2db4f9dea41a8538f51f5d78/timm/models/resnetv2.py#L458 Weights: https://storage.googleapis.com/bit_models/BiT-M-R152x2-ILSVRC2012.npz Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 84.4% Top 5 Accuracy: 97.43% - Name: resnetv2_152x4_bitm In Collection: Big Transfer Metadata: FLOPs: 21317584 Parameters: 936530000 File Size: 3746270104 Architecture: 
- 1x1 Convolution - Bottleneck Residual Block - Convolution - Global Average Pooling - Group Normalization - Max Pooling - ReLU - Residual Block - Residual Connection - Softmax - Weight Standardization Tasks: - Image Classification Training Techniques: - Mixup - SGD with Momentum - Weight Decay Training Data: - ImageNet - JFT-300M Training Resources: Cloud TPUv3-512 ID: resnetv2_152x4_bitm Crop Pct: '1.0' Image Size: '480' Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/b9843f954b0457af2db4f9dea41a8538f51f5d78/timm/models/resnetv2.py#L465 Weights: https://storage.googleapis.com/bit_models/BiT-M-R152x4-ILSVRC2012.npz Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 84.95% Top 5 Accuracy: 97.45% - Name: resnetv2_50x1_bitm In Collection: Big Transfer Metadata: FLOPs: 5330896 Parameters: 25550000 File Size: 102242668 Architecture: - 1x1 Convolution - Bottleneck Residual Block - Convolution - Global Average Pooling - Group Normalization - Max Pooling - ReLU - Residual Block - Residual Connection - Softmax - Weight Standardization Tasks: - Image Classification Training Techniques: - Mixup - SGD with Momentum - Weight Decay Training Data: - ImageNet - JFT-300M Training Resources: Cloud TPUv3-512 ID: resnetv2_50x1_bitm LR: 0.03 Epochs: 90 Layers: 50 Crop Pct: '1.0' Momentum: 0.9 Batch Size: 4096 Image Size: '480' Weight Decay: 0.0001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/b9843f954b0457af2db4f9dea41a8538f51f5d78/timm/models/resnetv2.py#L430 Weights: https://storage.googleapis.com/bit_models/BiT-M-R50x1-ILSVRC2012.npz Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 80.19% Top 5 Accuracy: 95.63% - Name: resnetv2_50x3_bitm In Collection: Big Transfer Metadata: FLOPs: 15988688 Parameters: 217320000 File Size: 869321580 Architecture: - 1x1 Convolution - Bottleneck Residual Block - Convolution - Global Average Pooling - Group Normalization - Max Pooling - ReLU - Residual Block - Residual Connection - Softmax - Weight Standardization Tasks: - Image Classification Training Techniques: - Mixup - SGD with Momentum - Weight Decay Training Data: - ImageNet - JFT-300M Training Resources: Cloud TPUv3-512 ID: resnetv2_50x3_bitm LR: 0.03 Epochs: 90 Layers: 50 Crop Pct: '1.0' Momentum: 0.9 Batch Size: 4096 Image Size: '480' Weight Decay: 0.0001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/b9843f954b0457af2db4f9dea41a8538f51f5d78/timm/models/resnetv2.py#L437 Weights: https://storage.googleapis.com/bit_models/BiT-M-R50x3-ILSVRC2012.npz Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 83.75% Top 5 Accuracy: 97.12% -->
pytorch-image-models/hfdocs/source/models/big-transfer.mdx/0
{ "file_path": "pytorch-image-models/hfdocs/source/models/big-transfer.mdx", "repo_id": "pytorch-image-models", "token_count": 4104 }
248
# Noisy Student (EfficientNet) **Noisy Student Training** is a semi-supervised learning approach. It extends the idea of self-training and distillation with the use of equal-or-larger student models and noise added to the student during learning. It has three main steps: 1. train a teacher model on labeled images 2. use the teacher to generate pseudo labels on unlabeled images 3. train a student model on the combination of labeled images and pseudo labeled images. The algorithm is iterated a few times by treating the student as a teacher to relabel the unlabeled data and training a new student. Noisy Student Training seeks to improve on self-training and distillation in two ways. First, it makes the student larger than, or at least equal to, the teacher so the student can better learn from a larger dataset. Second, it adds noise to the student so the noised student is forced to learn harder from the pseudo labels. To noise the student, it uses input noise such as RandAugment data augmentation, and model noise such as dropout and stochastic depth during training. ## How do I use this model on an image? To load a pretrained model: ```py >>> import timm >>> model = timm.create_model('tf_efficientnet_b0_ns', pretrained=True) >>> model.eval() ``` To load and preprocess the image: ```py >>> import urllib >>> from PIL import Image >>> from timm.data import resolve_data_config >>> from timm.data.transforms_factory import create_transform >>> config = resolve_data_config({}, model=model) >>> transform = create_transform(**config) >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") >>> urllib.request.urlretrieve(url, filename) >>> img = Image.open(filename).convert('RGB') >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension ``` To get the model predictions: ```py >>> import torch >>> with torch.inference_mode(): ... out = model(tensor) >>> probabilities = torch.nn.functional.softmax(out[0], dim=0) >>> print(probabilities.shape) >>> # prints: torch.Size([1000]) ``` To get the top-5 predictions class names: ```py >>> # Get imagenet class mappings >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") >>> urllib.request.urlretrieve(url, filename) >>> with open("imagenet_classes.txt", "r") as f: ... categories = [s.strip() for s in f.readlines()] >>> # Print top categories per image >>> top5_prob, top5_catid = torch.topk(probabilities, 5) >>> for i in range(top5_prob.size(0)): ... print(categories[top5_catid[i]], top5_prob[i].item()) >>> # prints class names and probabilities like: >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] ``` Replace the model name with the variant you want to use, e.g. `tf_efficientnet_b0_ns`. You can find the IDs in the model summaries at the top of this page. To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use. ## How do I finetune this model? You can finetune any of the pre-trained models just by changing the classifier (the last layer). 
```py >>> model = timm.create_model('tf_efficientnet_b0_ns', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) ``` To finetune on your own dataset, you have to write a training loop or adapt [timm's training script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. ## How do I train this model? You can follow the [timm recipe scripts](../training_script) for training a new model afresh. ## Citation ```BibTeX @misc{xie2020selftraining, title={Self-training with Noisy Student improves ImageNet classification}, author={Qizhe Xie and Minh-Thang Luong and Eduard Hovy and Quoc V. Le}, year={2020}, eprint={1911.04252}, archivePrefix={arXiv}, primaryClass={cs.LG} } ``` <!-- Type: model-index Collections: - Name: Noisy Student Paper: Title: Self-training with Noisy Student improves ImageNet classification URL: https://paperswithcode.com/paper/self-training-with-noisy-student-improves Models: - Name: tf_efficientnet_b0_ns In Collection: Noisy Student Metadata: FLOPs: 488688572 Parameters: 5290000 File Size: 21386709 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Techniques: - AutoAugment - FixRes - Label Smoothing - Noisy Student - RMSProp - RandAugment - Weight Decay Training Data: - ImageNet - JFT-300M Training Resources: Cloud TPU v3 Pod ID: tf_efficientnet_b0_ns LR: 0.128 Epochs: 700 Dropout: 0.5 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 2048 Image Size: '224' Weight Decay: 1.0e-05 Interpolation: bicubic RMSProp Decay: 0.9 Label Smoothing: 0.1 BatchNorm Momentum: 0.99 Stochastic Depth Survival: 0.8 Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1427 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b0_ns-c0e6a31c.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 78.66% Top 5 Accuracy: 94.37% - Name: tf_efficientnet_b1_ns In Collection: Noisy Student Metadata: FLOPs: 883633200 Parameters: 7790000 File Size: 31516408 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Techniques: - AutoAugment - FixRes - Label Smoothing - Noisy Student - RMSProp - RandAugment - Weight Decay Training Data: - ImageNet - JFT-300M Training Resources: Cloud TPU v3 Pod ID: tf_efficientnet_b1_ns LR: 0.128 Epochs: 700 Dropout: 0.5 Crop Pct: '0.882' Momentum: 0.9 Batch Size: 2048 Image Size: '240' Weight Decay: 1.0e-05 Interpolation: bicubic RMSProp Decay: 0.9 Label Smoothing: 0.1 BatchNorm Momentum: 0.99 Stochastic Depth Survival: 0.8 Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1437 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b1_ns-99dd0c41.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 81.39% Top 5 Accuracy: 95.74% - Name: tf_efficientnet_b2_ns In Collection: Noisy Student Metadata: FLOPs: 1234321170 Parameters: 9110000 File Size: 36801803 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - 
Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Techniques: - AutoAugment - FixRes - Label Smoothing - Noisy Student - RMSProp - RandAugment - Weight Decay Training Data: - ImageNet - JFT-300M Training Resources: Cloud TPU v3 Pod ID: tf_efficientnet_b2_ns LR: 0.128 Epochs: 700 Dropout: 0.5 Crop Pct: '0.89' Momentum: 0.9 Batch Size: 2048 Image Size: '260' Weight Decay: 1.0e-05 Interpolation: bicubic RMSProp Decay: 0.9 Label Smoothing: 0.1 BatchNorm Momentum: 0.99 Stochastic Depth Survival: 0.8 Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1447 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b2_ns-00306e48.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 82.39% Top 5 Accuracy: 96.24% - Name: tf_efficientnet_b3_ns In Collection: Noisy Student Metadata: FLOPs: 2275247568 Parameters: 12230000 File Size: 49385734 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Techniques: - AutoAugment - FixRes - Label Smoothing - Noisy Student - RMSProp - RandAugment - Weight Decay Training Data: - ImageNet - JFT-300M Training Resources: Cloud TPU v3 Pod ID: tf_efficientnet_b3_ns LR: 0.128 Epochs: 700 Dropout: 0.5 Crop Pct: '0.904' Momentum: 0.9 Batch Size: 2048 Image Size: '300' Weight Decay: 1.0e-05 Interpolation: bicubic RMSProp Decay: 0.9 Label Smoothing: 0.1 BatchNorm Momentum: 0.99 Stochastic Depth Survival: 0.8 Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1457 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b3_ns-9d44bf68.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 84.04% Top 5 Accuracy: 96.91% - Name: tf_efficientnet_b4_ns In Collection: Noisy Student Metadata: FLOPs: 5749638672 Parameters: 19340000 File Size: 77995057 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Techniques: - AutoAugment - FixRes - Label Smoothing - Noisy Student - RMSProp - RandAugment - Weight Decay Training Data: - ImageNet - JFT-300M Training Resources: Cloud TPU v3 Pod ID: tf_efficientnet_b4_ns LR: 0.128 Epochs: 700 Dropout: 0.5 Crop Pct: '0.922' Momentum: 0.9 Batch Size: 2048 Image Size: '380' Weight Decay: 1.0e-05 Interpolation: bicubic RMSProp Decay: 0.9 Label Smoothing: 0.1 BatchNorm Momentum: 0.99 Stochastic Depth Survival: 0.8 Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1467 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b4_ns-d6313a46.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 85.15% Top 5 Accuracy: 97.47% - Name: tf_efficientnet_b5_ns In Collection: Noisy Student Metadata: FLOPs: 13176501888 Parameters: 30390000 File Size: 122404944 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish 
Tasks: - Image Classification Training Techniques: - AutoAugment - FixRes - Label Smoothing - Noisy Student - RMSProp - RandAugment - Weight Decay Training Data: - ImageNet - JFT-300M Training Resources: Cloud TPU v3 Pod ID: tf_efficientnet_b5_ns LR: 0.128 Epochs: 350 Dropout: 0.5 Crop Pct: '0.934' Momentum: 0.9 Batch Size: 2048 Image Size: '456' Weight Decay: 1.0e-05 Interpolation: bicubic RMSProp Decay: 0.9 Label Smoothing: 0.1 BatchNorm Momentum: 0.99 Stochastic Depth Survival: 0.8 Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1477 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b5_ns-6f26d0cf.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 86.08% Top 5 Accuracy: 97.75% - Name: tf_efficientnet_b6_ns In Collection: Noisy Student Metadata: FLOPs: 24180518488 Parameters: 43040000 File Size: 173239537 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Techniques: - AutoAugment - FixRes - Label Smoothing - Noisy Student - RMSProp - RandAugment - Weight Decay Training Data: - ImageNet - JFT-300M Training Resources: Cloud TPU v3 Pod ID: tf_efficientnet_b6_ns LR: 0.128 Epochs: 350 Dropout: 0.5 Crop Pct: '0.942' Momentum: 0.9 Batch Size: 2048 Image Size: '528' Weight Decay: 1.0e-05 Interpolation: bicubic RMSProp Decay: 0.9 Label Smoothing: 0.1 BatchNorm Momentum: 0.99 Stochastic Depth Survival: 0.8 Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1487 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b6_ns-51548356.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 86.45% Top 5 Accuracy: 97.88% - Name: tf_efficientnet_b7_ns In Collection: Noisy Student Metadata: FLOPs: 48205304880 Parameters: 66349999 File Size: 266853140 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Techniques: - AutoAugment - FixRes - Label Smoothing - Noisy Student - RMSProp - RandAugment - Weight Decay Training Data: - ImageNet - JFT-300M Training Resources: Cloud TPU v3 Pod ID: tf_efficientnet_b7_ns LR: 0.128 Epochs: 350 Dropout: 0.5 Crop Pct: '0.949' Momentum: 0.9 Batch Size: 2048 Image Size: '600' Weight Decay: 1.0e-05 Interpolation: bicubic RMSProp Decay: 0.9 Label Smoothing: 0.1 BatchNorm Momentum: 0.99 Stochastic Depth Survival: 0.8 Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1498 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b7_ns-1dbc32de.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 86.83% Top 5 Accuracy: 98.08% - Name: tf_efficientnet_l2_ns In Collection: Noisy Student Metadata: FLOPs: 611646113804 Parameters: 480310000 File Size: 1925950424 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification 
Training Techniques: - AutoAugment - FixRes - Label Smoothing - Noisy Student - RMSProp - RandAugment - Weight Decay Training Data: - ImageNet - JFT-300M Training Resources: Cloud TPU v3 Pod Training Time: 6 days ID: tf_efficientnet_l2_ns LR: 0.128 Epochs: 350 Dropout: 0.5 Crop Pct: '0.96' Momentum: 0.9 Batch Size: 2048 Image Size: '800' Weight Decay: 1.0e-05 Interpolation: bicubic RMSProp Decay: 0.9 Label Smoothing: 0.1 BatchNorm Momentum: 0.99 Stochastic Depth Survival: 0.8 Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1520 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_l2_ns-df73bb44.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 88.35% Top 5 Accuracy: 98.66% -->
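If you want to enumerate the Noisy Student variants programmatically rather than reading the summaries above, `timm.list_models` accepts a wildcard filter. A short sketch (exact model names vary between `timm` versions; newer releases expose these weights as pretrained tags such as `tf_efficientnet_b0.ns_jft_in1k`):

```py
>>> import timm
>>> # Wildcard search over the registered EfficientNet architectures. Depending on
>>> # the timm version, the Noisy Student weights appear either under `*_ns` model
>>> # names (as used on this page) or as pretrained tags such as
>>> # `tf_efficientnet_b0.ns_jft_in1k`.
>>> timm.list_models('tf_efficientnet*')
```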
pytorch-image-models/hfdocs/source/models/noisy-student.mdx/0
{ "file_path": "pytorch-image-models/hfdocs/source/models/noisy-student.mdx", "repo_id": "pytorch-image-models", "token_count": 6686 }
249
# SPNASNet **Single-Path NAS** is a novel differentiable NAS method for designing hardware-efficient ConvNets in less than 4 hours. ## How do I use this model on an image? To load a pretrained model: ```py >>> import timm >>> model = timm.create_model('spnasnet_100', pretrained=True) >>> model.eval() ``` To load and preprocess the image: ```py >>> import urllib >>> from PIL import Image >>> from timm.data import resolve_data_config >>> from timm.data.transforms_factory import create_transform >>> config = resolve_data_config({}, model=model) >>> transform = create_transform(**config) >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") >>> urllib.request.urlretrieve(url, filename) >>> img = Image.open(filename).convert('RGB') >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension ``` To get the model predictions: ```py >>> import torch >>> with torch.inference_mode(): ... out = model(tensor) >>> probabilities = torch.nn.functional.softmax(out[0], dim=0) >>> print(probabilities.shape) >>> # prints: torch.Size([1000]) ``` To get the top-5 predictions class names: ```py >>> # Get imagenet class mappings >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") >>> urllib.request.urlretrieve(url, filename) >>> with open("imagenet_classes.txt", "r") as f: ... categories = [s.strip() for s in f.readlines()] >>> # Print top categories per image >>> top5_prob, top5_catid = torch.topk(probabilities, 5) >>> for i in range(top5_prob.size(0)): ... print(categories[top5_catid[i]], top5_prob[i].item()) >>> # prints class names and probabilities like: >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] ``` Replace the model name with the variant you want to use, e.g. `spnasnet_100`. You can find the IDs in the model summaries at the top of this page. To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use. ## How do I finetune this model? You can finetune any of the pre-trained models just by changing the classifier (the last layer). ```py >>> model = timm.create_model('spnasnet_100', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) ``` To finetune on your own dataset, you have to write a training loop or adapt [timm's training script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. ## How do I train this model? You can follow the [timm recipe scripts](../training_script) for training a new model afresh. 
## Citation ```BibTeX @misc{stamoulis2019singlepath, title={Single-Path NAS: Designing Hardware-Efficient ConvNets in less than 4 Hours}, author={Dimitrios Stamoulis and Ruizhou Ding and Di Wang and Dimitrios Lymberopoulos and Bodhi Priyantha and Jie Liu and Diana Marculescu}, year={2019}, eprint={1904.02877}, archivePrefix={arXiv}, primaryClass={cs.LG} } ``` <!-- Type: model-index Collections: - Name: SPNASNet Paper: Title: 'Single-Path NAS: Designing Hardware-Efficient ConvNets in less than 4 Hours' URL: https://paperswithcode.com/paper/single-path-nas-designing-hardware-efficient Models: - Name: spnasnet_100 In Collection: SPNASNet Metadata: FLOPs: 442385600 Parameters: 4420000 File Size: 17902337 Architecture: - Average Pooling - Batch Normalization - Convolution - Depthwise Separable Convolution - Dropout - ReLU Tasks: - Image Classification Training Data: - ImageNet ID: spnasnet_100 Crop Pct: '0.875' Image Size: '224' Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L995 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/spnasnet_100-048bc3f4.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 74.08% Top 5 Accuracy: 91.82% -->
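As a follow-up to the feature extraction note above, `create_model` can also return the backbone directly with `features_only=True`, which yields a list of intermediate feature maps instead of classification logits. A short sketch that reuses the `tensor` prepared in the preprocessing example:

```py
>>> import timm
>>> import torch
>>> feat_model = timm.create_model('spnasnet_100', pretrained=True, features_only=True)
>>> feat_model = feat_model.eval()
>>> with torch.inference_mode():
...     features = feat_model(tensor)  # `tensor` from the preprocessing example above
>>> print([f.shape for f in features])            # one feature map per network stage
>>> print(feat_model.feature_info.channels())     # channels of each returned stage
```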
pytorch-image-models/hfdocs/source/models/spnasnet.mdx/0
{ "file_path": "pytorch-image-models/hfdocs/source/models/spnasnet.mdx", "repo_id": "pytorch-image-models", "token_count": 1511 }
250
# Optimization

This page contains the API reference documentation for optimizers included in `timm`.

## Optimizers

### Factory functions

[[autodoc]] timm.optim.create_optimizer_v2
[[autodoc]] timm.optim.list_optimizers
[[autodoc]] timm.optim.get_optimizer_class

### Optimizer Classes

[[autodoc]] timm.optim.adabelief.AdaBelief
[[autodoc]] timm.optim.adafactor.Adafactor
[[autodoc]] timm.optim.adafactor_bv.AdafactorBigVision
[[autodoc]] timm.optim.adahessian.Adahessian
[[autodoc]] timm.optim.adamp.AdamP
[[autodoc]] timm.optim.adan.Adan
[[autodoc]] timm.optim.adopt.Adopt
[[autodoc]] timm.optim.lamb.Lamb
[[autodoc]] timm.optim.laprop.LaProp
[[autodoc]] timm.optim.lars.Lars
[[autodoc]] timm.optim.lion.Lion
[[autodoc]] timm.optim.lookahead.Lookahead
[[autodoc]] timm.optim.madgrad.MADGRAD
[[autodoc]] timm.optim.mars.Mars
[[autodoc]] timm.optim.nadamw.NAdamW
[[autodoc]] timm.optim.nvnovograd.NvNovoGrad
[[autodoc]] timm.optim.rmsprop_tf.RMSpropTF
[[autodoc]] timm.optim.sgdp.SGDP
[[autodoc]] timm.optim.sgdw.SGDW
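A minimal usage sketch of the factory functions listed above (the model and the hyper-parameter values are arbitrary placeholders):

```py
>>> import timm
>>> from timm.optim import create_optimizer_v2, list_optimizers
>>> model = timm.create_model('resnet18')
>>> optimizer = create_optimizer_v2(model, opt='adamw', lr=1e-3, weight_decay=0.05)
>>> list_optimizers()[:5]  # names accepted by the `opt` argument
```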
pytorch-image-models/hfdocs/source/reference/optimizers.mdx/0
{ "file_path": "pytorch-image-models/hfdocs/source/reference/optimizers.mdx", "repo_id": "pytorch-image-models", "token_count": 425 }
251
""" Random Erasing (Cutout) Originally inspired by impl at https://github.com/zhunzhong07/Random-Erasing, Apache 2.0 Copyright Zhun Zhong & Liang Zheng Hacked together by / Copyright 2019, Ross Wightman """ import random import math import torch def _get_pixels(per_pixel, rand_color, patch_size, dtype=torch.float32, device='cuda'): # NOTE I've seen CUDA illegal memory access errors being caused by the normal_() # paths, flip the order so normal is run on CPU if this becomes a problem # Issue has been fixed in master https://github.com/pytorch/pytorch/issues/19508 if per_pixel: return torch.empty(patch_size, dtype=dtype, device=device).normal_() elif rand_color: return torch.empty((patch_size[0], 1, 1), dtype=dtype, device=device).normal_() else: return torch.zeros((patch_size[0], 1, 1), dtype=dtype, device=device) class RandomErasing: """ Randomly selects a rectangle region in an image and erases its pixels. 'Random Erasing Data Augmentation' by Zhong et al. See https://arxiv.org/pdf/1708.04896.pdf This variant of RandomErasing is intended to be applied to either a batch or single image tensor after it has been normalized by dataset mean and std. Args: probability: Probability that the Random Erasing operation will be performed. min_area: Minimum percentage of erased area wrt input image area. max_area: Maximum percentage of erased area wrt input image area. min_aspect: Minimum aspect ratio of erased area. mode: pixel color mode, one of 'const', 'rand', or 'pixel' 'const' - erase block is constant color of 0 for all channels 'rand' - erase block is same per-channel random (normal) color 'pixel' - erase block is per-pixel random (normal) color max_count: maximum number of erasing blocks per image, area per box is scaled by count. per-image count is randomly chosen between 1 and this value. 
""" def __init__( self, probability=0.5, min_area=0.02, max_area=1/3, min_aspect=0.3, max_aspect=None, mode='const', min_count=1, max_count=None, num_splits=0, device='cuda', ): self.probability = probability self.min_area = min_area self.max_area = max_area max_aspect = max_aspect or 1 / min_aspect self.log_aspect_ratio = (math.log(min_aspect), math.log(max_aspect)) self.min_count = min_count self.max_count = max_count or min_count self.num_splits = num_splits self.mode = mode.lower() self.rand_color = False self.per_pixel = False if self.mode == 'rand': self.rand_color = True # per block random normal elif self.mode == 'pixel': self.per_pixel = True # per pixel random normal else: assert not self.mode or self.mode == 'const' self.device = device def _erase(self, img, chan, img_h, img_w, dtype): if random.random() > self.probability: return area = img_h * img_w count = self.min_count if self.min_count == self.max_count else \ random.randint(self.min_count, self.max_count) for _ in range(count): for attempt in range(10): target_area = random.uniform(self.min_area, self.max_area) * area / count aspect_ratio = math.exp(random.uniform(*self.log_aspect_ratio)) h = int(round(math.sqrt(target_area * aspect_ratio))) w = int(round(math.sqrt(target_area / aspect_ratio))) if w < img_w and h < img_h: top = random.randint(0, img_h - h) left = random.randint(0, img_w - w) img[:, top:top + h, left:left + w] = _get_pixels( self.per_pixel, self.rand_color, (chan, h, w), dtype=dtype, device=self.device, ) break def __call__(self, input): if len(input.size()) == 3: self._erase(input, *input.size(), input.dtype) else: batch_size, chan, img_h, img_w = input.size() # skip first slice of batch if num_splits is set (for clean portion of samples) batch_start = batch_size // self.num_splits if self.num_splits > 1 else 0 for i in range(batch_start, batch_size): self._erase(input[i], chan, img_h, img_w, input.dtype) return input def __repr__(self): # NOTE simplified state for repr fs = self.__class__.__name__ + f'(p={self.probability}, mode={self.mode}' fs += f', count=({self.min_count}, {self.max_count}))' return fs
pytorch-image-models/timm/data/random_erasing.py/0
{ "file_path": "pytorch-image-models/timm/data/random_erasing.py", "repo_id": "pytorch-image-models", "token_count": 2258 }
252
import math import numbers import random import warnings from typing import List, Sequence, Tuple, Union import torch import torchvision.transforms as transforms import torchvision.transforms.functional as F try: from torchvision.transforms.functional import InterpolationMode has_interpolation_mode = True except ImportError: has_interpolation_mode = False from PIL import Image import numpy as np __all__ = [ "ToNumpy", "ToTensor", "str_to_interp_mode", "str_to_pil_interp", "interp_mode_to_str", "RandomResizedCropAndInterpolation", "CenterCropOrPad", "center_crop_or_pad", "crop_or_pad", "RandomCropOrPad", "RandomPad", "ResizeKeepRatio", "TrimBorder", "MaybeToTensor", "MaybePILToTensor" ] class ToNumpy: def __call__(self, pil_img): np_img = np.array(pil_img, dtype=np.uint8) if np_img.ndim < 3: np_img = np.expand_dims(np_img, axis=-1) np_img = np.rollaxis(np_img, 2) # HWC to CHW return np_img class ToTensor: """ ToTensor with no rescaling of values""" def __init__(self, dtype=torch.float32): self.dtype = dtype def __call__(self, pil_img): return F.pil_to_tensor(pil_img).to(dtype=self.dtype) class MaybeToTensor(transforms.ToTensor): """Convert a PIL Image or ndarray to tensor if it's not already one. """ def __init__(self) -> None: super().__init__() def __call__(self, pic) -> torch.Tensor: """ Args: pic (PIL Image or numpy.ndarray): Image to be converted to tensor. Returns: Tensor: Converted image. """ if isinstance(pic, torch.Tensor): return pic return F.to_tensor(pic) def __repr__(self) -> str: return f"{self.__class__.__name__}()" class MaybePILToTensor: """Convert a PIL Image to a tensor of the same type - this does not scale values. """ def __init__(self) -> None: super().__init__() def __call__(self, pic): """ Note: A deep copy of the underlying array is performed. Args: pic (PIL Image): Image to be converted to tensor. Returns: Tensor: Converted image. """ if isinstance(pic, torch.Tensor): return pic return F.pil_to_tensor(pic) def __repr__(self) -> str: return f"{self.__class__.__name__}()" # Pillow is deprecating the top-level resampling attributes (e.g., Image.BILINEAR) in # favor of the Image.Resampling enum. The top-level resampling attributes will be # removed in Pillow 10. 
if hasattr(Image, "Resampling"): _pil_interpolation_to_str = { Image.Resampling.NEAREST: 'nearest', Image.Resampling.BILINEAR: 'bilinear', Image.Resampling.BICUBIC: 'bicubic', Image.Resampling.BOX: 'box', Image.Resampling.HAMMING: 'hamming', Image.Resampling.LANCZOS: 'lanczos', } else: _pil_interpolation_to_str = { Image.NEAREST: 'nearest', Image.BILINEAR: 'bilinear', Image.BICUBIC: 'bicubic', Image.BOX: 'box', Image.HAMMING: 'hamming', Image.LANCZOS: 'lanczos', } _str_to_pil_interpolation = {b: a for a, b in _pil_interpolation_to_str.items()} if has_interpolation_mode: _torch_interpolation_to_str = { InterpolationMode.NEAREST: 'nearest', InterpolationMode.BILINEAR: 'bilinear', InterpolationMode.BICUBIC: 'bicubic', InterpolationMode.BOX: 'box', InterpolationMode.HAMMING: 'hamming', InterpolationMode.LANCZOS: 'lanczos', } _str_to_torch_interpolation = {b: a for a, b in _torch_interpolation_to_str.items()} else: _pil_interpolation_to_torch = {} _torch_interpolation_to_str = {} def str_to_pil_interp(mode_str): return _str_to_pil_interpolation[mode_str] def str_to_interp_mode(mode_str): if has_interpolation_mode: return _str_to_torch_interpolation[mode_str] else: return _str_to_pil_interpolation[mode_str] def interp_mode_to_str(mode): if has_interpolation_mode: return _torch_interpolation_to_str[mode] else: return _pil_interpolation_to_str[mode] _RANDOM_INTERPOLATION = (str_to_interp_mode('bilinear'), str_to_interp_mode('bicubic')) def _setup_size(size, error_msg="Please provide only two dimensions (h, w) for size."): if isinstance(size, numbers.Number): return int(size), int(size) if isinstance(size, Sequence) and len(size) == 1: return size[0], size[0] if len(size) != 2: raise ValueError(error_msg) return size class RandomResizedCropAndInterpolation: """Crop the given PIL Image to random size and aspect ratio with random interpolation. A crop of random size (default: of 0.08 to 1.0) of the original size and a random aspect ratio (default: of 3/4 to 4/3) of the original aspect ratio is made. This crop is finally resized to given size. This is popularly used to train the Inception networks. Args: size: expected output size of each edge scale: range of size of the origin size cropped ratio: range of aspect ratio of the origin aspect ratio cropped interpolation: Default: PIL.Image.BILINEAR """ def __init__( self, size, scale=(0.08, 1.0), ratio=(3. / 4., 4. / 3.), interpolation='bilinear', ): if isinstance(size, (list, tuple)): self.size = tuple(size) else: self.size = (size, size) if (scale[0] > scale[1]) or (ratio[0] > ratio[1]): warnings.warn("range should be of kind (min, max)") if interpolation == 'random': self.interpolation = _RANDOM_INTERPOLATION else: self.interpolation = str_to_interp_mode(interpolation) self.scale = scale self.ratio = ratio @staticmethod def get_params(img, scale, ratio): """Get parameters for ``crop`` for a random sized crop. Args: img (PIL Image): Image to be cropped. scale (tuple): range of size of the origin size cropped ratio (tuple): range of aspect ratio of the origin aspect ratio cropped Returns: tuple: params (i, j, h, w) to be passed to ``crop`` for a random sized crop. 
""" img_w, img_h = F.get_image_size(img) area = img_w * img_h for attempt in range(10): target_area = random.uniform(*scale) * area log_ratio = (math.log(ratio[0]), math.log(ratio[1])) aspect_ratio = math.exp(random.uniform(*log_ratio)) target_w = int(round(math.sqrt(target_area * aspect_ratio))) target_h = int(round(math.sqrt(target_area / aspect_ratio))) if target_w <= img_w and target_h <= img_h: i = random.randint(0, img_h - target_h) j = random.randint(0, img_w - target_w) return i, j, target_h, target_w # Fallback to central crop in_ratio = img_w / img_h if in_ratio < min(ratio): target_w = img_w target_h = int(round(target_w / min(ratio))) elif in_ratio > max(ratio): target_h = img_h target_w = int(round(target_h * max(ratio))) else: # whole image target_w = img_w target_h = img_h i = (img_h - target_h) // 2 j = (img_w - target_w) // 2 return i, j, target_h, target_w def __call__(self, img): """ Args: img (PIL Image): Image to be cropped and resized. Returns: PIL Image: Randomly cropped and resized image. """ i, j, h, w = self.get_params(img, self.scale, self.ratio) if isinstance(self.interpolation, (tuple, list)): interpolation = random.choice(self.interpolation) else: interpolation = self.interpolation return F.resized_crop(img, i, j, h, w, self.size, interpolation) def __repr__(self): if isinstance(self.interpolation, (tuple, list)): interpolate_str = ' '.join([interp_mode_to_str(x) for x in self.interpolation]) else: interpolate_str = interp_mode_to_str(self.interpolation) format_string = self.__class__.__name__ + '(size={0}'.format(self.size) format_string += ', scale={0}'.format(tuple(round(s, 4) for s in self.scale)) format_string += ', ratio={0}'.format(tuple(round(r, 4) for r in self.ratio)) format_string += ', interpolation={0})'.format(interpolate_str) return format_string def center_crop_or_pad( img: torch.Tensor, output_size: Union[int, List[int]], fill: Union[int, Tuple[int, int, int]] = 0, padding_mode: str = 'constant', ) -> torch.Tensor: """Center crops and/or pads the given image. If the image is torch Tensor, it is expected to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions. If image size is smaller than output size along any edge, image is padded with 0 and then center cropped. Args: img (PIL Image or Tensor): Image to be cropped. output_size (sequence or int): (height, width) of the crop box. If int or sequence with single int, it is used for both directions. fill (int, Tuple[int]): Padding color Returns: PIL Image or Tensor: Cropped image. """ output_size = _setup_size(output_size) crop_height, crop_width = output_size _, image_height, image_width = F.get_dimensions(img) if crop_width > image_width or crop_height > image_height: padding_ltrb = [ (crop_width - image_width) // 2 if crop_width > image_width else 0, (crop_height - image_height) // 2 if crop_height > image_height else 0, (crop_width - image_width + 1) // 2 if crop_width > image_width else 0, (crop_height - image_height + 1) // 2 if crop_height > image_height else 0, ] img = F.pad(img, padding_ltrb, fill=fill, padding_mode=padding_mode) _, image_height, image_width = F.get_dimensions(img) if crop_width == image_width and crop_height == image_height: return img crop_top = int(round((image_height - crop_height) / 2.0)) crop_left = int(round((image_width - crop_width) / 2.0)) return F.crop(img, crop_top, crop_left, crop_height, crop_width) class CenterCropOrPad(torch.nn.Module): """Crops the given image at the center. 
If the image is torch Tensor, it is expected to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions. If image size is smaller than output size along any edge, image is padded with 0 and then center cropped. Args: size (sequence or int): Desired output size of the crop. If size is an int instead of sequence like (h, w), a square crop (size, size) is made. If provided a sequence of length 1, it will be interpreted as (size[0], size[0]). """ def __init__( self, size: Union[int, List[int]], fill: Union[int, Tuple[int, int, int]] = 0, padding_mode: str = 'constant', ): super().__init__() self.size = _setup_size(size) self.fill = fill self.padding_mode = padding_mode def forward(self, img): """ Args: img (PIL Image or Tensor): Image to be cropped. Returns: PIL Image or Tensor: Cropped image. """ return center_crop_or_pad(img, self.size, fill=self.fill, padding_mode=self.padding_mode) def __repr__(self) -> str: return f"{self.__class__.__name__}(size={self.size})" def crop_or_pad( img: torch.Tensor, top: int, left: int, height: int, width: int, fill: Union[int, Tuple[int, int, int]] = 0, padding_mode: str = 'constant', ) -> torch.Tensor: """ Crops and/or pads image to meet target size, with control over fill and padding_mode. """ _, image_height, image_width = F.get_dimensions(img) right = left + width bottom = top + height if left < 0 or top < 0 or right > image_width or bottom > image_height: padding_ltrb = [ max(-left + min(0, right), 0), max(-top + min(0, bottom), 0), max(right - max(image_width, left), 0), max(bottom - max(image_height, top), 0), ] img = F.pad(img, padding_ltrb, fill=fill, padding_mode=padding_mode) top = max(top, 0) left = max(left, 0) return F.crop(img, top, left, height, width) class RandomCropOrPad(torch.nn.Module): """ Crop and/or pad image with random placement within the crop or pad margin. """ def __init__( self, size: Union[int, List[int]], fill: Union[int, Tuple[int, int, int]] = 0, padding_mode: str = 'constant', ): super().__init__() self.size = _setup_size(size) self.fill = fill self.padding_mode = padding_mode @staticmethod def get_params(img, size): _, image_height, image_width = F.get_dimensions(img) delta_height = image_height - size[0] delta_width = image_width - size[1] top = int(math.copysign(random.randint(0, abs(delta_height)), delta_height)) left = int(math.copysign(random.randint(0, abs(delta_width)), delta_width)) return top, left def forward(self, img): """ Args: img (PIL Image or Tensor): Image to be cropped. Returns: PIL Image or Tensor: Cropped image. 
""" top, left = self.get_params(img, self.size) return crop_or_pad( img, top=top, left=left, height=self.size[0], width=self.size[1], fill=self.fill, padding_mode=self.padding_mode, ) def __repr__(self) -> str: return f"{self.__class__.__name__}(size={self.size})" class RandomPad: def __init__(self, input_size, fill=0): self.input_size = input_size self.fill = fill @staticmethod def get_params(img, input_size): width, height = F.get_image_size(img) delta_width = max(input_size[1] - width, 0) delta_height = max(input_size[0] - height, 0) pad_left = random.randint(0, delta_width) pad_top = random.randint(0, delta_height) pad_right = delta_width - pad_left pad_bottom = delta_height - pad_top return pad_left, pad_top, pad_right, pad_bottom def __call__(self, img): padding = self.get_params(img, self.input_size) img = F.pad(img, padding, self.fill) return img class ResizeKeepRatio: """ Resize and Keep Aspect Ratio """ def __init__( self, size, longest=0., interpolation='bilinear', random_scale_prob=0., random_scale_range=(0.85, 1.05), random_scale_area=False, random_aspect_prob=0., random_aspect_range=(0.9, 1.11), ): """ Args: size: longest: interpolation: random_scale_prob: random_scale_range: random_scale_area: random_aspect_prob: random_aspect_range: """ if isinstance(size, (list, tuple)): self.size = tuple(size) else: self.size = (size, size) if interpolation == 'random': self.interpolation = _RANDOM_INTERPOLATION else: self.interpolation = str_to_interp_mode(interpolation) self.longest = float(longest) self.random_scale_prob = random_scale_prob self.random_scale_range = random_scale_range self.random_scale_area = random_scale_area self.random_aspect_prob = random_aspect_prob self.random_aspect_range = random_aspect_range @staticmethod def get_params( img, target_size, longest, random_scale_prob=0., random_scale_range=(1.0, 1.33), random_scale_area=False, random_aspect_prob=0., random_aspect_range=(0.9, 1.11) ): """Get parameters """ img_h, img_w = img_size = F.get_dimensions(img)[1:] target_h, target_w = target_size ratio_h = img_h / target_h ratio_w = img_w / target_w ratio = max(ratio_h, ratio_w) * longest + min(ratio_h, ratio_w) * (1. - longest) if random_scale_prob > 0 and random.random() < random_scale_prob: ratio_factor = random.uniform(random_scale_range[0], random_scale_range[1]) if random_scale_area: # make ratio factor equivalent to RRC area crop where < 1.0 = area zoom, # otherwise like affine scale where < 1.0 = linear zoom out ratio_factor = 1. / math.sqrt(ratio_factor) ratio_factor = (ratio_factor, ratio_factor) else: ratio_factor = (1., 1.) if random_aspect_prob > 0 and random.random() < random_aspect_prob: log_aspect = (math.log(random_aspect_range[0]), math.log(random_aspect_range[1])) aspect_factor = math.exp(random.uniform(*log_aspect)) aspect_factor = math.sqrt(aspect_factor) # currently applying random aspect adjustment equally to both dims, # could change to keep output sizes above their target where possible ratio_factor = (ratio_factor[0] / aspect_factor, ratio_factor[1] * aspect_factor) size = [round(x * f / ratio) for x, f in zip(img_size, ratio_factor)] return size def __call__(self, img): """ Args: img (PIL Image): Image to be cropped and resized. 
        Returns:
            PIL Image: Resized, padded to at least target size, possibly cropped to exactly target size
        """
        size = self.get_params(
            img, self.size, self.longest,
            self.random_scale_prob, self.random_scale_range, self.random_scale_area,
            self.random_aspect_prob, self.random_aspect_range
        )
        if isinstance(self.interpolation, (tuple, list)):
            interpolation = random.choice(self.interpolation)
        else:
            interpolation = self.interpolation
        img = F.resize(img, size, interpolation)
        return img

    def __repr__(self):
        if isinstance(self.interpolation, (tuple, list)):
            interpolate_str = ' '.join([interp_mode_to_str(x) for x in self.interpolation])
        else:
            interpolate_str = interp_mode_to_str(self.interpolation)
        format_string = self.__class__.__name__ + '(size={0}'.format(self.size)
        format_string += f', interpolation={interpolate_str}'
        format_string += f', longest={self.longest:.3f}'
        format_string += f', random_scale_prob={self.random_scale_prob:.3f}'
        format_string += f', random_scale_range=(' \
                         f'{self.random_scale_range[0]:.3f}, {self.random_scale_range[1]:.3f})'
        format_string += f', random_aspect_prob={self.random_aspect_prob:.3f}'
        format_string += f', random_aspect_range=(' \
                         f'{self.random_aspect_range[0]:.3f}, {self.random_aspect_range[1]:.3f}))'
        return format_string


class TrimBorder(torch.nn.Module):

    def __init__(
            self,
            border_size: int,
    ):
        super().__init__()
        self.border_size = border_size

    def forward(self, img):
        w, h = F.get_image_size(img)
        top = left = self.border_size
        # clamp the crop offsets to the image bounds
        top = min(top, h)
        left = min(left, w)
        height = max(0, h - 2 * self.border_size)
        width = max(0, w - 2 * self.border_size)
        return F.crop(img, top, left, height, width)
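# ---------------------------------------------------------------------------
# Example usage (illustrative sketch; image size, target sizes and interpolation
# choices below are arbitrary, and a recent torchvision is assumed).
if __name__ == '__main__':
    img = Image.new('RGB', (320, 240))

    # Random crop + resize with the interpolation mode picked at random per call.
    rrc = RandomResizedCropAndInterpolation(224, interpolation='random')
    print(rrc(img).size)                # (224, 224)

    # Resize so the image covers the target while keeping aspect ratio,
    # then center crop / pad to exactly 256 x 256.
    resize = ResizeKeepRatio(256, longest=0.)
    crop = CenterCropOrPad(256)
    out = crop(resize(img))
    print(MaybeToTensor()(out).shape)   # torch.Size([3, 256, 256])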
pytorch-image-models/timm/data/transforms.py/0
{ "file_path": "pytorch-image-models/timm/data/transforms.py", "repo_id": "pytorch-image-models", "token_count": 9216 }
253
""" Model / Layer Config singleton state """ import os import warnings from typing import Any, Optional import torch __all__ = [ 'is_exportable', 'is_scriptable', 'is_no_jit', 'use_fused_attn', 'set_exportable', 'set_scriptable', 'set_no_jit', 'set_layer_config', 'set_fused_attn', 'set_reentrant_ckpt', 'use_reentrant_ckpt' ] # Set to True if prefer to have layers with no jit optimization (includes activations) _NO_JIT = False # Set to True if prefer to have activation layers with no jit optimization # NOTE not currently used as no difference between no_jit and no_activation jit as only layers obeying # the jit flags so far are activations. This will change as more layers are updated and/or added. _NO_ACTIVATION_JIT = False # Set to True if exporting a model with Same padding via ONNX _EXPORTABLE = False # Set to True if wanting to use torch.jit.script on a model _SCRIPTABLE = False # use torch.scaled_dot_product_attention where possible _HAS_FUSED_ATTN = hasattr(torch.nn.functional, 'scaled_dot_product_attention') if 'TIMM_FUSED_ATTN' in os.environ: _USE_FUSED_ATTN = int(os.environ['TIMM_FUSED_ATTN']) else: _USE_FUSED_ATTN = 1 # 0 == off, 1 == on (for tested use), 2 == on (for experimental use) if 'TIMM_REENTRANT_CKPT' in os.environ: _USE_REENTRANT_CKPT = bool(os.environ['TIMM_REENTRANT_CKPT']) else: _USE_REENTRANT_CKPT = False # defaults to disabled (off) def is_no_jit(): return _NO_JIT class set_no_jit: def __init__(self, mode: bool) -> None: global _NO_JIT self.prev = _NO_JIT _NO_JIT = mode def __enter__(self) -> None: pass def __exit__(self, *args: Any) -> bool: global _NO_JIT _NO_JIT = self.prev return False def is_exportable(): return _EXPORTABLE class set_exportable: def __init__(self, mode: bool) -> None: global _EXPORTABLE self.prev = _EXPORTABLE _EXPORTABLE = mode def __enter__(self) -> None: pass def __exit__(self, *args: Any) -> bool: global _EXPORTABLE _EXPORTABLE = self.prev return False def is_scriptable(): return _SCRIPTABLE class set_scriptable: def __init__(self, mode: bool) -> None: global _SCRIPTABLE self.prev = _SCRIPTABLE _SCRIPTABLE = mode def __enter__(self) -> None: pass def __exit__(self, *args: Any) -> bool: global _SCRIPTABLE _SCRIPTABLE = self.prev return False class set_layer_config: """ Layer config context manager that allows setting all layer config flags at once. If a flag arg is None, it will not change the current value. 
""" def __init__( self, scriptable: Optional[bool] = None, exportable: Optional[bool] = None, no_jit: Optional[bool] = None, no_activation_jit: Optional[bool] = None): global _SCRIPTABLE global _EXPORTABLE global _NO_JIT global _NO_ACTIVATION_JIT self.prev = _SCRIPTABLE, _EXPORTABLE, _NO_JIT, _NO_ACTIVATION_JIT if scriptable is not None: _SCRIPTABLE = scriptable if exportable is not None: _EXPORTABLE = exportable if no_jit is not None: _NO_JIT = no_jit if no_activation_jit is not None: _NO_ACTIVATION_JIT = no_activation_jit def __enter__(self) -> None: pass def __exit__(self, *args: Any) -> bool: global _SCRIPTABLE global _EXPORTABLE global _NO_JIT global _NO_ACTIVATION_JIT _SCRIPTABLE, _EXPORTABLE, _NO_JIT, _NO_ACTIVATION_JIT = self.prev return False def use_fused_attn(experimental: bool = False) -> bool: # NOTE: ONNX export cannot handle F.scaled_dot_product_attention as of pytorch 2.0 if not _HAS_FUSED_ATTN or _EXPORTABLE: return False if experimental: return _USE_FUSED_ATTN > 1 return _USE_FUSED_ATTN > 0 def set_fused_attn(enable: bool = True, experimental: bool = False): global _USE_FUSED_ATTN if not _HAS_FUSED_ATTN: warnings.warn('This version of pytorch does not have F.scaled_dot_product_attention, fused_attn flag ignored.') return if experimental and enable: _USE_FUSED_ATTN = 2 elif enable: _USE_FUSED_ATTN = 1 else: _USE_FUSED_ATTN = 0 def use_reentrant_ckpt() -> bool: return _USE_REENTRANT_CKPT def set_reentrant_ckpt(enable: bool = True): global _USE_REENTRANT_CKPT _USE_REENTRANT_CKPT = enable
pytorch-image-models/timm/layers/config.py/0
{ "file_path": "pytorch-image-models/timm/layers/config.py", "repo_id": "pytorch-image-models", "token_count": 1974 }
254
from typing import Tuple

import torch


def ndgrid(*tensors) -> Tuple[torch.Tensor, ...]:
    """generate N-D grid in dimension order.

    The ndgrid function is like meshgrid except that the order of the first two input arguments is switched.

    That is, the statement
    [X1,X2,X3] = ndgrid(x1,x2,x3)

    produces the same result as

    [X2,X1,X3] = meshgrid(x2,x1,x3)

    This naming is based on MATLAB; the purpose is to avoid confusion due to torch's change to make
    torch.meshgrid behaviour move from matching ndgrid ('ij') indexing to numpy meshgrid defaults of ('xy').

    """
    try:
        return torch.meshgrid(*tensors, indexing='ij')
    except TypeError:
        # old PyTorch < 1.10 will follow this path as it does not have indexing arg,
        # the old behaviour of meshgrid was 'ij'
        return torch.meshgrid(*tensors)


def meshgrid(*tensors) -> Tuple[torch.Tensor, ...]:
    """generate N-D grid in spatial dim order.

    The meshgrid function is similar to ndgrid except that the order of the first two input and output
    arguments is switched.

    That is, the statement

    [X,Y,Z] = meshgrid(x,y,z)

    produces the same result as

    [Y,X,Z] = ndgrid(y,x,z)

    Because of this, meshgrid is better suited to problems in two- or three-dimensional Cartesian space,
    while ndgrid is better suited to multidimensional problems that aren't spatially based.
    """
    # NOTE: this will throw in PyTorch < 1.10 as meshgrid did not support indexing arg or have
    # capability of generating grid in xy order before then.
    return torch.meshgrid(*tensors, indexing='xy')
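# ---------------------------------------------------------------------------
# Example usage (small illustration of the two indexing conventions; requires
# PyTorch >= 1.10 for the 'xy' indexing used by `meshgrid`).
if __name__ == '__main__':
    xs = torch.arange(2)
    ys = torch.arange(3)
    gi, gj = ndgrid(xs, ys)     # 'ij' (matrix) indexing -> shapes (2, 3)
    gx, gy = meshgrid(xs, ys)   # 'xy' (cartesian) indexing -> shapes (3, 2)
    print(gi.shape, gx.shape)   # torch.Size([2, 3]) torch.Size([3, 2])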
pytorch-image-models/timm/layers/grid.py/0
{ "file_path": "pytorch-image-models/timm/layers/grid.py", "repo_id": "pytorch-image-models", "token_count": 565 }
255
""" Normalization + Activation Layers Provides Norm+Act fns for standard PyTorch norm layers such as * BatchNorm * GroupNorm * LayerNorm This allows swapping with alternative layers that are natively both norm + act such as * EvoNorm (evo_norm.py) * FilterResponseNorm (filter_response_norm.py) * InplaceABN (inplace_abn.py) Hacked together by / Copyright 2022 Ross Wightman """ from typing import Any, Dict, List, Optional, Type, Union import torch from torch import nn as nn from torch.nn import functional as F from torchvision.ops.misc import FrozenBatchNorm2d from ._fx import register_notrace_module from .create_act import create_act_layer from .fast_norm import ( is_fast_norm, fast_group_norm, fast_layer_norm, fast_rms_norm, rms_norm2d, fast_rms_norm2d, ) from .norm import RmsNorm, RmsNorm2d from .trace_utils import _assert from .typing import LayerType try: from torch.nn.functional import rms_norm except ImportError: from .fast_norm import rms_norm def _create_act( act_layer: LayerType, act_kwargs: Dict[str, Any] = None, inplace: Optional[bool] = False, apply_act: bool = True, ) -> nn.Module: act_kwargs = act_kwargs or {} act_kwargs.setdefault('inplace', inplace) act = None if apply_act: act = create_act_layer(act_layer, **act_kwargs) return nn.Identity() if act is None else act @register_notrace_module class BatchNormAct2d(nn.BatchNorm2d): """BatchNorm + Activation This module performs BatchNorm + Activation in a manner that will remain backwards compatible with weights trained with separate bn, act. This is why we inherit from BN instead of composing it as a .bn member. """ def __init__( self, num_features: int, eps: float = 1e-5, momentum: float = 0.1, affine: bool = True, track_running_stats: bool = True, apply_act: bool = True, act_layer: LayerType = nn.ReLU, act_kwargs: Dict[str, Any] = None, inplace: bool = True, drop_layer: Optional[Type[nn.Module]] = None, device=None, dtype=None, ): try: factory_kwargs = {'device': device, 'dtype': dtype} super(BatchNormAct2d, self).__init__( num_features, eps=eps, momentum=momentum, affine=affine, track_running_stats=track_running_stats, **factory_kwargs, ) except TypeError: # NOTE for backwards compat with old PyTorch w/o factory device/dtype support super(BatchNormAct2d, self).__init__( num_features, eps=eps, momentum=momentum, affine=affine, track_running_stats=track_running_stats, ) self.drop = drop_layer() if drop_layer is not None else nn.Identity() self.act = _create_act(act_layer, act_kwargs=act_kwargs, inplace=inplace, apply_act=apply_act) def forward(self, x): # cut & paste of torch.nn.BatchNorm2d.forward impl to avoid issues with torchscript and tracing _assert(x.ndim == 4, f'expected 4D input (got {x.ndim}D input)') # exponential_average_factor is set to self.momentum # (when it is available) only so that it gets updated # in ONNX graph when this node is exported to ONNX. if self.momentum is None: exponential_average_factor = 0.0 else: exponential_average_factor = self.momentum if self.training and self.track_running_stats: # TODO: if statement only here to tell the jit to skip emitting this when it is None if self.num_batches_tracked is not None: # type: ignore[has-type] self.num_batches_tracked.add_(1) # type: ignore[has-type] if self.momentum is None: # use cumulative moving average exponential_average_factor = 1.0 / float(self.num_batches_tracked) else: # use exponential moving average exponential_average_factor = self.momentum r""" Decide whether the mini-batch stats should be used for normalization rather than the buffers. 
Mini-batch stats are used in training mode, and in eval mode when buffers are None. """ if self.training: bn_training = True else: bn_training = (self.running_mean is None) and (self.running_var is None) r""" Buffers are only updated if they are to be tracked and we are in training mode. Thus they only need to be passed when the update should occur (i.e. in training mode when they are tracked), or when buffer stats are used for normalization (i.e. in eval mode when buffers are not None). """ x = F.batch_norm( x, # If buffers are not to be tracked, ensure that they won't be updated self.running_mean if not self.training or self.track_running_stats else None, self.running_var if not self.training or self.track_running_stats else None, self.weight, self.bias, bn_training, exponential_average_factor, self.eps, ) x = self.drop(x) x = self.act(x) return x @register_notrace_module class SyncBatchNormAct(nn.SyncBatchNorm): # Thanks to Selim Seferbekov (https://github.com/rwightman/pytorch-image-models/issues/1254) # This is a quick workaround to support SyncBatchNorm for timm BatchNormAct2d layers # but ONLY when used in conjunction with the timm conversion function below. # Do not create this module directly or use the PyTorch conversion function. def forward(self, x: torch.Tensor) -> torch.Tensor: x = super().forward(x) # SyncBN doesn't work with torchscript anyways, so this is fine if hasattr(self, "drop"): x = self.drop(x) if hasattr(self, "act"): x = self.act(x) return x def convert_sync_batchnorm(module, process_group=None): # convert both BatchNorm and BatchNormAct layers to Synchronized variants module_output = module if isinstance(module, torch.nn.modules.batchnorm._BatchNorm): if isinstance(module, BatchNormAct2d): # convert timm norm + act layer module_output = SyncBatchNormAct( module.num_features, module.eps, module.momentum, module.affine, module.track_running_stats, process_group=process_group, ) # set act and drop attr from the original module module_output.act = module.act module_output.drop = module.drop else: # convert standard BatchNorm layers module_output = torch.nn.SyncBatchNorm( module.num_features, module.eps, module.momentum, module.affine, module.track_running_stats, process_group, ) if module.affine: with torch.no_grad(): module_output.weight = module.weight module_output.bias = module.bias module_output.running_mean = module.running_mean module_output.running_var = module.running_var module_output.num_batches_tracked = module.num_batches_tracked module_output.training = module.training if hasattr(module, "qconfig"): module_output.qconfig = module.qconfig for name, child in module.named_children(): module_output.add_module(name, convert_sync_batchnorm(child, process_group)) del module return module_output @register_notrace_module class FrozenBatchNormAct2d(torch.nn.Module): """ BatchNormAct2d where the batch statistics and the affine parameters are fixed Args: num_features (int): Number of features ``C`` from an expected input of size ``(N, C, H, W)`` eps (float): a value added to the denominator for numerical stability. 
Default: 1e-5 """ def __init__( self, num_features: int, eps: float = 1e-5, apply_act: bool = True, act_layer: LayerType = nn.ReLU, act_kwargs: Dict[str, Any] = None, inplace: bool = True, drop_layer: Optional[Type[nn.Module]] = None, ): super().__init__() self.eps = eps self.register_buffer("weight", torch.ones(num_features)) self.register_buffer("bias", torch.zeros(num_features)) self.register_buffer("running_mean", torch.zeros(num_features)) self.register_buffer("running_var", torch.ones(num_features)) self.drop = drop_layer() if drop_layer is not None else nn.Identity() self.act = _create_act(act_layer, act_kwargs=act_kwargs, inplace=inplace, apply_act=apply_act) def _load_from_state_dict( self, state_dict: dict, prefix: str, local_metadata: dict, strict: bool, missing_keys: List[str], unexpected_keys: List[str], error_msgs: List[str], ): num_batches_tracked_key = prefix + "num_batches_tracked" if num_batches_tracked_key in state_dict: del state_dict[num_batches_tracked_key] super()._load_from_state_dict( state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs ) def forward(self, x: torch.Tensor) -> torch.Tensor: # move reshapes to the beginning # to make it fuser-friendly w = self.weight.reshape(1, -1, 1, 1) b = self.bias.reshape(1, -1, 1, 1) rv = self.running_var.reshape(1, -1, 1, 1) rm = self.running_mean.reshape(1, -1, 1, 1) scale = w * (rv + self.eps).rsqrt() bias = b - rm * scale x = x * scale + bias x = self.act(self.drop(x)) return x def __repr__(self) -> str: return f"{self.__class__.__name__}({self.weight.shape[0]}, eps={self.eps}, act={self.act})" def freeze_batch_norm_2d(module): """ Converts all `BatchNorm2d` and `SyncBatchNorm` or `BatchNormAct2d` and `SyncBatchNormAct2d` layers of provided module into `FrozenBatchNorm2d` or `FrozenBatchNormAct2d` respectively. Args: module (torch.nn.Module): Any PyTorch module. Returns: torch.nn.Module: Resulting module Inspired by https://github.com/pytorch/pytorch/blob/a5895f85be0f10212791145bfedc0261d364f103/torch/nn/modules/batchnorm.py#L762 """ res = module if isinstance(module, (BatchNormAct2d, SyncBatchNormAct)): res = FrozenBatchNormAct2d(module.num_features) res.num_features = module.num_features res.affine = module.affine if module.affine: res.weight.data = module.weight.data.clone().detach() res.bias.data = module.bias.data.clone().detach() res.running_mean.data = module.running_mean.data res.running_var.data = module.running_var.data res.eps = module.eps res.drop = module.drop res.act = module.act elif isinstance(module, (torch.nn.modules.batchnorm.BatchNorm2d, torch.nn.modules.batchnorm.SyncBatchNorm)): res = FrozenBatchNorm2d(module.num_features) res.num_features = module.num_features res.affine = module.affine if module.affine: res.weight.data = module.weight.data.clone().detach() res.bias.data = module.bias.data.clone().detach() res.running_mean.data = module.running_mean.data res.running_var.data = module.running_var.data res.eps = module.eps else: for name, child in module.named_children(): new_child = freeze_batch_norm_2d(child) if new_child is not child: res.add_module(name, new_child) return res def unfreeze_batch_norm_2d(module): """ Converts all `FrozenBatchNorm2d` layers of provided module into `BatchNorm2d`. If `module` is itself and instance of `FrozenBatchNorm2d`, it is converted into `BatchNorm2d` and returned. Otherwise, the module is walked recursively and submodules are converted in place. Args: module (torch.nn.Module): Any PyTorch module. 
    Returns:
        torch.nn.Module: Resulting module

    Inspired by https://github.com/pytorch/pytorch/blob/a5895f85be0f10212791145bfedc0261d364f103/torch/nn/modules/batchnorm.py#L762
    """
    res = module
    if isinstance(module, FrozenBatchNormAct2d):
        res = BatchNormAct2d(module.num_features)
        if module.affine:
            res.weight.data = module.weight.data.clone().detach()
            res.bias.data = module.bias.data.clone().detach()
        res.running_mean.data = module.running_mean.data
        res.running_var.data = module.running_var.data
        res.eps = module.eps
        res.drop = module.drop
        res.act = module.act
    elif isinstance(module, FrozenBatchNorm2d):
        res = torch.nn.BatchNorm2d(module.num_features)
        if module.affine:
            res.weight.data = module.weight.data.clone().detach()
            res.bias.data = module.bias.data.clone().detach()
        res.running_mean.data = module.running_mean.data
        res.running_var.data = module.running_var.data
        res.eps = module.eps
    else:
        for name, child in module.named_children():
            new_child = unfreeze_batch_norm_2d(child)
            if new_child is not child:
                res.add_module(name, new_child)
    return res


def _num_groups(num_channels: int, num_groups: int, group_size: int):
    if group_size:
        assert num_channels % group_size == 0
        return num_channels // group_size
    return num_groups


class GroupNormAct(nn.GroupNorm):
    _fast_norm: torch.jit.Final[bool]

    # NOTE num_channel and num_groups order flipped for easier layer swaps / binding of fixed args
    def __init__(
            self,
            num_channels: int,
            num_groups: int = 32,
            eps: float = 1e-5,
            affine: bool = True,
            group_size: Optional[int] = None,
            apply_act: bool = True,
            act_layer: LayerType = nn.ReLU,
            act_kwargs: Dict[str, Any] = None,
            inplace: bool = True,
            drop_layer: Optional[Type[nn.Module]] = None,
    ):
        super(GroupNormAct, self).__init__(
            _num_groups(num_channels, num_groups, group_size),
            num_channels,
            eps=eps,
            affine=affine,
        )
        self.drop = drop_layer() if drop_layer is not None else nn.Identity()
        self.act = _create_act(act_layer, act_kwargs=act_kwargs, inplace=inplace, apply_act=apply_act)
        self._fast_norm = is_fast_norm()

    def forward(self, x):
        if self._fast_norm:
            x = fast_group_norm(x, self.num_groups, self.weight, self.bias, self.eps)
        else:
            x = F.group_norm(x, self.num_groups, self.weight, self.bias, self.eps)
        x = self.drop(x)
        x = self.act(x)
        return x


class GroupNorm1Act(nn.GroupNorm):
    _fast_norm: torch.jit.Final[bool]

    def __init__(
            self,
            num_channels: int,
            eps: float = 1e-5,
            affine: bool = True,
            apply_act: bool = True,
            act_layer: LayerType = nn.ReLU,
            act_kwargs: Dict[str, Any] = None,
            inplace: bool = True,
            drop_layer: Optional[Type[nn.Module]] = None,
    ):
        super(GroupNorm1Act, self).__init__(1, num_channels, eps=eps, affine=affine)
        self.drop = drop_layer() if drop_layer is not None else nn.Identity()
        self.act = _create_act(act_layer, act_kwargs=act_kwargs, inplace=inplace, apply_act=apply_act)
        self._fast_norm = is_fast_norm()

    def forward(self, x):
        if self._fast_norm:
            x = fast_group_norm(x, self.num_groups, self.weight, self.bias, self.eps)
        else:
            x = F.group_norm(x, self.num_groups, self.weight, self.bias, self.eps)
        x = self.drop(x)
        x = self.act(x)
        return x


class LayerNormAct(nn.LayerNorm):
    _fast_norm: torch.jit.Final[bool]

    def __init__(
            self,
            normalization_shape: Union[int, List[int], torch.Size],
            eps: float = 1e-5,
            affine: bool = True,
            apply_act: bool = True,
            act_layer: LayerType = nn.ReLU,
            act_kwargs: Dict[str, Any] = None,
            inplace: bool = True,
            drop_layer: Optional[Type[nn.Module]] = None,
            **kwargs,
    ):
        super(LayerNormAct, self).__init__(normalization_shape, eps=eps, elementwise_affine=affine, **kwargs)
        self.drop = drop_layer() if drop_layer is not None else nn.Identity()
        self.act = _create_act(act_layer, act_kwargs=act_kwargs, inplace=inplace, apply_act=apply_act)
        self._fast_norm = is_fast_norm()

    def forward(self, x):
        if self._fast_norm:
            x = fast_layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
        else:
            x = F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
        x = self.drop(x)
        x = self.act(x)
        return x


class LayerNormActFp32(nn.LayerNorm):

    def __init__(
            self,
            normalization_shape: Union[int, List[int], torch.Size],
            eps: float = 1e-5,
            affine: bool = True,
            apply_act: bool = True,
            act_layer: LayerType = nn.ReLU,
            act_kwargs: Dict[str, Any] = None,
            inplace: bool = True,
            drop_layer: Optional[Type[nn.Module]] = None,
            **kwargs,
    ):
        super().__init__(normalization_shape, eps=eps, elementwise_affine=affine, **kwargs)
        self.drop = drop_layer() if drop_layer is not None else nn.Identity()
        self.act = _create_act(act_layer, act_kwargs=act_kwargs, inplace=inplace, apply_act=apply_act)

    def forward(self, x):
        weight = self.weight.float() if self.weight is not None else None
        bias = self.bias.float() if self.bias is not None else None
        x = F.layer_norm(x.float(), self.normalized_shape, weight, bias, self.eps).to(x.dtype)
        x = self.drop(x)
        x = self.act(x)
        return x


class LayerNormAct2d(nn.LayerNorm):
    _fast_norm: torch.jit.Final[bool]

    def __init__(
            self,
            num_channels: int,
            eps: float = 1e-5,
            affine: bool = True,
            apply_act: bool = True,
            act_layer: LayerType = nn.ReLU,
            act_kwargs: Dict[str, Any] = None,
            inplace: bool = True,
            drop_layer: Optional[Type[nn.Module]] = None,
            **kwargs,
    ):
        super().__init__(num_channels, eps=eps, elementwise_affine=affine, **kwargs)
        self.drop = drop_layer() if drop_layer is not None else nn.Identity()
        self.act = _create_act(act_layer, act_kwargs=act_kwargs, inplace=inplace, apply_act=apply_act)
        self._fast_norm = is_fast_norm()

    def forward(self, x):
        x = x.permute(0, 2, 3, 1)
        if self._fast_norm:
            x = fast_layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
        else:
            x = F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
        x = x.permute(0, 3, 1, 2)
        x = self.drop(x)
        x = self.act(x)
        return x


class LayerNormAct2dFp32(nn.LayerNorm):

    def __init__(
            self,
            num_channels: int,
            eps: float = 1e-5,
            affine: bool = True,
            apply_act: bool = True,
            act_layer: LayerType = nn.ReLU,
            act_kwargs: Dict[str, Any] = None,
            inplace: bool = True,
            drop_layer: Optional[Type[nn.Module]] = None,
            **kwargs,
    ):
        super().__init__(num_channels, eps=eps, elementwise_affine=affine, **kwargs)
        self.drop = drop_layer() if drop_layer is not None else nn.Identity()
        self.act = _create_act(act_layer, act_kwargs=act_kwargs, inplace=inplace, apply_act=apply_act)

    def forward(self, x):
        x = x.permute(0, 2, 3, 1)
        weight = self.weight.float() if self.weight is not None else None
        bias = self.bias.float() if self.bias is not None else None
        x = F.layer_norm(x.float(), self.normalized_shape, weight, bias, self.eps).to(x.dtype)
        x = x.permute(0, 3, 1, 2)
        x = self.drop(x)
        x = self.act(x)
        return x


class RmsNormAct(RmsNorm):
    """ RMSNorm + Activation for '2D' NCHW tensors

    NOTE: It's currently (2025-05-10) faster to use an eager 2d kernel that does reduction on dim=1 than
    to permute and use internal PyTorch F.rms_norm, this may change if something like
    https://github.com/pytorch/pytorch/pull/150576 lands.
    """

    def __init__(
            self,
            num_channels: int,
            eps: float = 1e-6,
            affine: bool = True,
            apply_act: bool = True,
            act_layer: LayerType = nn.ReLU,
            act_kwargs: Dict[str, Any] = None,
            inplace: bool = True,
            drop_layer: Optional[Type[nn.Module]] = None,
            **kwargs,
    ):
        super().__init__(channels=num_channels, eps=eps, affine=affine, **kwargs)
        self.drop = drop_layer() if drop_layer is not None else nn.Identity()
        self.act = _create_act(act_layer, act_kwargs=act_kwargs, inplace=inplace, apply_act=apply_act)
        self._fast_norm = is_fast_norm()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        if self._fast_norm:
            x = fast_rms_norm(x, self.normalized_shape, self.weight, self.eps)
        else:
            x = rms_norm(x, self.normalized_shape, self.weight, self.eps)
        x = self.drop(x)
        x = self.act(x)
        return x


class RmsNormActFp32(RmsNorm):
    """ RMSNorm + Activation for '2D' NCHW tensors

    NOTE: It's currently (2025-05-10) faster to use an eager 2d kernel that does reduction on dim=1 than
    to permute and use internal PyTorch F.rms_norm, this may change if something like
    https://github.com/pytorch/pytorch/pull/150576 lands.
    """

    def __init__(
            self,
            num_channels: int,
            eps: float = 1e-6,
            affine: bool = True,
            apply_act: bool = True,
            act_layer: LayerType = nn.ReLU,
            act_kwargs: Dict[str, Any] = None,
            inplace: bool = True,
            drop_layer: Optional[Type[nn.Module]] = None,
            **kwargs,
    ):
        super().__init__(channels=num_channels, eps=eps, affine=affine, **kwargs)
        self.drop = drop_layer() if drop_layer is not None else nn.Identity()
        self.act = _create_act(act_layer, act_kwargs=act_kwargs, inplace=inplace, apply_act=apply_act)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        weight = self.weight.float() if self.weight is not None else None
        x = rms_norm(x.float(), self.normalized_shape, weight, self.eps).to(x.dtype)
        x = self.drop(x)
        x = self.act(x)
        return x


class RmsNormAct2d(RmsNorm2d):
    """ RMSNorm + Activation for '2D' NCHW tensors

    NOTE: It's currently (2025-05-10) faster to use an eager 2d kernel that does reduction on dim=1 than
    to permute and use internal PyTorch F.rms_norm, this may change if something like
    https://github.com/pytorch/pytorch/pull/150576 lands.
    """

    def __init__(
            self,
            num_channels: int,
            eps: float = 1e-6,
            affine: bool = True,
            apply_act: bool = True,
            act_layer: LayerType = nn.ReLU,
            act_kwargs: Dict[str, Any] = None,
            inplace: bool = True,
            drop_layer: Optional[Type[nn.Module]] = None,
    ):
        super().__init__(channels=num_channels, eps=eps, affine=affine)
        self.drop = drop_layer() if drop_layer is not None else nn.Identity()
        self.act = _create_act(act_layer, act_kwargs=act_kwargs, inplace=inplace, apply_act=apply_act)
        self._fast_norm = is_fast_norm()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        if self._fast_norm:
            x = fast_rms_norm2d(x, self.normalized_shape, self.weight, self.eps)
        else:
            x = rms_norm2d(x, self.normalized_shape, self.weight, self.eps)
        x = self.drop(x)
        x = self.act(x)
        return x


class RmsNormAct2dFp32(RmsNorm2d):
    """ RMSNorm + Activation for '2D' NCHW tensors

    NOTE: It's currently (2025-05-10) faster to use an eager 2d kernel that does reduction on dim=1 than
    to permute and use internal PyTorch F.rms_norm, this may change if something like
    https://github.com/pytorch/pytorch/pull/150576 lands.
    """

    def __init__(
            self,
            num_channels: int,
            eps: float = 1e-6,
            affine: bool = True,
            apply_act: bool = True,
            act_layer: LayerType = nn.ReLU,
            act_kwargs: Dict[str, Any] = None,
            inplace: bool = True,
            drop_layer: Optional[Type[nn.Module]] = None,
    ):
        super().__init__(channels=num_channels, eps=eps, affine=affine)
        self.drop = drop_layer() if drop_layer is not None else nn.Identity()
        self.act = _create_act(act_layer, act_kwargs=act_kwargs, inplace=inplace, apply_act=apply_act)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        weight = self.weight.float() if self.weight is not None else None
        x = rms_norm2d(x.float(), self.normalized_shape, weight, self.eps).to(x.dtype)
        x = self.drop(x)
        x = self.act(x)
        return x
pytorch-image-models/timm/layers/norm_act.py/0
{ "file_path": "pytorch-image-models/timm/layers/norm_act.py", "repo_id": "pytorch-image-models", "token_count": 11966 }
256
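The norm_act.py row above defines layers that fuse normalization, optional dropout, and an activation into a single module (the Fp32 variants compute the norm in float32 and cast back), plus helpers to freeze and unfreeze BatchNorm. A minimal usage sketch, assuming a recent timm install that exports these classes and the freeze/unfreeze helpers from timm.layers:

import torch
from timm.layers import GroupNormAct, LayerNormAct2d, freeze_batch_norm_2d, unfreeze_batch_norm_2d

x = torch.randn(2, 64, 16, 16)  # NCHW feature map

# GroupNorm (32 groups) followed by the default ReLU, all in one module
gn_act = GroupNormAct(64, num_groups=32)
print(gn_act(x).shape)  # torch.Size([2, 64, 16, 16])

# LayerNorm over the channel dim of an NCHW tensor (permutes to NHWC internally, then back)
ln_act = LayerNormAct2d(64)
print(ln_act(x).shape)  # torch.Size([2, 64, 16, 16])

# Round-trip BatchNorm freezing; unfreeze_batch_norm_2d (shown above) copies stats/affine params back
net = torch.nn.Sequential(torch.nn.Conv2d(3, 8, 3), torch.nn.BatchNorm2d(8), torch.nn.ReLU())
frozen = freeze_batch_norm_2d(net)       # BatchNorm2d -> FrozenBatchNorm2d
thawed = unfreeze_batch_norm_2d(frozen)  # back to BatchNorm2d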
""" Test Time Pooling (Average-Max Pool) Hacked together by / Copyright 2020 Ross Wightman """ import logging from torch import nn import torch.nn.functional as F from .adaptive_avgmax_pool import adaptive_avgmax_pool2d _logger = logging.getLogger(__name__) class TestTimePoolHead(nn.Module): def __init__(self, base, original_pool=7): super(TestTimePoolHead, self).__init__() self.base = base self.original_pool = original_pool base_fc = self.base.get_classifier() if isinstance(base_fc, nn.Conv2d): self.fc = base_fc else: self.fc = nn.Conv2d( self.base.num_features, self.base.num_classes, kernel_size=1, bias=True) self.fc.weight.data.copy_(base_fc.weight.data.view(self.fc.weight.size())) self.fc.bias.data.copy_(base_fc.bias.data.view(self.fc.bias.size())) self.base.reset_classifier(0) # delete original fc layer def forward(self, x): x = self.base.forward_features(x) x = F.avg_pool2d(x, kernel_size=self.original_pool, stride=1) x = self.fc(x) x = adaptive_avgmax_pool2d(x, 1) return x.view(x.size(0), -1) def apply_test_time_pool(model, config, use_test_size=False): test_time_pool = False if not hasattr(model, 'default_cfg') or not model.default_cfg: return model, False if use_test_size and 'test_input_size' in model.default_cfg: df_input_size = model.default_cfg['test_input_size'] else: df_input_size = model.default_cfg['input_size'] if config['input_size'][-1] > df_input_size[-1] and config['input_size'][-2] > df_input_size[-2]: _logger.info('Target input size %s > pretrained default %s, using test time pooling' % (str(config['input_size'][-2:]), str(df_input_size[-2:]))) model = TestTimePoolHead(model, original_pool=model.default_cfg['pool_size']) test_time_pool = True return model, test_time_pool
pytorch-image-models/timm/layers/test_time_pool.py/0
{ "file_path": "pytorch-image-models/timm/layers/test_time_pool.py", "repo_id": "pytorch-image-models", "token_count": 881 }
257
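The test_time_pool.py row wraps a classifier so that, when evaluation inputs exceed the pretraining resolution, features are average-pooled at the original pool size, classified convolutionally, and then avg-max pooled. A sketch of the usual wiring, assuming timm is installed and the model exposes a default_cfg with 'input_size' and 'pool_size' as above:

import timm
from timm.layers.test_time_pool import apply_test_time_pool

model = timm.create_model('resnet50', pretrained=False)
config = {'input_size': (3, 288, 288)}  # larger than the 224x224 pretrain default
model, using_ttp = apply_test_time_pool(model, config)
print(using_ttp)  # True -> model is now wrapped in TestTimePoolHead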
""" Model creation / weight loading / state_dict helpers Hacked together by / Copyright 2020 Ross Wightman """ import logging import os from typing import Any, Callable, Dict, Optional, Union import torch try: import safetensors.torch _has_safetensors = True except ImportError: _has_safetensors = False _logger = logging.getLogger(__name__) __all__ = ['clean_state_dict', 'load_state_dict', 'load_checkpoint', 'remap_state_dict', 'resume_checkpoint'] def _remove_prefix(text: str, prefix: str) -> str: # FIXME replace with 3.9 stdlib fn when min at 3.9 if text.startswith(prefix): return text[len(prefix):] return text def clean_state_dict(state_dict: Dict[str, Any]) -> Dict[str, Any]: # 'clean' checkpoint by removing .module prefix from state dict if it exists from parallel training cleaned_state_dict = {} to_remove = ( 'module.', # DDP wrapper '_orig_mod.', # torchcompile dynamo wrapper ) for k, v in state_dict.items(): for r in to_remove: k = _remove_prefix(k, r) cleaned_state_dict[k] = v return cleaned_state_dict def load_state_dict( checkpoint_path: str, use_ema: bool = True, device: Union[str, torch.device] = 'cpu', weights_only: bool = False, ) -> Dict[str, Any]: """Load state dictionary from checkpoint file. Args: checkpoint_path: Path to checkpoint file. use_ema: Whether to use EMA weights if available. device: Device to load checkpoint to. weights_only: Whether to load only weights (torch.load parameter). Returns: State dictionary loaded from checkpoint. """ if checkpoint_path and os.path.isfile(checkpoint_path): # Check if safetensors or not and load weights accordingly if str(checkpoint_path).endswith(".safetensors"): assert _has_safetensors, "`pip install safetensors` to use .safetensors" checkpoint = safetensors.torch.load_file(checkpoint_path, device=device) else: try: checkpoint = torch.load(checkpoint_path, map_location=device, weights_only=weights_only) except TypeError: checkpoint = torch.load(checkpoint_path, map_location=device) state_dict_key = '' if isinstance(checkpoint, dict): if use_ema and checkpoint.get('state_dict_ema', None) is not None: state_dict_key = 'state_dict_ema' elif use_ema and checkpoint.get('model_ema', None) is not None: state_dict_key = 'model_ema' elif 'state_dict' in checkpoint: state_dict_key = 'state_dict' elif 'model' in checkpoint: state_dict_key = 'model' state_dict = clean_state_dict(checkpoint[state_dict_key] if state_dict_key else checkpoint) _logger.info("Loaded {} from checkpoint '{}'".format(state_dict_key, checkpoint_path)) return state_dict else: _logger.error("No checkpoint found at '{}'".format(checkpoint_path)) raise FileNotFoundError() def load_checkpoint( model: torch.nn.Module, checkpoint_path: str, use_ema: bool = True, device: Union[str, torch.device] = 'cpu', strict: bool = True, remap: bool = False, filter_fn: Optional[Callable] = None, weights_only: bool = False, ) -> Any: """Load checkpoint into model. Args: model: Model to load checkpoint into. checkpoint_path: Path to checkpoint file. use_ema: Whether to use EMA weights if available. device: Device to load checkpoint to. strict: Whether to strictly enforce state_dict keys match. remap: Whether to remap state dict keys by order. filter_fn: Optional function to filter state dict. weights_only: Whether to load only weights (torch.load parameter). Returns: Incompatible keys from model.load_state_dict(). 
""" if os.path.splitext(checkpoint_path)[-1].lower() in ('.npz', '.npy'): # numpy checkpoint, try to load via model specific load_pretrained fn if hasattr(model, 'load_pretrained'): model.load_pretrained(checkpoint_path) else: raise NotImplementedError('Model cannot load numpy checkpoint') return state_dict = load_state_dict(checkpoint_path, use_ema, device=device, weights_only=weights_only) if remap: state_dict = remap_state_dict(state_dict, model) elif filter_fn: state_dict = filter_fn(state_dict, model) incompatible_keys = model.load_state_dict(state_dict, strict=strict) return incompatible_keys def remap_state_dict( state_dict: Dict[str, Any], model: torch.nn.Module, allow_reshape: bool = True ) -> Dict[str, Any]: """Remap checkpoint by iterating over state dicts in order (ignoring original keys). This assumes models (and originating state dict) were created with params registered in same order. Args: state_dict: State dict to remap. model: Model whose state dict keys to use. allow_reshape: Whether to allow reshaping tensors to match. Returns: Remapped state dictionary. """ out_dict = {} for (ka, va), (kb, vb) in zip(model.state_dict().items(), state_dict.items()): assert va.numel() == vb.numel(), f'Tensor size mismatch {ka}: {va.shape} vs {kb}: {vb.shape}. Remap failed.' if va.shape != vb.shape: if allow_reshape: vb = vb.reshape(va.shape) else: assert False, f'Tensor shape mismatch {ka}: {va.shape} vs {kb}: {vb.shape}. Remap failed.' out_dict[ka] = vb return out_dict def resume_checkpoint( model: torch.nn.Module, checkpoint_path: str, optimizer: Optional[torch.optim.Optimizer] = None, loss_scaler: Optional[Any] = None, log_info: bool = True, ) -> Optional[int]: """Resume training from checkpoint. Args: model: Model to load checkpoint into. checkpoint_path: Path to checkpoint file. optimizer: Optional optimizer to restore state. loss_scaler: Optional AMP loss scaler to restore state. log_info: Whether to log loading info. Returns: Resume epoch number if available, else None. """ resume_epoch = None if os.path.isfile(checkpoint_path): checkpoint = torch.load(checkpoint_path, map_location='cpu', weights_only=False) if isinstance(checkpoint, dict) and 'state_dict' in checkpoint: if log_info: _logger.info('Restoring model state from checkpoint...') state_dict = clean_state_dict(checkpoint['state_dict']) model.load_state_dict(state_dict) if optimizer is not None and 'optimizer' in checkpoint: if log_info: _logger.info('Restoring optimizer state from checkpoint...') optimizer.load_state_dict(checkpoint['optimizer']) if loss_scaler is not None and loss_scaler.state_dict_key in checkpoint: if log_info: _logger.info('Restoring AMP loss scaler state from checkpoint...') loss_scaler.load_state_dict(checkpoint[loss_scaler.state_dict_key]) if 'epoch' in checkpoint: resume_epoch = checkpoint['epoch'] if 'version' in checkpoint and checkpoint['version'] > 1: resume_epoch += 1 # start at the next epoch, old checkpoints incremented before save if log_info: _logger.info("Loaded checkpoint '{}' (epoch {})".format(checkpoint_path, checkpoint['epoch'])) else: model.load_state_dict(checkpoint) if log_info: _logger.info("Loaded checkpoint '{}'".format(checkpoint_path)) return resume_epoch else: _logger.error("No checkpoint found at '{}'".format(checkpoint_path)) raise FileNotFoundError()
pytorch-image-models/timm/models/_helpers.py/0
{ "file_path": "pytorch-image-models/timm/models/_helpers.py", "repo_id": "pytorch-image-models", "token_count": 3370 }
258
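The _helpers.py row centres on clean_state_dict, which strips DDP ('module.') and torch.compile ('_orig_mod.') key prefixes before weights are loaded. A small, self-contained illustration; the import path is an assumption (these helpers are typically re-exported from timm.models, with timm.models._helpers being the internal module):

import torch
from timm.models import clean_state_dict, load_checkpoint  # assumed re-exports of timm.models._helpers

sd = {
    'module._orig_mod.stem.weight': torch.zeros(1),  # DDP + torch.compile wrapped key
    'module.head.bias': torch.zeros(1),
}
print(list(clean_state_dict(sd)))  # ['stem.weight', 'head.bias']

# Loading from disk would then look like, e.g.:
# load_checkpoint(model, 'checkpoint.pth.tar', use_ema=True, strict=True)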
""" ConViT Model @article{d2021convit, title={ConViT: Improving Vision Transformers with Soft Convolutional Inductive Biases}, author={d'Ascoli, St{\'e}phane and Touvron, Hugo and Leavitt, Matthew and Morcos, Ari and Biroli, Giulio and Sagun, Levent}, journal={arXiv preprint arXiv:2103.10697}, year={2021} } Paper link: https://arxiv.org/abs/2103.10697 Original code: https://github.com/facebookresearch/convit, original copyright below Modifications and additions for timm hacked together by / Copyright 2021, Ross Wightman """ # Copyright (c) 2015-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the CC-by-NC license found in the # LICENSE file in the root directory of this source tree. # '''These modules are adapted from those of timm, see https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py ''' from typing import Optional import torch import torch.nn as nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import DropPath, trunc_normal_, PatchEmbed, Mlp, LayerNorm, HybridEmbed from ._builder import build_model_with_cfg from ._features_fx import register_notrace_module from ._registry import register_model, generate_default_cfgs __all__ = ['ConVit'] @register_notrace_module # reason: FX can't symbolically trace control flow in forward method class GPSA(nn.Module): def __init__( self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0., locality_strength=1., ): super().__init__() self.num_heads = num_heads self.dim = dim head_dim = dim // num_heads self.scale = head_dim ** -0.5 self.locality_strength = locality_strength self.qk = nn.Linear(dim, dim * 2, bias=qkv_bias) self.v = nn.Linear(dim, dim, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.pos_proj = nn.Linear(3, num_heads) self.proj_drop = nn.Dropout(proj_drop) self.gating_param = nn.Parameter(torch.ones(self.num_heads)) self.rel_indices: torch.Tensor = torch.zeros(1, 1, 1, 3) # silly torchscript hack, won't work with None def forward(self, x): B, N, C = x.shape if self.rel_indices is None or self.rel_indices.shape[1] != N: self.rel_indices = self.get_rel_indices(N) attn = self.get_attention(x) v = self.v(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3) x = (attn @ v).transpose(1, 2).reshape(B, N, C) x = self.proj(x) x = self.proj_drop(x) return x def get_attention(self, x): B, N, C = x.shape qk = self.qk(x).reshape(B, N, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) q, k = qk[0], qk[1] pos_score = self.rel_indices.expand(B, -1, -1, -1) pos_score = self.pos_proj(pos_score).permute(0, 3, 1, 2) patch_score = (q @ k.transpose(-2, -1)) * self.scale patch_score = patch_score.softmax(dim=-1) pos_score = pos_score.softmax(dim=-1) gating = self.gating_param.view(1, -1, 1, 1) attn = (1. 
- torch.sigmoid(gating)) * patch_score + torch.sigmoid(gating) * pos_score attn /= attn.sum(dim=-1).unsqueeze(-1) attn = self.attn_drop(attn) return attn def get_attention_map(self, x, return_map=False): attn_map = self.get_attention(x).mean(0) # average over batch distances = self.rel_indices.squeeze()[:, :, -1] ** .5 dist = torch.einsum('nm,hnm->h', (distances, attn_map)) / distances.size(0) if return_map: return dist, attn_map else: return dist def local_init(self): self.v.weight.data.copy_(torch.eye(self.dim)) locality_distance = 1 # max(1,1/locality_strength**.5) kernel_size = int(self.num_heads ** .5) center = (kernel_size - 1) / 2 if kernel_size % 2 == 0 else kernel_size // 2 for h1 in range(kernel_size): for h2 in range(kernel_size): position = h1 + kernel_size * h2 self.pos_proj.weight.data[position, 2] = -1 self.pos_proj.weight.data[position, 1] = 2 * (h1 - center) * locality_distance self.pos_proj.weight.data[position, 0] = 2 * (h2 - center) * locality_distance self.pos_proj.weight.data *= self.locality_strength def get_rel_indices(self, num_patches: int) -> torch.Tensor: img_size = int(num_patches ** .5) rel_indices = torch.zeros(1, num_patches, num_patches, 3) ind = torch.arange(img_size).view(1, -1) - torch.arange(img_size).view(-1, 1) indx = ind.repeat(img_size, img_size) indy = ind.repeat_interleave(img_size, dim=0).repeat_interleave(img_size, dim=1) indd = indx ** 2 + indy ** 2 rel_indices[:, :, :, 2] = indd.unsqueeze(0) rel_indices[:, :, :, 1] = indy.unsqueeze(0) rel_indices[:, :, :, 0] = indx.unsqueeze(0) device = self.qk.weight.device return rel_indices.to(device) class MHSA(nn.Module): def __init__( self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0., ): super().__init__() self.num_heads = num_heads head_dim = dim // num_heads self.scale = head_dim ** -0.5 self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) def get_attention_map(self, x, return_map=False): B, N, C = x.shape qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) q, k, v = qkv[0], qkv[1], qkv[2] attn_map = (q @ k.transpose(-2, -1)) * self.scale attn_map = attn_map.softmax(dim=-1).mean(0) img_size = int(N ** .5) ind = torch.arange(img_size).view(1, -1) - torch.arange(img_size).view(-1, 1) indx = ind.repeat(img_size, img_size) indy = ind.repeat_interleave(img_size, dim=0).repeat_interleave(img_size, dim=1) indd = indx ** 2 + indy ** 2 distances = indd ** .5 distances = distances.to(x.device) dist = torch.einsum('nm,hnm->h', (distances, attn_map)) / N if return_map: return dist, attn_map else: return dist def forward(self, x): B, N, C = x.shape qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) q, k, v = qkv.unbind(0) attn = (q @ k.transpose(-2, -1)) * self.scale attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = (attn @ v).transpose(1, 2).reshape(B, N, C) x = self.proj(x) x = self.proj_drop(x) return x class Block(nn.Module): def __init__( self, dim, num_heads, mlp_ratio=4., qkv_bias=False, proj_drop=0., attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=LayerNorm, use_gpsa=True, locality_strength=1., ): super().__init__() self.norm1 = norm_layer(dim) self.use_gpsa = use_gpsa if self.use_gpsa: self.attn = GPSA( dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=proj_drop, locality_strength=locality_strength, ) else: self.attn = MHSA( dim, num_heads=num_heads, 
qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=proj_drop, ) self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() self.norm2 = norm_layer(dim) mlp_hidden_dim = int(dim * mlp_ratio) self.mlp = Mlp( in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=proj_drop, ) def forward(self, x): x = x + self.drop_path(self.attn(self.norm1(x))) x = x + self.drop_path(self.mlp(self.norm2(x))) return x class ConVit(nn.Module): """ Vision Transformer with support for patch or hybrid CNN input stage """ def __init__( self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, global_pool='token', embed_dim=768, depth=12, num_heads=12, mlp_ratio=4., qkv_bias=False, drop_rate=0., pos_drop_rate=0., proj_drop_rate=0., attn_drop_rate=0., drop_path_rate=0., hybrid_backbone=None, norm_layer=LayerNorm, local_up_to_layer=3, locality_strength=1., use_pos_embed=True, ): super().__init__() assert global_pool in ('', 'avg', 'token') embed_dim *= num_heads self.num_classes = num_classes self.global_pool = global_pool self.local_up_to_layer = local_up_to_layer self.num_features = self.head_hidden_size = self.embed_dim = embed_dim # for consistency with other models self.locality_strength = locality_strength self.use_pos_embed = use_pos_embed if hybrid_backbone is not None: self.patch_embed = HybridEmbed( hybrid_backbone, img_size=img_size, in_chans=in_chans, embed_dim=embed_dim) else: self.patch_embed = PatchEmbed( img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim, ) num_patches = self.patch_embed.num_patches self.num_patches = num_patches self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) self.pos_drop = nn.Dropout(p=pos_drop_rate) if self.use_pos_embed: self.pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim)) trunc_normal_(self.pos_embed, std=.02) dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule self.blocks = nn.ModuleList([ Block( dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, proj_drop=proj_drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, use_gpsa=i < local_up_to_layer, locality_strength=locality_strength, ) for i in range(depth)]) self.norm = norm_layer(embed_dim) # Classifier head self.feature_info = [dict(num_chs=embed_dim, reduction=0, module='head')] self.head_drop = nn.Dropout(drop_rate) self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity() trunc_normal_(self.cls_token, std=.02) self.apply(self._init_weights) for n, m in self.named_modules(): if hasattr(m, 'local_init'): m.local_init() def _init_weights(self, m): if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=.02) if isinstance(m, nn.Linear) and m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.LayerNorm): nn.init.constant_(m.bias, 0) nn.init.constant_(m.weight, 1.0) @torch.jit.ignore def no_weight_decay(self): return {'pos_embed', 'cls_token'} @torch.jit.ignore def group_matcher(self, coarse=False): return dict( stem=r'^cls_token|pos_embed|patch_embed', # stem and embed blocks=[(r'^blocks\.(\d+)', None), (r'^norm', (99999,))] ) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): assert not enable, 'gradient checkpointing not supported' @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.head def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None): self.num_classes = num_classes if global_pool is not None: assert global_pool in ('', 'token', 
'avg') self.global_pool = global_pool self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() def forward_features(self, x): x = self.patch_embed(x) if self.use_pos_embed: x = x + self.pos_embed x = self.pos_drop(x) cls_tokens = self.cls_token.expand(x.shape[0], -1, -1) for u, blk in enumerate(self.blocks): if u == self.local_up_to_layer: x = torch.cat((cls_tokens, x), dim=1) x = blk(x) x = self.norm(x) return x def forward_head(self, x, pre_logits: bool = False): if self.global_pool: x = x[:, 1:].mean(dim=1) if self.global_pool == 'avg' else x[:, 0] x = self.head_drop(x) return x if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _create_convit(variant, pretrained=False, **kwargs): if kwargs.get('features_only', None): raise RuntimeError('features_only not implemented for Vision Transformer models.') return build_model_with_cfg(ConVit, variant, pretrained, **kwargs) def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'fixed_input_size': True, 'first_conv': 'patch_embed.proj', 'classifier': 'head', **kwargs } default_cfgs = generate_default_cfgs({ # ConViT 'convit_tiny.fb_in1k': _cfg(hf_hub_id='timm/'), 'convit_small.fb_in1k': _cfg(hf_hub_id='timm/'), 'convit_base.fb_in1k': _cfg(hf_hub_id='timm/') }) @register_model def convit_tiny(pretrained=False, **kwargs) -> ConVit: model_args = dict( local_up_to_layer=10, locality_strength=1.0, embed_dim=48, num_heads=4) model = _create_convit(variant='convit_tiny', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convit_small(pretrained=False, **kwargs) -> ConVit: model_args = dict( local_up_to_layer=10, locality_strength=1.0, embed_dim=48, num_heads=9) model = _create_convit(variant='convit_small', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convit_base(pretrained=False, **kwargs) -> ConVit: model_args = dict( local_up_to_layer=10, locality_strength=1.0, embed_dim=48, num_heads=16) model = _create_convit(variant='convit_base', pretrained=pretrained, **dict(model_args, **kwargs)) return model
pytorch-image-models/timm/models/convit.py/0
{ "file_path": "pytorch-image-models/timm/models/convit.py", "repo_id": "pytorch-image-models", "token_count": 7721 }
259
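For the convit.py row: the first local_up_to_layer blocks use gated positional self-attention (GPSA), and the class token is only concatenated once the plain MHSA blocks begin; the default cfg marks the input size as fixed at 224x224. A quick forward-pass sketch, assuming timm is installed (pretrained=False avoids any weight download):

import torch
import timm

model = timm.create_model('convit_tiny', pretrained=False).eval()
x = torch.randn(1, 3, 224, 224)  # fixed_input_size=True in the default cfg
with torch.no_grad():
    logits = model(x)
print(logits.shape)     # torch.Size([1, 1000])
print(model.embed_dim)  # 192: embed_dim (48) is multiplied by num_heads (4) inside ConVit.__init__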