sample_id
stringlengths
21
196
text
stringlengths
105
936k
metadata
dict
category
stringclasses
6 values
huggingface/diffusers:src/diffusers/pipelines/sana/pipeline_sana_sprint.py
# Copyright 2025 SANA-Sprint Authors and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import html import inspect import re import urllib.parse as ul import warnings from typing import Any, Callable import torch from transformers import Gemma2PreTrainedModel, GemmaTokenizer, GemmaTokenizerFast from ...callbacks import MultiPipelineCallbacks, PipelineCallback from ...image_processor import PixArtImageProcessor from ...loaders import SanaLoraLoaderMixin from ...models import AutoencoderDC, SanaTransformer2DModel from ...schedulers import DPMSolverMultistepScheduler from ...utils import ( BACKENDS_MAPPING, USE_PEFT_BACKEND, deprecate, is_bs4_available, is_ftfy_available, is_torch_xla_available, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers, ) from ...utils.torch_utils import get_device, is_torch_version, randn_tensor from ..pipeline_utils import DiffusionPipeline from ..pixart_alpha.pipeline_pixart_alpha import ASPECT_RATIO_1024_BIN from .pipeline_output import SanaPipelineOutput if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) # pylint: disable=invalid-name if is_bs4_available(): from bs4 import BeautifulSoup if is_ftfy_available(): import ftfy EXAMPLE_DOC_STRING = """ Examples: ```py >>> import torch >>> from diffusers import SanaSprintPipeline >>> pipe = SanaSprintPipeline.from_pretrained( ... 
"Efficient-Large-Model/Sana_Sprint_1.6B_1024px_diffusers", torch_dtype=torch.bfloat16 ... ) >>> pipe.to("cuda") >>> image = pipe(prompt="a tiny astronaut hatching from an egg on the moon")[0] >>> image[0].save("output.png") ``` """ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps def retrieve_timesteps( scheduler, num_inference_steps: int | None = None, device: str | torch.device | None = None, timesteps: list[int] | None = None, sigmas: list[float] | None = None, **kwargs, ): r""" Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. Args: scheduler (`SchedulerMixin`): The scheduler to get timesteps from. num_inference_steps (`int`): The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` must be `None`. device (`str` or `torch.device`, *optional*): The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. timesteps (`list[int]`, *optional*): Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, `num_inference_steps` and `sigmas` must be `None`. sigmas (`list[float]`, *optional*): Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, `num_inference_steps` and `timesteps` must be `None`. Returns: `tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the second element is the number of inference steps. """ if timesteps is not None and sigmas is not None: raise ValueError("Only one of `timesteps` or `sigmas` can be passed. 
Please choose one to set custom values") if timesteps is not None: accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" timestep schedules. Please check whether you are using the correct scheduler." ) scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif sigmas is not None: accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accept_sigmas: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" sigmas schedules. Please check whether you are using the correct scheduler." ) scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return timesteps, num_inference_steps class SanaSprintPipeline(DiffusionPipeline, SanaLoraLoaderMixin): r""" Pipeline for text-to-image generation using [SANA-Sprint](https://huggingface.co/papers/2503.09641). 
""" # fmt: off bad_punct_regex = re.compile(r"[" + "#®•©™&@·º½¾¿¡§~" + r"\)" + r"\(" + r"\]" + r"\[" + r"\}" + r"\{" + r"\|" + "\\" + r"\/" + r"\*" + r"]{1,}") # fmt: on model_cpu_offload_seq = "text_encoder->transformer->vae" _callback_tensor_inputs = ["latents", "prompt_embeds"] def __init__( self, tokenizer: GemmaTokenizer | GemmaTokenizerFast, text_encoder: Gemma2PreTrainedModel, vae: AutoencoderDC, transformer: SanaTransformer2DModel, scheduler: DPMSolverMultistepScheduler, ): super().__init__() self.register_modules( tokenizer=tokenizer, text_encoder=text_encoder, vae=vae, transformer=transformer, scheduler=scheduler ) self.vae_scale_factor = ( 2 ** (len(self.vae.config.encoder_block_out_channels) - 1) if hasattr(self, "vae") and self.vae is not None else 32 ) self.image_processor = PixArtImageProcessor(vae_scale_factor=self.vae_scale_factor) def enable_vae_slicing(self): r""" Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. """ depr_message = f"Calling `enable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_slicing()`." deprecate( "enable_vae_slicing", "0.40.0", depr_message, ) self.vae.enable_slicing() def disable_vae_slicing(self): r""" Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to computing decoding in one step. """ depr_message = f"Calling `disable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_slicing()`." deprecate( "disable_vae_slicing", "0.40.0", depr_message, ) self.vae.disable_slicing() def enable_vae_tiling(self): r""" Enable tiled VAE decoding. 
When this option is enabled, the VAE will split the input tensor into tiles to compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow processing larger images. """ depr_message = f"Calling `enable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_tiling()`." deprecate( "enable_vae_tiling", "0.40.0", depr_message, ) self.vae.enable_tiling() def disable_vae_tiling(self): r""" Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to computing decoding in one step. """ depr_message = f"Calling `disable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_tiling()`." deprecate( "disable_vae_tiling", "0.40.0", depr_message, ) self.vae.disable_tiling() # Copied from diffusers.pipelines.sana.pipeline_sana.SanaPipeline._get_gemma_prompt_embeds def _get_gemma_prompt_embeds( self, prompt: str | list[str], device: torch.device, dtype: torch.dtype, clean_caption: bool = False, max_sequence_length: int = 300, complex_human_instruction: list[str] | None = None, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `list[str]`, *optional*): prompt to be encoded device: (`torch.device`, *optional*): torch device to place the resulting embeddings on clean_caption (`bool`, defaults to `False`): If `True`, the function will preprocess and clean the provided caption before encoding. max_sequence_length (`int`, defaults to 300): Maximum sequence length to use for the prompt. complex_human_instruction (`list[str]`, defaults to `complex_human_instruction`): If `complex_human_instruction` is not empty, the function will use the complex Human instruction for the prompt. 
""" prompt = [prompt] if isinstance(prompt, str) else prompt if getattr(self, "tokenizer", None) is not None: self.tokenizer.padding_side = "right" prompt = self._text_preprocessing(prompt, clean_caption=clean_caption) # prepare complex human instruction if not complex_human_instruction: max_length_all = max_sequence_length else: chi_prompt = "\n".join(complex_human_instruction) prompt = [chi_prompt + p for p in prompt] num_chi_prompt_tokens = len(self.tokenizer.encode(chi_prompt)) max_length_all = num_chi_prompt_tokens + max_sequence_length - 2 text_inputs = self.tokenizer( prompt, padding="max_length", max_length=max_length_all, truncation=True, add_special_tokens=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids prompt_attention_mask = text_inputs.attention_mask prompt_attention_mask = prompt_attention_mask.to(device) prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=prompt_attention_mask) prompt_embeds = prompt_embeds[0].to(dtype=dtype, device=device) return prompt_embeds, prompt_attention_mask def encode_prompt( self, prompt: str | list[str], num_images_per_prompt: int = 1, device: torch.device | None = None, prompt_embeds: torch.Tensor | None = None, prompt_attention_mask: torch.Tensor | None = None, clean_caption: bool = False, max_sequence_length: int = 300, complex_human_instruction: list[str] | None = None, lora_scale: float | None = None, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `list[str]`, *optional*): prompt to be encoded num_images_per_prompt (`int`, *optional*, defaults to 1): number of images that should be generated per prompt device: (`torch.device`, *optional*): torch device to place the resulting embeddings on prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. 
clean_caption (`bool`, defaults to `False`): If `True`, the function will preprocess and clean the provided caption before encoding. max_sequence_length (`int`, defaults to 300): Maximum sequence length to use for the prompt. complex_human_instruction (`list[str]`, defaults to `complex_human_instruction`): If `complex_human_instruction` is not empty, the function will use the complex Human instruction for the prompt. """ if device is None: device = self._execution_device if self.text_encoder is not None: dtype = self.text_encoder.dtype else: dtype = None # set lora scale so that monkey patched LoRA # function of text encoder can correctly access it if lora_scale is not None and isinstance(self, SanaLoraLoaderMixin): self._lora_scale = lora_scale # dynamically adjust the LoRA scale if self.text_encoder is not None and USE_PEFT_BACKEND: scale_lora_layers(self.text_encoder, lora_scale) if getattr(self, "tokenizer", None) is not None: self.tokenizer.padding_side = "right" # See Section 3.1. of the paper. 
max_length = max_sequence_length select_index = [0] + list(range(-max_length + 1, 0)) if prompt_embeds is None: prompt_embeds, prompt_attention_mask = self._get_gemma_prompt_embeds( prompt=prompt, device=device, dtype=dtype, clean_caption=clean_caption, max_sequence_length=max_sequence_length, complex_human_instruction=complex_human_instruction, ) prompt_embeds = prompt_embeds[:, select_index] prompt_attention_mask = prompt_attention_mask[:, select_index] bs_embed, seq_len, _ = prompt_embeds.shape # duplicate text embeddings and attention mask for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) prompt_attention_mask = prompt_attention_mask.view(bs_embed, -1) prompt_attention_mask = prompt_attention_mask.repeat(num_images_per_prompt, 1) if self.text_encoder is not None: if isinstance(self, SanaLoraLoaderMixin) and USE_PEFT_BACKEND: # Retrieve the original scale by scaling back the LoRA layers unscale_lora_layers(self.text_encoder, lora_scale) return prompt_embeds, prompt_attention_mask # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://huggingface.co/papers/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs def check_inputs( self, prompt, height, width, num_inference_steps, timesteps, max_timesteps, intermediate_timesteps, callback_on_step_end_tensor_inputs=None, prompt_embeds=None, prompt_attention_mask=None, ): if height % 32 != 0 or width % 32 != 0: raise ValueError(f"`height` and `width` have to be divisible by 32 but are {height} and {width}.") if callback_on_step_end_tensor_inputs is not None and not all( k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs ): raise ValueError( f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" ) if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." 
) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if prompt_embeds is not None and prompt_attention_mask is None: raise ValueError("Must provide `prompt_attention_mask` when specifying `prompt_embeds`.") if timesteps is not None and len(timesteps) != num_inference_steps + 1: raise ValueError("If providing custom timesteps, `timesteps` must be of length `num_inference_steps + 1`.") if timesteps is not None and max_timesteps is not None: raise ValueError("If providing custom timesteps, `max_timesteps` should not be provided.") if timesteps is None and max_timesteps is None: raise ValueError("Should provide either `timesteps` or `max_timesteps`.") if intermediate_timesteps is not None and num_inference_steps != 2: raise ValueError("Intermediate timesteps for SCM is not supported when num_inference_steps != 2.") # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline._text_preprocessing def _text_preprocessing(self, text, clean_caption=False): if clean_caption and not is_bs4_available(): logger.warning(BACKENDS_MAPPING["bs4"][-1].format("Setting `clean_caption=True`")) logger.warning("Setting `clean_caption` to False...") clean_caption = False if clean_caption and not is_ftfy_available(): logger.warning(BACKENDS_MAPPING["ftfy"][-1].format("Setting `clean_caption=True`")) logger.warning("Setting `clean_caption` to False...") clean_caption = False if not isinstance(text, (tuple, list)): text = [text] def process(text: str): if clean_caption: text = self._clean_caption(text) text = self._clean_caption(text) else: text = text.lower().strip() return text return [process(t) for t in text] # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline._clean_caption def _clean_caption(self, caption): caption = str(caption) caption = ul.unquote_plus(caption) caption = caption.strip().lower() caption = re.sub("<person>", 
"person", caption) # urls: caption = re.sub( r"\b((?:https?:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))", # noqa "", caption, ) # regex for urls caption = re.sub( r"\b((?:www:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))", # noqa "", caption, ) # regex for urls # html: caption = BeautifulSoup(caption, features="html.parser").text # @<nickname> caption = re.sub(r"@[\w\d]+\b", "", caption) # 31C0—31EF CJK Strokes # 31F0—31FF Katakana Phonetic Extensions # 3200—32FF Enclosed CJK Letters and Months # 3300—33FF CJK Compatibility # 3400—4DBF CJK Unified Ideographs Extension A # 4DC0—4DFF Yijing Hexagram Symbols # 4E00—9FFF CJK Unified Ideographs caption = re.sub(r"[\u31c0-\u31ef]+", "", caption) caption = re.sub(r"[\u31f0-\u31ff]+", "", caption) caption = re.sub(r"[\u3200-\u32ff]+", "", caption) caption = re.sub(r"[\u3300-\u33ff]+", "", caption) caption = re.sub(r"[\u3400-\u4dbf]+", "", caption) caption = re.sub(r"[\u4dc0-\u4dff]+", "", caption) caption = re.sub(r"[\u4e00-\u9fff]+", "", caption) ####################################################### # все виды тире / all types of dash --> "-" caption = re.sub( r"[\u002D\u058A\u05BE\u1400\u1806\u2010-\u2015\u2E17\u2E1A\u2E3A\u2E3B\u2E40\u301C\u3030\u30A0\uFE31\uFE32\uFE58\uFE63\uFF0D]+", # noqa "-", caption, ) # кавычки к одному стандарту caption = re.sub(r"[`´«»“”¨]", '"', caption) caption = re.sub(r"[‘’]", "'", caption) # &quot; caption = re.sub(r"&quot;?", "", caption) # &amp caption = re.sub(r"&amp", "", caption) # ip addresses: caption = re.sub(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}", " ", caption) # article ids: caption = re.sub(r"\d:\d\d\s+$", "", caption) # \n caption = re.sub(r"\\n", " ", caption) # "#123" caption = re.sub(r"#\d{1,3}\b", "", caption) # "#12345.." caption = re.sub(r"#\d{5,}\b", "", caption) # "123456.." 
caption = re.sub(r"\b\d{6,}\b", "", caption) # filenames: caption = re.sub(r"[\S]+\.(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)", "", caption) # caption = re.sub(r"[\"\']{2,}", r'"', caption) # """AUSVERKAUFT""" caption = re.sub(r"[\.]{2,}", r" ", caption) # """AUSVERKAUFT""" caption = re.sub(self.bad_punct_regex, r" ", caption) # ***AUSVERKAUFT***, #AUSVERKAUFT caption = re.sub(r"\s+\.\s+", r" ", caption) # " . " # this-is-my-cute-cat / this_is_my_cute_cat regex2 = re.compile(r"(?:\-|\_)") if len(re.findall(regex2, caption)) > 3: caption = re.sub(regex2, " ", caption) caption = ftfy.fix_text(caption) caption = html.unescape(html.unescape(caption)) caption = re.sub(r"\b[a-zA-Z]{1,3}\d{3,15}\b", "", caption) # jc6640 caption = re.sub(r"\b[a-zA-Z]+\d+[a-zA-Z]+\b", "", caption) # jc6640vc caption = re.sub(r"\b\d+[a-zA-Z]+\d+\b", "", caption) # 6640vc231 caption = re.sub(r"(worldwide\s+)?(free\s+)?shipping", "", caption) caption = re.sub(r"(free\s)?download(\sfree)?", "", caption) caption = re.sub(r"\bclick\b\s(?:for|on)\s\w+", "", caption) caption = re.sub(r"\b(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)(\simage[s]?)?", "", caption) caption = re.sub(r"\bpage\s+\d+\b", "", caption) caption = re.sub(r"\b\d*[a-zA-Z]+\d+[a-zA-Z]+\d+[a-zA-Z\d]*\b", r" ", caption) # j2d1a2a... 
caption = re.sub(r"\b\d+\.?\d*[xх×]\d+\.?\d*\b", "", caption) caption = re.sub(r"\b\s+\:\s+", r": ", caption) caption = re.sub(r"(\D[,\./])\b", r"\1 ", caption) caption = re.sub(r"\s+", " ", caption) caption.strip() caption = re.sub(r"^[\"\']([\w\W]+)[\"\']$", r"\1", caption) caption = re.sub(r"^[\'\_,\-\:;]", r"", caption) caption = re.sub(r"[\'\_,\-\:\-\+]$", r"", caption) caption = re.sub(r"^\.\S+$", "", caption) return caption.strip() # Copied from diffusers.pipelines.sana.pipeline_sana.SanaPipeline.prepare_latents def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): if latents is not None: return latents.to(device=device, dtype=dtype) shape = ( batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor, ) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
) latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) return latents @property def guidance_scale(self): return self._guidance_scale @property def attention_kwargs(self): return self._attention_kwargs @property def num_timesteps(self): return self._num_timesteps @property def interrupt(self): return self._interrupt @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, prompt: str | list[str] = None, num_inference_steps: int = 2, timesteps: list[int] = None, max_timesteps: float = 1.57080, intermediate_timesteps: float = 1.3, guidance_scale: float = 4.5, num_images_per_prompt: int | None = 1, height: int = 1024, width: int = 1024, eta: float = 0.0, generator: torch.Generator | list[torch.Generator] | None = None, latents: torch.Tensor | None = None, prompt_embeds: torch.Tensor | None = None, prompt_attention_mask: torch.Tensor | None = None, output_type: str | None = "pil", return_dict: bool = True, clean_caption: bool = False, use_resolution_binning: bool = True, attention_kwargs: dict[str, Any] | None = None, callback_on_step_end: Callable[[int, int], None] | None = None, callback_on_step_end_tensor_inputs: list[str] = ["latents"], max_sequence_length: int = 300, complex_human_instruction: list[str] = [ "Given a user prompt, generate an 'Enhanced prompt' that provides detailed visual descriptions suitable for image generation. 
Evaluate the level of detail in the user prompt:", "- If the prompt is simple, focus on adding specifics about colors, shapes, sizes, textures, and spatial relationships to create vivid and concrete scenes.", "- If the prompt is already detailed, refine and enhance the existing details slightly without overcomplicating.", "Here are examples of how to transform or refine prompts:", "- User Prompt: A cat sleeping -> Enhanced: A small, fluffy white cat curled up in a round shape, sleeping peacefully on a warm sunny windowsill, surrounded by pots of blooming red flowers.", "- User Prompt: A busy city street -> Enhanced: A bustling city street scene at dusk, featuring glowing street lamps, a diverse crowd of people in colorful clothing, and a double-decker bus passing by towering glass skyscrapers.", "Please generate only the enhanced description for the prompt below and avoid including any additional commentary or evaluations:", "User Prompt: ", ], ) -> SanaPipelineOutput | tuple: """ Function invoked when calling the pipeline for generation. Args: prompt (`str` or `list[str]`, *optional*): The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds` instead. num_inference_steps (`int`, *optional*, defaults to 2): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. max_timesteps (`float`, *optional*, defaults to 1.57080): The maximum timestep value used in the SCM scheduler. intermediate_timesteps (`float`, *optional*, defaults to 1.3): The intermediate timestep value used in SCM scheduler (only used when num_inference_steps=2). timesteps (`list[int]`, *optional*): Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed will be used. Must be in descending order. 
guidance_scale (`float`, *optional*, defaults to 4.5): Embedded guidance scale is enabled by setting `guidance_scale` > 1. Higher `guidance_scale` encourages a model to generate images more aligned with `prompt` at the expense of lower image quality. Guidance-distilled models approximate true classifier-free guidance for `guidance_scale` > 1. Refer to the [paper](https://huggingface.co/papers/2210.03142) to learn more. num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. height (`int`, *optional*, defaults to self.unet.config.sample_size): The height in pixels of the generated image. width (`int`, *optional*, defaults to self.unet.config.sample_size): The width in pixels of the generated image. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) in the DDIM paper: https://huggingface.co/papers/2010.02502. Only applies to [`schedulers.DDIMScheduler`], will be ignored for others. generator (`torch.Generator` or `list[torch.Generator]`, *optional*): One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. prompt_attention_mask (`torch.Tensor`, *optional*): Pre-generated attention mask for text embeddings. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generated image. 
Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.IFPipelineOutput`] instead of a plain tuple. attention_kwargs: A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). clean_caption (`bool`, *optional*, defaults to `True`): Whether or not to clean the caption before creating embeddings. Requires `beautifulsoup4` and `ftfy` to be installed. If the dependencies are not installed, the embeddings will be created from the raw prompt. use_resolution_binning (`bool` defaults to `True`): If set to `True`, the requested height and width are first mapped to the closest resolutions using `ASPECT_RATIO_1024_BIN`. After the produced latents are decoded into images, they are resized back to the requested resolution. Useful for generating non-square images. callback_on_step_end (`Callable`, *optional*): A function that calls at the end of each denoising steps during the inference. The function is called with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by `callback_on_step_end_tensor_inputs`. callback_on_step_end_tensor_inputs (`list`, *optional*): The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the `._callback_tensor_inputs` attribute of your pipeline class. max_sequence_length (`int` defaults to `300`): Maximum sequence length to use with the `prompt`. 
complex_human_instruction (`list[str]`, *optional*): Instructions for complex human attention: https://github.com/NVlabs/Sana/blob/main/configs/sana_app_config/Sana_1600M_app.yaml#L55. Examples: Returns: [`~pipelines.sana.pipeline_output.SanaPipelineOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.sana.pipeline_output.SanaPipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated images """ if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs # 1. Check inputs. Raise error if not correct if use_resolution_binning: if self.transformer.config.sample_size == 32: aspect_ratio_bin = ASPECT_RATIO_1024_BIN else: raise ValueError("Invalid sample size") orig_height, orig_width = height, width height, width = self.image_processor.classify_height_width_bin(height, width, ratios=aspect_ratio_bin) self.check_inputs( prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, timesteps=timesteps, max_timesteps=max_timesteps, intermediate_timesteps=intermediate_timesteps, callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, prompt_embeds=prompt_embeds, prompt_attention_mask=prompt_attention_mask, ) self._guidance_scale = guidance_scale self._attention_kwargs = attention_kwargs self._interrupt = False # 2. Default height and width to transformer if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device lora_scale = self.attention_kwargs.get("scale", None) if self.attention_kwargs is not None else None # 3. 
Encode input prompt ( prompt_embeds, prompt_attention_mask, ) = self.encode_prompt( prompt, num_images_per_prompt=num_images_per_prompt, device=device, prompt_embeds=prompt_embeds, prompt_attention_mask=prompt_attention_mask, clean_caption=clean_caption, max_sequence_length=max_sequence_length, complex_human_instruction=complex_human_instruction, lora_scale=lora_scale, ) # 4. Prepare timesteps if XLA_AVAILABLE: timestep_device = "cpu" else: timestep_device = device timesteps, num_inference_steps = retrieve_timesteps( self.scheduler, num_inference_steps, timestep_device, timesteps, sigmas=None, max_timesteps=max_timesteps, intermediate_timesteps=intermediate_timesteps, ) if hasattr(self.scheduler, "set_begin_index"): self.scheduler.set_begin_index(0) # 5. Prepare latents. latent_channels = self.transformer.config.in_channels latents = self.prepare_latents( batch_size * num_images_per_prompt, latent_channels, height, width, torch.float32, device, generator, latents, ) latents = latents * self.scheduler.config.sigma_data guidance = torch.full([1], guidance_scale, device=device, dtype=torch.float32) guidance = guidance.expand(latents.shape[0]).to(prompt_embeds.dtype) guidance = guidance * self.transformer.config.guidance_embeds_scale # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # 7. 
Denoising loop timesteps = timesteps[:-1] num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) self._num_timesteps = len(timesteps) transformer_dtype = self.transformer.dtype with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): if self.interrupt: continue # broadcast to batch dimension in a way that's compatible with ONNX/Core ML timestep = t.expand(latents.shape[0]) latents_model_input = latents / self.scheduler.config.sigma_data scm_timestep = torch.sin(timestep) / (torch.cos(timestep) + torch.sin(timestep)) scm_timestep_expanded = scm_timestep.view(-1, 1, 1, 1) latent_model_input = latents_model_input * torch.sqrt( scm_timestep_expanded**2 + (1 - scm_timestep_expanded) ** 2 ) # predict noise model_output noise_pred = self.transformer( latent_model_input.to(dtype=transformer_dtype), encoder_hidden_states=prompt_embeds.to(dtype=transformer_dtype), encoder_attention_mask=prompt_attention_mask, guidance=guidance, timestep=scm_timestep, return_dict=False, attention_kwargs=self.attention_kwargs, )[0] noise_pred = ( (1 - 2 * scm_timestep_expanded) * latent_model_input + (1 - 2 * scm_timestep_expanded + 2 * scm_timestep_expanded**2) * noise_pred ) / torch.sqrt(scm_timestep_expanded**2 + (1 - scm_timestep_expanded) ** 2) noise_pred = noise_pred.float() * self.scheduler.config.sigma_data # compute previous image: x_t -> x_t-1 latents, denoised = self.scheduler.step( noise_pred, timestep, latents, **extra_step_kwargs, return_dict=False ) if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop("latents", latents) prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): 
progress_bar.update() if XLA_AVAILABLE: xm.mark_step() latents = denoised / self.scheduler.config.sigma_data if output_type == "latent": image = latents else: latents = latents.to(self.vae.dtype) torch_accelerator_module = getattr(torch, get_device(), torch.cuda) oom_error = ( torch.OutOfMemoryError if is_torch_version(">=", "2.5.0") else torch_accelerator_module.OutOfMemoryError ) try: image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] except oom_error as e: warnings.warn( f"{e}. \n" f"Try to use VAE tiling for large images. For example: \n" f"pipe.vae.enable_tiling(tile_sample_min_width=512, tile_sample_min_height=512)" ) if use_resolution_binning: image = self.image_processor.resize_and_crop_tensor(image, orig_width, orig_height) if not output_type == "latent": image = self.image_processor.postprocess(image, output_type=output_type) # Offload all models self.maybe_free_model_hooks() if not return_dict: return (image,) return SanaPipelineOutput(images=image)
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/pipelines/sana/pipeline_sana_sprint.py", "license": "Apache License 2.0", "lines": 792, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:src/diffusers/schedulers/scheduling_scm.py
#
# Copyright 2025 Sana-Sprint Authors and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion

from dataclasses import dataclass

import numpy as np
import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..schedulers.scheduling_utils import SchedulerMixin
from ..utils import BaseOutput, logging
from ..utils.torch_utils import randn_tensor


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->SCM
class SCMSchedulerOutput(BaseOutput):
    """
    Output class for the scheduler's `step` function output.

    Args:
        prev_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images):
            Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the
            denoising loop.
        pred_original_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images):
            The predicted denoised sample `(x_{0})` based on the model output from the current timestep.
            `pred_original_sample` can be used to preview progress or for guidance.
    """

    prev_sample: torch.Tensor
    pred_original_sample: torch.Tensor | None = None


class SCMScheduler(SchedulerMixin, ConfigMixin):
    """
    `SCMScheduler` extends the denoising procedure introduced in denoising diffusion probabilistic models (DDPMs) with
    non-Markovian guidance.

    This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic
    methods the library implements for all schedulers such as loading and saving.

    Args:
        num_train_timesteps (`int`, defaults to 1000):
            The number of diffusion steps to train the model.
        prediction_type (`str`, defaults to `trigflow`):
            Prediction type of the scheduler function. Currently only supports "trigflow".
        sigma_data (`float`, defaults to 0.5):
            The standard deviation of the noise added during multi-step inference.
    """

    # _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        prediction_type: str = "trigflow",
        sigma_data: float = 0.5,
    ):
        """
        Initialize the SCM scheduler.

        Args:
            num_train_timesteps (`int`, defaults to 1000):
                The number of diffusion steps to train the model.
            prediction_type (`str`, defaults to `trigflow`):
                Prediction type of the scheduler function. Currently only supports "trigflow".
            sigma_data (`float`, defaults to 0.5):
                The standard deviation of the noise added during multi-step inference.
        """
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values; the training schedule placeholder is replaced by `set_timesteps` before inference
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy().astype(np.int64))

        self._step_index = None
        self._begin_index = None

    @property
    def step_index(self):
        # Index of the current step in the schedule; advanced by one at the end of every `step` call.
        return self._step_index

    @property
    def begin_index(self):
        # First step index of the schedule; `None` until `set_begin_index` is called.
        return self._begin_index

    # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.set_begin_index
    def set_begin_index(self, begin_index: int = 0):
        """
        Sets the begin index for the scheduler. This function should be run from pipeline before the inference.

        Args:
            begin_index (`int`, defaults to `0`):
                The begin index for the scheduler.
        """
        self._begin_index = begin_index

    def set_timesteps(
        self,
        num_inference_steps: int,
        timesteps: torch.Tensor = None,
        device: str | torch.device = None,
        max_timesteps: float = 1.57080,
        intermediate_timesteps: float = 1.3,
    ):
        """
        Sets the discrete timesteps used for the diffusion chain (to be run before inference).

        Args:
            num_inference_steps (`int`):
                The number of diffusion steps used when generating samples with a pre-trained model.
            timesteps (`torch.Tensor`, *optional*):
                Custom timesteps to use for the denoising process. Must be of length `num_inference_steps + 1` and,
                when provided, `max_timesteps` must be passed as `None`.
            device (`str` or `torch.device`, *optional*):
                The device to which the timesteps should be moved to.
            max_timesteps (`float`, defaults to 1.57080):
                The maximum timestep value used in the SCM scheduler.
            intermediate_timesteps (`float`, *optional*, defaults to 1.3):
                The intermediate timestep value used in SCM scheduler (only used when num_inference_steps=2).
        """
        if num_inference_steps > self.config.num_train_timesteps:
            raise ValueError(
                # NOTE: message previously referenced the nonexistent `self.config.train_timesteps`
                f"`num_inference_steps`: {num_inference_steps} cannot be larger than"
                f" `self.config.num_train_timesteps`: {self.config.num_train_timesteps} as the unet model trained with"
                f" this scheduler can only handle maximal {self.config.num_train_timesteps} timesteps."
            )

        if timesteps is not None and len(timesteps) != num_inference_steps + 1:
            raise ValueError("If providing custom timesteps, `timesteps` must be of length `num_inference_steps + 1`.")

        if timesteps is not None and max_timesteps is not None:
            raise ValueError("If providing custom timesteps, `max_timesteps` should not be provided.")

        if timesteps is None and max_timesteps is None:
            raise ValueError("Should provide either `timesteps` or `max_timesteps`.")

        if intermediate_timesteps is not None and num_inference_steps != 2:
            raise ValueError("Intermediate timesteps for SCM is not supported when num_inference_steps != 2.")

        self.num_inference_steps = num_inference_steps

        if timesteps is not None:
            if isinstance(timesteps, list):
                self.timesteps = torch.tensor(timesteps, device=device).float()
            elif isinstance(timesteps, torch.Tensor):
                self.timesteps = timesteps.to(device).float()
            else:
                raise ValueError(f"Unsupported timesteps type: {type(timesteps)}")
        elif intermediate_timesteps is not None:
            self.timesteps = torch.tensor([max_timesteps, intermediate_timesteps, 0], device=device).float()
        else:
            # max_timesteps=arctan(80/0.5)=1.56454 is the default from sCM paper, we choose a different value here
            self.timesteps = torch.linspace(max_timesteps, 0, num_inference_steps + 1, device=device).float()

        self._step_index = None
        self._begin_index = None

    # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._init_step_index
    def _init_step_index(self, timestep: float | torch.Tensor) -> None:
        """
        Initialize the step index for the scheduler based on the given timestep.

        Args:
            timestep (`float` or `torch.Tensor`):
                The current timestep to initialize the step index from.
        """
        if self.begin_index is None:
            if isinstance(timestep, torch.Tensor):
                timestep = timestep.to(self.timesteps.device)
            self._step_index = self.index_for_timestep(timestep)
        else:
            self._step_index = self._begin_index

    # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler.index_for_timestep
    def index_for_timestep(
        self, timestep: float | torch.Tensor, schedule_timesteps: torch.Tensor | None = None
    ) -> int:
        """
        Find the index of a given timestep in the timestep schedule.

        Args:
            timestep (`float` or `torch.Tensor`):
                The timestep value to find in the schedule.
            schedule_timesteps (`torch.Tensor`, *optional*):
                The timestep schedule to search in. If `None`, uses `self.timesteps`.

        Returns:
            `int`: The index of the timestep in the schedule. For the very first step, returns the second index if
            multiple matches exist to avoid skipping a sigma when starting mid-schedule (e.g., for image-to-image).
        """
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        pos = 1 if len(indices) > 1 else 0

        return indices[pos].item()

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: float,
        sample: torch.FloatTensor,
        generator: torch.Generator = None,
        return_dict: bool = True,
    ) -> SCMSchedulerOutput | tuple:
        """
        Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion
        process from the learned model outputs (most often the predicted noise).

        Args:
            model_output (`torch.FloatTensor`):
                The direct output from learned diffusion model.
            timestep (`float`):
                The current discrete timestep in the diffusion chain.
            sample (`torch.FloatTensor`):
                A current instance of a sample created by the diffusion process.
            generator (`torch.Generator`, *optional*):
                A random number generator used to sample the fresh noise injected between multi-step updates.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~schedulers.scheduling_scm.SCMSchedulerOutput`] or `tuple`.

        Returns:
            [`~schedulers.scheduling_scm.SCMSchedulerOutput`] or `tuple`:
                If return_dict is `True`, [`~schedulers.scheduling_scm.SCMSchedulerOutput`] is returned, otherwise a
                tuple is returned where the first element is the sample tensor.
        """
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
            )

        if self.step_index is None:
            self._init_step_index(timestep)

        # 1. Look up the current (`s`) and next (`t`) trigflow timesteps from the schedule
        t = self.timesteps[self.step_index + 1]
        s = self.timesteps[self.step_index]

        # 2. Compute the denoised sample under the configured parameterization
        parameterization = self.config.prediction_type

        if parameterization == "trigflow":
            pred_x0 = torch.cos(s) * sample - torch.sin(s) * model_output
        else:
            raise ValueError(f"Unsupported parameterization: {parameterization}")

        # 3. Sample z ~ N(0, I) for multi-step inference.
        # Noise is not used for one-step sampling.
        if len(self.timesteps) > 1:
            noise = (
                randn_tensor(model_output.shape, device=model_output.device, generator=generator)
                * self.config.sigma_data
            )
            prev_sample = torch.cos(t) * pred_x0 + torch.sin(t) * noise
        else:
            prev_sample = pred_x0

        # upon completion increase step index by one
        self._step_index += 1

        if not return_dict:
            return (prev_sample, pred_x0)

        return SCMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_x0)

    def __len__(self):
        return self.config.num_train_timesteps
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/schedulers/scheduling_scm.py", "license": "Apache License 2.0", "lines": 236, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:tests/pipelines/sana/test_sana_sprint.py
# Copyright 2025 The HuggingFace Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import inspect
import unittest

import numpy as np
import torch
from transformers import Gemma2Config, Gemma2Model, GemmaTokenizer

from diffusers import AutoencoderDC, SanaSprintPipeline, SanaTransformer2DModel, SCMScheduler

from ...testing_utils import IS_GITHUB_ACTIONS, enable_full_determinism, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, to_np


enable_full_determinism()


class SanaSprintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests for `SanaSprintPipeline` built on tiny randomly-initialized components."""

    pipeline_class = SanaSprintPipeline
    # SanaSprint is guidance-distilled: it takes no negative prompt, so those params are removed.
    params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs", "negative_prompt", "negative_prompt_embeds"}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS - {"negative_prompt"}
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS - {"negative_prompt"}
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback_on_step_end",
            "callback_on_step_end_tensor_inputs",
        ]
    )
    test_xformers_attention = False
    test_layerwise_casting = True
    test_group_offloading = True

    def get_dummy_components(self):
        # Build the smallest possible transformer/VAE/text-encoder stack; each component is
        # seeded independently so tests stay deterministic regardless of construction order.
        torch.manual_seed(0)
        transformer = SanaTransformer2DModel(
            patch_size=1,
            in_channels=4,
            out_channels=4,
            num_layers=1,
            num_attention_heads=2,
            attention_head_dim=4,
            num_cross_attention_heads=2,
            cross_attention_head_dim=4,
            cross_attention_dim=8,
            caption_channels=8,
            sample_size=32,
            qk_norm="rms_norm_across_heads",
            guidance_embeds=True,
        )

        torch.manual_seed(0)
        vae = AutoencoderDC(
            in_channels=3,
            latent_channels=4,
            attention_head_dim=2,
            encoder_block_types=(
                "ResBlock",
                "EfficientViTBlock",
            ),
            decoder_block_types=(
                "ResBlock",
                "EfficientViTBlock",
            ),
            encoder_block_out_channels=(8, 8),
            decoder_block_out_channels=(8, 8),
            encoder_qkv_multiscales=((), (5,)),
            decoder_qkv_multiscales=((), (5,)),
            encoder_layers_per_block=(1, 1),
            decoder_layers_per_block=[1, 1],
            downsample_block_type="conv",
            upsample_block_type="interpolate",
            decoder_norm_types="rms_norm",
            decoder_act_fns="silu",
            scaling_factor=0.41407,
        )

        torch.manual_seed(0)
        scheduler = SCMScheduler()

        torch.manual_seed(0)
        text_encoder_config = Gemma2Config(
            head_dim=16,
            hidden_size=8,
            initializer_range=0.02,
            intermediate_size=64,
            max_position_embeddings=8192,
            model_type="gemma2",
            num_attention_heads=2,
            num_hidden_layers=1,
            num_key_value_heads=2,
            vocab_size=8,
            attn_implementation="eager",
        )
        text_encoder = Gemma2Model(text_encoder_config)
        tokenizer = GemmaTokenizer.from_pretrained("hf-internal-testing/dummy-gemma")

        components = {
            "transformer": transformer,
            "vae": vae,
            "scheduler": scheduler,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        # The prompt is empty because the dummy text encoder has a tiny vocab (see skips below).
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "height": 32,
            "width": 32,
            "max_sequence_length": 16,
            "output_type": "pt",
            "complex_human_instruction": None,
        }
        return inputs

    def test_inference(self):
        # Smoke test: only the output shape is pinned; the 1e10 tolerance below makes the
        # value comparison a deliberate no-op (pixel values of a random tiny model are not stable).
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs)[0]
        generated_image = image[0]

        self.assertEqual(generated_image.shape, (3, 32, 32))
        expected_image = torch.randn(3, 32, 32)
        max_diff = np.abs(generated_image - expected_image).max()
        self.assertLessEqual(max_diff, 1e10)

    def test_callback_inputs(self):
        # Verifies the per-step callback contract: only declared tensor inputs are passed,
        # and tensors mutated by the callback actually propagate back into the pipeline.
        sig = inspect.signature(self.pipeline_class.__call__)
        has_callback_tensor_inputs = "callback_on_step_end_tensor_inputs" in sig.parameters
        has_callback_step_end = "callback_on_step_end" in sig.parameters

        if not (has_callback_tensor_inputs and has_callback_step_end):
            return

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        self.assertTrue(
            hasattr(pipe, "_callback_tensor_inputs"),
            f" {self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs",
        )

        def callback_inputs_subset(pipe, i, t, callback_kwargs):
            # iterate over callback args
            for tensor_name, tensor_value in callback_kwargs.items():
                # check that we're only passing in allowed tensor inputs
                assert tensor_name in pipe._callback_tensor_inputs

            return callback_kwargs

        def callback_inputs_all(pipe, i, t, callback_kwargs):
            for tensor_name in pipe._callback_tensor_inputs:
                assert tensor_name in callback_kwargs

            # iterate over callback args
            for tensor_name, tensor_value in callback_kwargs.items():
                # check that we're only passing in allowed tensor inputs
                assert tensor_name in pipe._callback_tensor_inputs

            return callback_kwargs

        inputs = self.get_dummy_inputs(torch_device)

        # Test passing in a subset
        inputs["callback_on_step_end"] = callback_inputs_subset
        inputs["callback_on_step_end_tensor_inputs"] = ["latents"]
        output = pipe(**inputs)[0]

        # Test passing in a everything
        inputs["callback_on_step_end"] = callback_inputs_all
        inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs
        output = pipe(**inputs)[0]

        def callback_inputs_change_tensor(pipe, i, t, callback_kwargs):
            # zero out the latents on the last step to prove the returned tensors are used
            is_last = i == (pipe.num_timesteps - 1)
            if is_last:
                callback_kwargs["latents"] = torch.zeros_like(callback_kwargs["latents"])
            return callback_kwargs

        inputs["callback_on_step_end"] = callback_inputs_change_tensor
        inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs
        output = pipe(**inputs)[0]
        assert output.abs().sum() < 1e10

    def test_attention_slicing_forward_pass(
        self, test_max_difference=True, test_mean_pixel_difference=True, expected_max_diff=1e-3
    ):
        # Overrides the mixin version; runs the pipeline with and without attention slicing
        # and checks outputs agree within `expected_max_diff`.
        if not self.test_attention_slicing:
            return

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        for component in pipe.components.values():
            if hasattr(component, "set_default_attn_processor"):
                component.set_default_attn_processor()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator_device = "cpu"
        inputs = self.get_dummy_inputs(generator_device)
        output_without_slicing = pipe(**inputs)[0]

        pipe.enable_attention_slicing(slice_size=1)
        inputs = self.get_dummy_inputs(generator_device)
        output_with_slicing1 = pipe(**inputs)[0]

        pipe.enable_attention_slicing(slice_size=2)
        inputs = self.get_dummy_inputs(generator_device)
        output_with_slicing2 = pipe(**inputs)[0]

        if test_max_difference:
            max_diff1 = np.abs(to_np(output_with_slicing1) - to_np(output_without_slicing)).max()
            max_diff2 = np.abs(to_np(output_with_slicing2) - to_np(output_without_slicing)).max()
            self.assertLess(
                max(max_diff1, max_diff2),
                expected_max_diff,
                "Attention slicing should not affect the inference results",
            )

    def test_vae_tiling(self, expected_diff_max: float = 0.2):
        # VAE tiling decodes a 128x128 image in overlapping 96-px tiles with 64-px strides;
        # the output must stay close to the untiled decode.
        generator_device = "cpu"
        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe.to("cpu")
        pipe.set_progress_bar_config(disable=None)

        # Without tiling
        inputs = self.get_dummy_inputs(generator_device)
        inputs["height"] = inputs["width"] = 128
        output_without_tiling = pipe(**inputs)[0]

        # With tiling
        pipe.vae.enable_tiling(
            tile_sample_min_height=96,
            tile_sample_min_width=96,
            tile_sample_stride_height=64,
            tile_sample_stride_width=64,
        )
        inputs = self.get_dummy_inputs(generator_device)
        inputs["height"] = inputs["width"] = 128
        output_with_tiling = pipe(**inputs)[0]

        self.assertLess(
            (to_np(output_without_tiling) - to_np(output_with_tiling)).max(),
            expected_diff_max,
            "VAE tiling should not affect the inference results",
        )

    # TODO(aryan): Create a dummy gemma model with smol vocab size
    @unittest.skip(
        "A very small vocab size is used for fast tests. So, Any kind of prompt other than the empty default used in other tests will lead to a embedding lookup error. This test uses a long prompt that causes the error."
    )
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(
        "A very small vocab size is used for fast tests. So, Any kind of prompt other than the empty default used in other tests will lead to a embedding lookup error. This test uses a long prompt that causes the error."
    )
    def test_inference_batch_single_identical(self):
        pass

    def test_float16_inference(self):
        # Requires higher tolerance as model seems very sensitive to dtype
        super().test_float16_inference(expected_max_diff=0.08)

    @unittest.skipIf(IS_GITHUB_ACTIONS, reason="Skipping test inside GitHub Actions environment")
    def test_layerwise_casting_inference(self):
        super().test_layerwise_casting_inference()
{ "repo_id": "huggingface/diffusers", "file_path": "tests/pipelines/sana/test_sana_sprint.py", "license": "Apache License 2.0", "lines": 257, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/diffusers:src/diffusers/hooks/faster_cache.py
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from dataclasses import dataclass from typing import Any, Callable import torch from ..models.attention import AttentionModuleMixin from ..models.modeling_outputs import Transformer2DModelOutput from ..utils import logging from ._common import _ATTENTION_CLASSES from .hooks import HookRegistry, ModelHook logger = logging.get_logger(__name__) # pylint: disable=invalid-name _FASTER_CACHE_DENOISER_HOOK = "faster_cache_denoiser" _FASTER_CACHE_BLOCK_HOOK = "faster_cache_block" _SPATIAL_ATTENTION_BLOCK_IDENTIFIERS = ( "^blocks.*attn", "^transformer_blocks.*attn", "^single_transformer_blocks.*attn", ) _TEMPORAL_ATTENTION_BLOCK_IDENTIFIERS = ("^temporal_transformer_blocks.*attn",) _TRANSFORMER_BLOCK_IDENTIFIERS = _SPATIAL_ATTENTION_BLOCK_IDENTIFIERS + _TEMPORAL_ATTENTION_BLOCK_IDENTIFIERS _UNCOND_COND_INPUT_KWARGS_IDENTIFIERS = ( "hidden_states", "encoder_hidden_states", "timestep", "attention_mask", "encoder_attention_mask", ) @dataclass class FasterCacheConfig: r""" Configuration for [FasterCache](https://huggingface.co/papers/2410.19355). Attributes: spatial_attention_block_skip_range (`int`, defaults to `2`): Calculate the attention states every `N` iterations. If this is set to `N`, the attention computation will be skipped `N - 1` times (i.e., cached attention states will be reused) before computing the new attention states again. 
temporal_attention_block_skip_range (`int`, *optional*, defaults to `None`): Calculate the attention states every `N` iterations. If this is set to `N`, the attention computation will be skipped `N - 1` times (i.e., cached attention states will be reused) before computing the new attention states again. spatial_attention_timestep_skip_range (`tuple[float, float]`, defaults to `(-1, 681)`): The timestep range within which the spatial attention computation can be skipped without a significant loss in quality. This is to be determined by the user based on the underlying model. The first value in the tuple is the lower bound and the second value is the upper bound. Typically, diffusion timesteps for denoising are in the reversed range of 0 to 1000 (i.e. denoising starts at timestep 1000 and ends at timestep 0). For the default values, this would mean that the spatial attention computation skipping will be applicable only after denoising timestep 681 is reached, and continue until the end of the denoising process. temporal_attention_timestep_skip_range (`tuple[float, float]`, *optional*, defaults to `None`): The timestep range within which the temporal attention computation can be skipped without a significant loss in quality. This is to be determined by the user based on the underlying model. The first value in the tuple is the lower bound and the second value is the upper bound. Typically, diffusion timesteps for denoising are in the reversed range of 0 to 1000 (i.e. denoising starts at timestep 1000 and ends at timestep 0). low_frequency_weight_update_timestep_range (`tuple[int, int]`, defaults to `(99, 901)`): The timestep range within which the low frequency weight scaling update is applied. The first value in the tuple is the lower bound and the second value is the upper bound of the timestep range. The callback function for the update is called only within this range. 
high_frequency_weight_update_timestep_range (`tuple[int, int]`, defaults to `(-1, 301)`): The timestep range within which the high frequency weight scaling update is applied. The first value in the tuple is the lower bound and the second value is the upper bound of the timestep range. The callback function for the update is called only within this range. alpha_low_frequency (`float`, defaults to `1.1`): The weight to scale the low frequency updates by. This is used to approximate the unconditional branch from the conditional branch outputs. alpha_high_frequency (`float`, defaults to `1.1`): The weight to scale the high frequency updates by. This is used to approximate the unconditional branch from the conditional branch outputs. unconditional_batch_skip_range (`int`, defaults to `5`): Process the unconditional branch every `N` iterations. If this is set to `N`, the unconditional branch computation will be skipped `N - 1` times (i.e., cached unconditional branch states will be reused) before computing the new unconditional branch states again. unconditional_batch_timestep_skip_range (`tuple[float, float]`, defaults to `(-1, 641)`): The timestep range within which the unconditional branch computation can be skipped without a significant loss in quality. This is to be determined by the user based on the underlying model. The first value in the tuple is the lower bound and the second value is the upper bound. spatial_attention_block_identifiers (`tuple[str, ...]`, defaults to `("blocks.*attn1", "transformer_blocks.*attn1", "single_transformer_blocks.*attn1")`): The identifiers to match the spatial attention blocks in the model. If the name of the block contains any of these identifiers, FasterCache will be applied to that block. This can either be the full layer names, partial layer names, or regex patterns. Matching will always be done using a regex match. 
temporal_attention_block_identifiers (`tuple[str, ...]`, defaults to `("temporal_transformer_blocks.*attn1",)`): The identifiers to match the temporal attention blocks in the model. If the name of the block contains any of these identifiers, FasterCache will be applied to that block. This can either be the full layer names, partial layer names, or regex patterns. Matching will always be done using a regex match. attention_weight_callback (`Callable[[torch.nn.Module], float]`, defaults to `None`): The callback function to determine the weight to scale the attention outputs by. This function should take the attention module as input and return a float value. This is used to approximate the unconditional branch from the conditional branch outputs. If not provided, the default weight is 0.5 for all timesteps. Typically, as described in the paper, this weight should gradually increase from 0 to 1 as the inference progresses. Users are encouraged to experiment and provide custom weight schedules that take into account the number of inference steps and underlying model behaviour as denoising progresses. low_frequency_weight_callback (`Callable[[torch.nn.Module], float]`, defaults to `None`): The callback function to determine the weight to scale the low frequency updates by. If not provided, the default weight is 1.1 for timesteps within the range specified (as described in the paper). high_frequency_weight_callback (`Callable[[torch.nn.Module], float]`, defaults to `None`): The callback function to determine the weight to scale the high frequency updates by. If not provided, the default weight is 1.1 for timesteps within the range specified (as described in the paper). tensor_format (`str`, defaults to `"BCFHW"`): The format of the input tensors. This should be one of `"BCFHW"`, `"BFCHW"`, or `"BCHW"`. The format is used to split individual latent frames in order for low and high frequency components to be computed. 
is_guidance_distilled (`bool`, defaults to `False`): Whether the model is guidance distilled or not. If the model is guidance distilled, FasterCache will not be applied at the denoiser-level to skip the unconditional branch computation (as there is none). _unconditional_conditional_input_kwargs_identifiers (`list[str]`, defaults to `("hidden_states", "encoder_hidden_states", "timestep", "attention_mask", "encoder_attention_mask")`): The identifiers to match the input kwargs that contain the batchwise-concatenated unconditional and conditional inputs. If the name of the input kwargs contains any of these identifiers, FasterCache will split the inputs into unconditional and conditional branches. This must be a list of exact input kwargs names that contain the batchwise-concatenated unconditional and conditional inputs. """ # In the paper and codebase, they hardcode these values to 2. However, it can be made configurable # after some testing. We default to 2 if these parameters are not provided. spatial_attention_block_skip_range: int = 2 temporal_attention_block_skip_range: int | None = None spatial_attention_timestep_skip_range: tuple[int, int] = (-1, 681) temporal_attention_timestep_skip_range: tuple[int, int] = (-1, 681) # Indicator functions for low/high frequency as mentioned in Equation 11 of the paper low_frequency_weight_update_timestep_range: tuple[int, int] = (99, 901) high_frequency_weight_update_timestep_range: tuple[int, int] = (-1, 301) # ⍺1 and ⍺2 as mentioned in Equation 11 of the paper alpha_low_frequency: float = 1.1 alpha_high_frequency: float = 1.1 # n as described in CFG-Cache explanation in the paper - dependent on the model unconditional_batch_skip_range: int = 5 unconditional_batch_timestep_skip_range: tuple[int, int] = (-1, 641) spatial_attention_block_identifiers: tuple[str, ...] = _SPATIAL_ATTENTION_BLOCK_IDENTIFIERS temporal_attention_block_identifiers: tuple[str, ...] 
= _TEMPORAL_ATTENTION_BLOCK_IDENTIFIERS attention_weight_callback: Callable[[torch.nn.Module], float] = None low_frequency_weight_callback: Callable[[torch.nn.Module], float] = None high_frequency_weight_callback: Callable[[torch.nn.Module], float] = None tensor_format: str = "BCFHW" is_guidance_distilled: bool = False current_timestep_callback: Callable[[], int] = None _unconditional_conditional_input_kwargs_identifiers: list[str] = _UNCOND_COND_INPUT_KWARGS_IDENTIFIERS def __repr__(self) -> str: return ( f"FasterCacheConfig(\n" f" spatial_attention_block_skip_range={self.spatial_attention_block_skip_range},\n" f" temporal_attention_block_skip_range={self.temporal_attention_block_skip_range},\n" f" spatial_attention_timestep_skip_range={self.spatial_attention_timestep_skip_range},\n" f" temporal_attention_timestep_skip_range={self.temporal_attention_timestep_skip_range},\n" f" low_frequency_weight_update_timestep_range={self.low_frequency_weight_update_timestep_range},\n" f" high_frequency_weight_update_timestep_range={self.high_frequency_weight_update_timestep_range},\n" f" alpha_low_frequency={self.alpha_low_frequency},\n" f" alpha_high_frequency={self.alpha_high_frequency},\n" f" unconditional_batch_skip_range={self.unconditional_batch_skip_range},\n" f" unconditional_batch_timestep_skip_range={self.unconditional_batch_timestep_skip_range},\n" f" spatial_attention_block_identifiers={self.spatial_attention_block_identifiers},\n" f" temporal_attention_block_identifiers={self.temporal_attention_block_identifiers},\n" f" tensor_format={self.tensor_format},\n" f")" ) class FasterCacheDenoiserState: r""" State for [FasterCache](https://huggingface.co/papers/2410.19355) top-level denoiser module. 
""" def __init__(self) -> None: self.iteration: int = 0 self.low_frequency_delta: torch.Tensor = None self.high_frequency_delta: torch.Tensor = None def reset(self): self.iteration = 0 self.low_frequency_delta = None self.high_frequency_delta = None class FasterCacheBlockState: r""" State for [FasterCache](https://huggingface.co/papers/2410.19355). Every underlying block that FasterCache is applied to will have an instance of this state. """ def __init__(self) -> None: self.iteration: int = 0 self.batch_size: int = None self.cache: tuple[torch.Tensor, torch.Tensor] = None def reset(self): self.iteration = 0 self.batch_size = None self.cache = None class FasterCacheDenoiserHook(ModelHook): _is_stateful = True def __init__( self, unconditional_batch_skip_range: int, unconditional_batch_timestep_skip_range: tuple[int, int], tensor_format: str, is_guidance_distilled: bool, uncond_cond_input_kwargs_identifiers: list[str], current_timestep_callback: Callable[[], int], low_frequency_weight_callback: Callable[[torch.nn.Module], torch.Tensor], high_frequency_weight_callback: Callable[[torch.nn.Module], torch.Tensor], ) -> None: super().__init__() self.unconditional_batch_skip_range = unconditional_batch_skip_range self.unconditional_batch_timestep_skip_range = unconditional_batch_timestep_skip_range # We can't easily detect what args are to be split in unconditional and conditional branches. We # can only do it for kwargs, hence they are the only ones we split. The args are passed as-is. # If a model is to be made compatible with FasterCache, the user must ensure that the inputs that # contain batchwise-concatenated unconditional and conditional inputs are passed as kwargs. 
self.uncond_cond_input_kwargs_identifiers = uncond_cond_input_kwargs_identifiers self.tensor_format = tensor_format self.is_guidance_distilled = is_guidance_distilled self.current_timestep_callback = current_timestep_callback self.low_frequency_weight_callback = low_frequency_weight_callback self.high_frequency_weight_callback = high_frequency_weight_callback def initialize_hook(self, module): self.state = FasterCacheDenoiserState() return module @staticmethod def _get_cond_input(input: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]: # Note: this method assumes that the input tensor is batchwise-concatenated with unconditional inputs # followed by conditional inputs. _, cond = input.chunk(2, dim=0) return cond def new_forward(self, module: torch.nn.Module, *args, **kwargs) -> Any: # Split the unconditional and conditional inputs. We only want to infer the conditional branch if the # requirements for skipping the unconditional branch are met as described in the paper. # We skip the unconditional branch only if the following conditions are met: # 1. We have completed at least one iteration of the denoiser # 2. The current timestep is within the range specified by the user. This is the optimal timestep range # where approximating the unconditional branch from the computation of the conditional branch is possible # without a significant loss in quality. # 3. The current iteration is not a multiple of the unconditional batch skip range. This is done so that # we compute the unconditional branch at least once every few iterations to ensure minimal quality loss. 
is_within_timestep_range = ( self.unconditional_batch_timestep_skip_range[0] < self.current_timestep_callback() < self.unconditional_batch_timestep_skip_range[1] ) should_skip_uncond = ( self.state.iteration > 0 and is_within_timestep_range and self.state.iteration % self.unconditional_batch_skip_range != 0 and not self.is_guidance_distilled ) if should_skip_uncond: is_any_kwarg_uncond = any(k in self.uncond_cond_input_kwargs_identifiers for k in kwargs.keys()) if is_any_kwarg_uncond: logger.debug("FasterCache - Skipping unconditional branch computation") args = tuple([self._get_cond_input(arg) if torch.is_tensor(arg) else arg for arg in args]) kwargs = { k: v if k not in self.uncond_cond_input_kwargs_identifiers else self._get_cond_input(v) for k, v in kwargs.items() } output = self.fn_ref.original_forward(*args, **kwargs) if self.is_guidance_distilled: self.state.iteration += 1 return output if torch.is_tensor(output): hidden_states = output elif isinstance(output, (tuple, Transformer2DModelOutput)): hidden_states = output[0] batch_size = hidden_states.size(0) if should_skip_uncond: self.state.low_frequency_delta = self.state.low_frequency_delta * self.low_frequency_weight_callback( module ) self.state.high_frequency_delta = self.state.high_frequency_delta * self.high_frequency_weight_callback( module ) if self.tensor_format == "BCFHW": hidden_states = hidden_states.permute(0, 2, 1, 3, 4) if self.tensor_format == "BCFHW" or self.tensor_format == "BFCHW": hidden_states = hidden_states.flatten(0, 1) low_freq_cond, high_freq_cond = _split_low_high_freq(hidden_states.float()) # Approximate/compute the unconditional branch outputs as described in Equation 9 and 10 of the paper low_freq_uncond = self.state.low_frequency_delta + low_freq_cond high_freq_uncond = self.state.high_frequency_delta + high_freq_cond uncond_freq = low_freq_uncond + high_freq_uncond uncond_states = torch.fft.ifftshift(uncond_freq) uncond_states = torch.fft.ifft2(uncond_states).real if 
self.tensor_format == "BCFHW" or self.tensor_format == "BFCHW": uncond_states = uncond_states.unflatten(0, (batch_size, -1)) hidden_states = hidden_states.unflatten(0, (batch_size, -1)) if self.tensor_format == "BCFHW": uncond_states = uncond_states.permute(0, 2, 1, 3, 4) hidden_states = hidden_states.permute(0, 2, 1, 3, 4) # Concatenate the approximated unconditional and predicted conditional branches uncond_states = uncond_states.to(hidden_states.dtype) hidden_states = torch.cat([uncond_states, hidden_states], dim=0) else: uncond_states, cond_states = hidden_states.chunk(2, dim=0) if self.tensor_format == "BCFHW": uncond_states = uncond_states.permute(0, 2, 1, 3, 4) cond_states = cond_states.permute(0, 2, 1, 3, 4) if self.tensor_format == "BCFHW" or self.tensor_format == "BFCHW": uncond_states = uncond_states.flatten(0, 1) cond_states = cond_states.flatten(0, 1) low_freq_uncond, high_freq_uncond = _split_low_high_freq(uncond_states.float()) low_freq_cond, high_freq_cond = _split_low_high_freq(cond_states.float()) self.state.low_frequency_delta = low_freq_uncond - low_freq_cond self.state.high_frequency_delta = high_freq_uncond - high_freq_cond self.state.iteration += 1 if torch.is_tensor(output): output = hidden_states elif isinstance(output, tuple): output = (hidden_states, *output[1:]) else: output.sample = hidden_states return output def reset_state(self, module: torch.nn.Module) -> torch.nn.Module: self.state.reset() return module class FasterCacheBlockHook(ModelHook): _is_stateful = True def __init__( self, block_skip_range: int, timestep_skip_range: tuple[int, int], is_guidance_distilled: bool, weight_callback: Callable[[torch.nn.Module], float], current_timestep_callback: Callable[[], int], ) -> None: super().__init__() self.block_skip_range = block_skip_range self.timestep_skip_range = timestep_skip_range self.is_guidance_distilled = is_guidance_distilled self.weight_callback = weight_callback self.current_timestep_callback = current_timestep_callback def 
initialize_hook(self, module): self.state = FasterCacheBlockState() return module def _compute_approximated_attention_output( self, t_2_output: torch.Tensor, t_output: torch.Tensor, weight: float, batch_size: int ) -> torch.Tensor: if t_2_output.size(0) != batch_size: # The cache t_2_output contains both batchwise-concatenated unconditional-conditional branch outputs. Just # take the conditional branch outputs. assert t_2_output.size(0) == 2 * batch_size t_2_output = t_2_output[batch_size:] if t_output.size(0) != batch_size: # The cache t_output contains both batchwise-concatenated unconditional-conditional branch outputs. Just # take the conditional branch outputs. assert t_output.size(0) == 2 * batch_size t_output = t_output[batch_size:] return t_output + (t_output - t_2_output) * weight def new_forward(self, module: torch.nn.Module, *args, **kwargs) -> Any: batch_size = [ *[arg.size(0) for arg in args if torch.is_tensor(arg)], *[v.size(0) for v in kwargs.values() if torch.is_tensor(v)], ][0] if self.state.batch_size is None: # Will be updated on first forward pass through the denoiser self.state.batch_size = batch_size # If we have to skip due to the skip conditions, then let's skip as expected. # But, we can't skip if the denoiser wants to infer both unconditional and conditional branches. This # is because the expected output shapes of attention layer will not match if we only return values from # the cache (which only caches conditional branch outputs). So, if state.batch_size (which is the true # unconditional-conditional batch size) is same as the current batch size, we don't perform the layer # skip. Otherwise, we conditionally skip the layer based on what state.skip_callback returns. 
is_within_timestep_range = ( self.timestep_skip_range[0] < self.current_timestep_callback() < self.timestep_skip_range[1] ) if not is_within_timestep_range: should_skip_attention = False else: should_compute_attention = self.state.iteration > 0 and self.state.iteration % self.block_skip_range == 0 should_skip_attention = not should_compute_attention if should_skip_attention: should_skip_attention = self.is_guidance_distilled or self.state.batch_size != batch_size if should_skip_attention: logger.debug("FasterCache - Skipping attention and using approximation") if torch.is_tensor(self.state.cache[-1]): t_2_output, t_output = self.state.cache weight = self.weight_callback(module) output = self._compute_approximated_attention_output(t_2_output, t_output, weight, batch_size) else: # The cache contains multiple tensors from past N iterations (N=2 for FasterCache). We need to handle all of them. # Diffusers blocks can return multiple tensors - let's call them [A, B, C, ...] for simplicity. # In our cache, we would have [[A_1, B_1, C_1, ...], [A_2, B_2, C_2, ...], ...] where each list is the output from # a forward pass of the block. We need to compute the approximated output for each of these tensors. # The zip(*state.cache) operation will give us [(A_1, A_2, ...), (B_1, B_2, ...), (C_1, C_2, ...), ...] which # allows us to compute the approximated attention output for each tensor in the cache. output = () for t_2_output, t_output in zip(*self.state.cache): result = self._compute_approximated_attention_output( t_2_output, t_output, self.weight_callback(module), batch_size ) output += (result,) else: logger.debug("FasterCache - Computing attention") output = self.fn_ref.original_forward(*args, **kwargs) # Note that the following condition for getting hidden_states should suffice since Diffusers blocks either return # a single hidden_states tensor, or a tuple of (hidden_states, encoder_hidden_states) tensors. We need to handle # both cases. 
if torch.is_tensor(output): cache_output = output if not self.is_guidance_distilled and cache_output.size(0) == self.state.batch_size: # The output here can be both unconditional-conditional branch outputs or just conditional branch outputs. # This is determined at the higher-level denoiser module. We only want to cache the conditional branch outputs. cache_output = cache_output.chunk(2, dim=0)[1] else: # Cache all return values and perform the same operation as above cache_output = () for out in output: if not self.is_guidance_distilled and out.size(0) == self.state.batch_size: out = out.chunk(2, dim=0)[1] cache_output += (out,) if self.state.cache is None: self.state.cache = [cache_output, cache_output] else: self.state.cache = [self.state.cache[-1], cache_output] self.state.iteration += 1 return output def reset_state(self, module: torch.nn.Module) -> torch.nn.Module: self.state.reset() return module def apply_faster_cache(module: torch.nn.Module, config: FasterCacheConfig) -> None: r""" Applies [FasterCache](https://huggingface.co/papers/2410.19355) to a given pipeline. Args: module (`torch.nn.Module`): The pytorch module to apply FasterCache to. Typically, this should be a transformer architecture supported in Diffusers, such as `CogVideoXTransformer3DModel`, but external implementations may also work. config (`FasterCacheConfig`): The configuration to use for FasterCache. Example: ```python >>> import torch >>> from diffusers import CogVideoXPipeline, FasterCacheConfig, apply_faster_cache >>> pipe = CogVideoXPipeline.from_pretrained("THUDM/CogVideoX-5b", torch_dtype=torch.bfloat16) >>> pipe.to("cuda") >>> config = FasterCacheConfig( ... spatial_attention_block_skip_range=2, ... spatial_attention_timestep_skip_range=(-1, 681), ... low_frequency_weight_update_timestep_range=(99, 641), ... high_frequency_weight_update_timestep_range=(-1, 301), ... spatial_attention_block_identifiers=["transformer_blocks"], ... attention_weight_callback=lambda _: 0.3, ... 
tensor_format="BFCHW", ... ) >>> apply_faster_cache(pipe.transformer, config) ``` """ logger.warning( "FasterCache is a purely experimental feature and may not work as expected. Not all models support FasterCache. " "The API is subject to change in future releases, with no guarantee of backward compatibility. Please report any issues at " "https://github.com/huggingface/diffusers/issues." ) if config.attention_weight_callback is None: # If the user has not provided a weight callback, we default to 0.5 for all timesteps. # In the paper, they recommend using a gradually increasing weight from 0 to 1 as the inference progresses, but # this depends from model-to-model. It is required by the user to provide a weight callback if they want to # use a different weight function. Defaulting to 0.5 works well in practice for most cases. logger.warning( "No `attention_weight_callback` provided when enabling FasterCache. Defaulting to using a weight of 0.5 for all timesteps." ) config.attention_weight_callback = lambda _: 0.5 if config.low_frequency_weight_callback is None: logger.debug( "Low frequency weight callback not provided when enabling FasterCache. Defaulting to behaviour described in the paper." ) def low_frequency_weight_callback(module: torch.nn.Module) -> float: is_within_range = ( config.low_frequency_weight_update_timestep_range[0] < config.current_timestep_callback() < config.low_frequency_weight_update_timestep_range[1] ) return config.alpha_low_frequency if is_within_range else 1.0 config.low_frequency_weight_callback = low_frequency_weight_callback if config.high_frequency_weight_callback is None: logger.debug( "High frequency weight callback not provided when enabling FasterCache. Defaulting to behaviour described in the paper." 
) def high_frequency_weight_callback(module: torch.nn.Module) -> float: is_within_range = ( config.high_frequency_weight_update_timestep_range[0] < config.current_timestep_callback() < config.high_frequency_weight_update_timestep_range[1] ) return config.alpha_high_frequency if is_within_range else 1.0 config.high_frequency_weight_callback = high_frequency_weight_callback supported_tensor_formats = ["BCFHW", "BFCHW", "BCHW"] # TODO(aryan): Support BSC for LTX Video if config.tensor_format not in supported_tensor_formats: raise ValueError(f"`tensor_format` must be one of {supported_tensor_formats}, but got {config.tensor_format}.") _apply_faster_cache_on_denoiser(module, config) for name, submodule in module.named_modules(): if not isinstance(submodule, _ATTENTION_CLASSES): continue if any(re.search(identifier, name) is not None for identifier in _TRANSFORMER_BLOCK_IDENTIFIERS): _apply_faster_cache_on_attention_class(name, submodule, config) def _apply_faster_cache_on_denoiser(module: torch.nn.Module, config: FasterCacheConfig) -> None: hook = FasterCacheDenoiserHook( config.unconditional_batch_skip_range, config.unconditional_batch_timestep_skip_range, config.tensor_format, config.is_guidance_distilled, config._unconditional_conditional_input_kwargs_identifiers, config.current_timestep_callback, config.low_frequency_weight_callback, config.high_frequency_weight_callback, ) registry = HookRegistry.check_if_exists_or_initialize(module) registry.register_hook(hook, _FASTER_CACHE_DENOISER_HOOK) def _apply_faster_cache_on_attention_class(name: str, module: AttentionModuleMixin, config: FasterCacheConfig) -> None: is_spatial_self_attention = ( any(re.search(identifier, name) is not None for identifier in config.spatial_attention_block_identifiers) and config.spatial_attention_block_skip_range is not None and not getattr(module, "is_cross_attention", False) ) is_temporal_self_attention = ( any(re.search(identifier, name) is not None for identifier in 
config.temporal_attention_block_identifiers) and config.temporal_attention_block_skip_range is not None and not module.is_cross_attention ) block_skip_range, timestep_skip_range, block_type = None, None, None if is_spatial_self_attention: block_skip_range = config.spatial_attention_block_skip_range timestep_skip_range = config.spatial_attention_timestep_skip_range block_type = "spatial" elif is_temporal_self_attention: block_skip_range = config.temporal_attention_block_skip_range timestep_skip_range = config.temporal_attention_timestep_skip_range block_type = "temporal" if block_skip_range is None or timestep_skip_range is None: logger.debug( f'Unable to apply FasterCache to the selected layer: "{name}" because it does ' f"not match any of the required criteria for spatial or temporal attention layers. Note, " f"however, that this layer may still be valid for applying PAB. Please specify the correct " f"block identifiers in the configuration or use the specialized `apply_faster_cache_on_module` " f"function to apply FasterCache to this layer." 
) return logger.debug(f"Enabling FasterCache ({block_type}) for layer: {name}") hook = FasterCacheBlockHook( block_skip_range, timestep_skip_range, config.is_guidance_distilled, config.attention_weight_callback, config.current_timestep_callback, ) registry = HookRegistry.check_if_exists_or_initialize(module) registry.register_hook(hook, _FASTER_CACHE_BLOCK_HOOK) # Reference: https://github.com/Vchitect/FasterCache/blob/fab32c15014636dc854948319c0a9a8d92c7acb4/scripts/latte/faster_cache_sample_latte.py#L127C1-L143C39 @torch.no_grad() def _split_low_high_freq(x): fft = torch.fft.fft2(x) fft_shifted = torch.fft.fftshift(fft) height, width = x.shape[-2:] radius = min(height, width) // 5 y_grid, x_grid = torch.meshgrid(torch.arange(height), torch.arange(width)) center_x, center_y = width // 2, height // 2 mask = (x_grid - center_x) ** 2 + (y_grid - center_y) ** 2 <= radius**2 low_freq_mask = mask.unsqueeze(0).unsqueeze(0).to(x.device) high_freq_mask = ~low_freq_mask low_freq_fft = fft_shifted * low_freq_mask high_freq_fft = fft_shifted * high_freq_mask return low_freq_fft, high_freq_fft
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/hooks/faster_cache.py", "license": "Apache License 2.0", "lines": 557, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:src/diffusers/pipelines/ltx/pipeline_ltx_condition.py
# Copyright 2025 Lightricks and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect from dataclasses import dataclass from typing import Any, Callable import PIL.Image import torch from transformers import T5EncoderModel, T5TokenizerFast from ...callbacks import MultiPipelineCallbacks, PipelineCallback from ...image_processor import PipelineImageInput from ...loaders import FromSingleFileMixin, LTXVideoLoraLoaderMixin from ...models.autoencoders import AutoencoderKLLTXVideo from ...models.transformers import LTXVideoTransformer3DModel from ...schedulers import FlowMatchEulerDiscreteScheduler from ...utils import is_torch_xla_available, logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ...video_processor import VideoProcessor from ..pipeline_utils import DiffusionPipeline from .pipeline_output import LTXPipelineOutput if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py >>> import torch >>> from diffusers.pipelines.ltx.pipeline_ltx_condition import LTXConditionPipeline, LTXVideoCondition >>> from diffusers.utils import export_to_video, load_video, load_image >>> pipe = LTXConditionPipeline.from_pretrained("Lightricks/LTX-Video-0.9.5", torch_dtype=torch.bfloat16) >>> pipe.to("cuda") >>> # Load input image and video >>> video 
= load_video( ... "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cosmos/cosmos-video2world-input-vid.mp4" ... ) >>> image = load_image( ... "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cosmos/cosmos-video2world-input.jpg" ... ) >>> # Create conditioning objects >>> condition1 = LTXVideoCondition( ... image=image, ... frame_index=0, ... ) >>> condition2 = LTXVideoCondition( ... video=video, ... frame_index=80, ... ) >>> prompt = "The video depicts a long, straight highway stretching into the distance, flanked by metal guardrails. The road is divided into multiple lanes, with a few vehicles visible in the far distance. The surrounding landscape features dry, grassy fields on one side and rolling hills on the other. The sky is mostly clear with a few scattered clouds, suggesting a bright, sunny day. And then the camera switch to a winding mountain road covered in snow, with a single vehicle traveling along it. The road is flanked by steep, rocky cliffs and sparse vegetation. The landscape is characterized by rugged terrain and a river visible in the distance. The scene captures the solitude and beauty of a winter drive through a mountainous region." >>> negative_prompt = "worst quality, inconsistent motion, blurry, jittery, distorted" >>> # Generate video >>> generator = torch.Generator("cuda").manual_seed(0) >>> # Text-only conditioning is also supported without the need to pass `conditions` >>> video = pipe( ... conditions=[condition1, condition2], ... prompt=prompt, ... negative_prompt=negative_prompt, ... width=768, ... height=512, ... num_frames=161, ... num_inference_steps=40, ... generator=generator, ... ).frames[0] >>> export_to_video(video, "output.mp4", fps=24) ``` """ @dataclass class LTXVideoCondition: """ Defines a single frame-conditioning item for LTX Video - a single frame or a sequence of frames. 
Attributes: image (`PIL.Image.Image`): The image to condition the video on. video (`list[PIL.Image.Image]`): The video to condition the video on. frame_index (`int`): The frame index at which the image or video will conditionally effect the video generation. strength (`float`, defaults to `1.0`): The strength of the conditioning effect. A value of `1.0` means the conditioning effect is fully applied. """ image: PIL.Image.Image | None = None video: list[PIL.Image.Image] | None = None frame_index: int = 0 strength: float = 1.0 # from LTX-Video/ltx_video/schedulers/rf.py def linear_quadratic_schedule(num_steps, threshold_noise=0.025, linear_steps=None): if linear_steps is None: linear_steps = num_steps // 2 if num_steps < 2: return torch.tensor([1.0]) linear_sigma_schedule = [i * threshold_noise / linear_steps for i in range(linear_steps)] threshold_noise_step_diff = linear_steps - threshold_noise * num_steps quadratic_steps = num_steps - linear_steps quadratic_coef = threshold_noise_step_diff / (linear_steps * quadratic_steps**2) linear_coef = threshold_noise / linear_steps - 2 * threshold_noise_step_diff / (quadratic_steps**2) const = quadratic_coef * (linear_steps**2) quadratic_sigma_schedule = [ quadratic_coef * (i**2) + linear_coef * i + const for i in range(linear_steps, num_steps) ] sigma_schedule = linear_sigma_schedule + quadratic_sigma_schedule + [1.0] sigma_schedule = [1.0 - x for x in sigma_schedule] return torch.tensor(sigma_schedule[:-1]) # Copied from diffusers.pipelines.flux.pipeline_flux.calculate_shift def calculate_shift( image_seq_len, base_seq_len: int = 256, max_seq_len: int = 4096, base_shift: float = 0.5, max_shift: float = 1.15, ): m = (max_shift - base_shift) / (max_seq_len - base_seq_len) b = base_shift - m * base_seq_len mu = image_seq_len * m + b return mu # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps def retrieve_timesteps( scheduler, num_inference_steps: int | None = None, device: str | 
torch.device | None = None, timesteps: list[int] | None = None, sigmas: list[float] | None = None, **kwargs, ): r""" Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. Args: scheduler (`SchedulerMixin`): The scheduler to get timesteps from. num_inference_steps (`int`): The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` must be `None`. device (`str` or `torch.device`, *optional*): The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. timesteps (`list[int]`, *optional*): Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, `num_inference_steps` and `sigmas` must be `None`. sigmas (`list[float]`, *optional*): Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, `num_inference_steps` and `timesteps` must be `None`. Returns: `tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the second element is the number of inference steps. """ if timesteps is not None and sigmas is not None: raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") if timesteps is not None: accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" timestep schedules. Please check whether you are using the correct scheduler." 
) scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif sigmas is not None: accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accept_sigmas: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" sigmas schedules. Please check whether you are using the correct scheduler." ) scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return timesteps, num_inference_steps # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents def retrieve_latents( encoder_output: torch.Tensor, generator: torch.Generator | None = None, sample_mode: str = "sample" ): if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": return encoder_output.latent_dist.sample(generator) elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": return encoder_output.latent_dist.mode() elif hasattr(encoder_output, "latents"): return encoder_output.latents else: raise AttributeError("Could not access latents of provided encoder_output") # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): r""" Rescales `noise_cfg` tensor based on `guidance_rescale` to improve image quality and fix overexposure. Based on Section 3.4 from [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://huggingface.co/papers/2305.08891). Args: noise_cfg (`torch.Tensor`): The predicted noise tensor for the guided diffusion process. noise_pred_text (`torch.Tensor`): The predicted noise tensor for the text-guided diffusion process. 
guidance_rescale (`float`, *optional*, defaults to 0.0): A rescale factor applied to the noise predictions. Returns: noise_cfg (`torch.Tensor`): The rescaled noise prediction tensor. """ std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) # rescale the results from guidance (fixes overexposure) noise_pred_rescaled = noise_cfg * (std_text / std_cfg) # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg return noise_cfg class LTXConditionPipeline(DiffusionPipeline, FromSingleFileMixin, LTXVideoLoraLoaderMixin): r""" Pipeline for text/image/video-to-video generation. Reference: https://github.com/Lightricks/LTX-Video Args: transformer ([`LTXVideoTransformer3DModel`]): Conditional Transformer architecture to denoise the encoded video latents. scheduler ([`FlowMatchEulerDiscreteScheduler`]): A scheduler to be used in combination with `transformer` to denoise the encoded image latents. vae ([`AutoencoderKLLTXVideo`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. text_encoder ([`T5EncoderModel`]): [T5](https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5EncoderModel), specifically the [google/t5-v1_1-xxl](https://huggingface.co/google/t5-v1_1-xxl) variant. tokenizer (`CLIPTokenizer`): Tokenizer of class [CLIPTokenizer](https://huggingface.co/docs/transformers/en/model_doc/clip#transformers.CLIPTokenizer). tokenizer (`T5TokenizerFast`): Second Tokenizer of class [T5TokenizerFast](https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5TokenizerFast). 
""" model_cpu_offload_seq = "text_encoder->transformer->vae" _optional_components = [] _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"] def __init__( self, scheduler: FlowMatchEulerDiscreteScheduler, vae: AutoencoderKLLTXVideo, text_encoder: T5EncoderModel, tokenizer: T5TokenizerFast, transformer: LTXVideoTransformer3DModel, ): super().__init__() self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, transformer=transformer, scheduler=scheduler, ) self.vae_spatial_compression_ratio = ( self.vae.spatial_compression_ratio if getattr(self, "vae", None) is not None else 32 ) self.vae_temporal_compression_ratio = ( self.vae.temporal_compression_ratio if getattr(self, "vae", None) is not None else 8 ) self.transformer_spatial_patch_size = ( self.transformer.config.patch_size if getattr(self, "transformer", None) is not None else 1 ) self.transformer_temporal_patch_size = ( self.transformer.config.patch_size_t if getattr(self, "transformer") is not None else 1 ) self.video_processor = VideoProcessor(vae_scale_factor=self.vae_spatial_compression_ratio) self.tokenizer_max_length = ( self.tokenizer.model_max_length if getattr(self, "tokenizer", None) is not None else 128 ) self.default_height = 512 self.default_width = 704 self.default_frames = 121 def _get_t5_prompt_embeds( self, prompt: str | list[str] = None, num_videos_per_prompt: int = 1, max_sequence_length: int = 256, device: torch.device | None = None, dtype: torch.dtype | None = None, ): device = device or self._execution_device dtype = dtype or self.text_encoder.dtype prompt = [prompt] if isinstance(prompt, str) else prompt batch_size = len(prompt) text_inputs = self.tokenizer( prompt, padding="max_length", max_length=max_sequence_length, truncation=True, add_special_tokens=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids prompt_attention_mask = text_inputs.attention_mask prompt_attention_mask = prompt_attention_mask.bool().to(device) 
        # Warn about (and show) any prompt text dropped by `max_length` truncation.
        untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids

        if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
            removed_text = self.tokenizer.batch_decode(untruncated_ids[:, max_sequence_length - 1 : -1])
            logger.warning(
                "The following part of your input was truncated because `max_sequence_length` is set to "
                f" {max_sequence_length} tokens: {removed_text}"
            )

        prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=prompt_attention_mask)[0]
        prompt_embeds = prompt_embeds.to(dtype=dtype, device=device)

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        _, seq_len, _ = prompt_embeds.shape
        prompt_embeds = prompt_embeds.repeat(1, num_videos_per_prompt, 1)
        prompt_embeds = prompt_embeds.view(batch_size * num_videos_per_prompt, seq_len, -1)

        prompt_attention_mask = prompt_attention_mask.view(batch_size, -1)
        prompt_attention_mask = prompt_attention_mask.repeat(num_videos_per_prompt, 1)

        return prompt_embeds, prompt_attention_mask

    # Copied from diffusers.pipelines.mochi.pipeline_mochi.MochiPipeline.encode_prompt
    def encode_prompt(
        self,
        prompt: str | list[str],
        negative_prompt: str | list[str] | None = None,
        do_classifier_free_guidance: bool = True,
        num_videos_per_prompt: int = 1,
        prompt_embeds: torch.Tensor | None = None,
        negative_prompt_embeds: torch.Tensor | None = None,
        prompt_attention_mask: torch.Tensor | None = None,
        negative_prompt_attention_mask: torch.Tensor | None = None,
        max_sequence_length: int = 256,
        device: torch.device | None = None,
        dtype: torch.dtype | None = None,
    ):
        r"""
        Encodes the prompt into text encoder hidden states.

        Args:
            prompt (`str` or `list[str]`, *optional*):
                prompt to be encoded
            negative_prompt (`str` or `list[str]`, *optional*):
                The prompt or prompts not to guide the image generation. If not defined, one has to pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
                less than `1`).
            do_classifier_free_guidance (`bool`, *optional*, defaults to `True`):
                Whether to use classifier free guidance or not.
            num_videos_per_prompt (`int`, *optional*, defaults to 1):
                Number of videos that should be generated per prompt.
            prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                provided, text embeddings will be generated from `prompt` input argument.
            negative_prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
                argument.
            device: (`torch.device`, *optional*):
                torch device
            dtype: (`torch.dtype`, *optional*):
                torch dtype
        """
        device = device or self._execution_device

        prompt = [prompt] if isinstance(prompt, str) else prompt
        if prompt is not None:
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        if prompt_embeds is None:
            prompt_embeds, prompt_attention_mask = self._get_t5_prompt_embeds(
                prompt=prompt,
                num_videos_per_prompt=num_videos_per_prompt,
                max_sequence_length=max_sequence_length,
                device=device,
                dtype=dtype,
            )

        if do_classifier_free_guidance and negative_prompt_embeds is None:
            negative_prompt = negative_prompt or ""
            negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt

            # For CFG, `negative_prompt` must mirror `prompt` in type and batch size.
            if prompt is not None and type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )

            negative_prompt_embeds, negative_prompt_attention_mask = self._get_t5_prompt_embeds(
                prompt=negative_prompt,
                num_videos_per_prompt=num_videos_per_prompt,
                max_sequence_length=max_sequence_length,
                device=device,
                dtype=dtype,
            )

        return prompt_embeds, prompt_attention_mask, negative_prompt_embeds, negative_prompt_attention_mask

    def check_inputs(
        self,
        prompt,
        conditions,
        image,
        video,
        frame_index,
        strength,
        denoise_strength,
        height,
        width,
        callback_on_step_end_tensor_inputs=None,
        prompt_embeds=None,
        negative_prompt_embeds=None,
        prompt_attention_mask=None,
        negative_prompt_attention_mask=None,
    ):
        # Validate user-facing call arguments before any heavy computation happens.
        if height % 32 != 0 or width % 32 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 32 but are {height} and {width}.")

        if callback_on_step_end_tensor_inputs is not None and not all(
            k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
        ):
            raise ValueError(
                f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
            )

        # Exactly one of `prompt` / `prompt_embeds` must be supplied.
        if prompt is not None and prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
                " only forward one of the two."
            )
        elif prompt is None and prompt_embeds is None:
            raise ValueError(
                "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
            )
        elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if prompt_embeds is not None and prompt_attention_mask is None:
            raise ValueError("Must provide `prompt_attention_mask` when specifying `prompt_embeds`.")

        if negative_prompt_embeds is not None and negative_prompt_attention_mask is None:
            raise ValueError("Must provide `negative_prompt_attention_mask` when specifying `negative_prompt_embeds`.")

        if prompt_embeds is not None and negative_prompt_embeds is not None:
            # Pre-computed positive/negative embeddings must be pairwise compatible for CFG.
            if prompt_embeds.shape != negative_prompt_embeds.shape:
                raise ValueError(
                    "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
                    f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
                    f" {negative_prompt_embeds.shape}."
                )
            if prompt_attention_mask.shape != negative_prompt_attention_mask.shape:
                raise ValueError(
                    "`prompt_attention_mask` and `negative_prompt_attention_mask` must have the same shape when passed directly, but"
                    f" got: `prompt_attention_mask` {prompt_attention_mask.shape} != `negative_prompt_attention_mask`"
                    f" {negative_prompt_attention_mask.shape}."
                )

        # `conditions` is mutually exclusive with raw `image`/`video` conditioning inputs.
        if conditions is not None and (image is not None or video is not None):
            raise ValueError("If `conditions` is provided, `image` and `video` must not be provided.")

        if conditions is None:
            # When conditioning inputs are passed directly as lists, they must line up element-wise.
            if isinstance(image, list) and isinstance(frame_index, list) and len(image) != len(frame_index):
                raise ValueError(
                    "If `conditions` is not provided, `image` and `frame_index` must be of the same length."
) elif isinstance(image, list) and isinstance(strength, list) and len(image) != len(strength): raise ValueError("If `conditions` is not provided, `image` and `strength` must be of the same length.") elif isinstance(video, list) and isinstance(frame_index, list) and len(video) != len(frame_index): raise ValueError( "If `conditions` is not provided, `video` and `frame_index` must be of the same length." ) elif isinstance(video, list) and isinstance(strength, list) and len(video) != len(strength): raise ValueError("If `conditions` is not provided, `video` and `strength` must be of the same length.") if denoise_strength < 0 or denoise_strength > 1: raise ValueError(f"The value of strength should in [0.0, 1.0] but is {denoise_strength}") @staticmethod def _prepare_video_ids( batch_size: int, num_frames: int, height: int, width: int, patch_size: int = 1, patch_size_t: int = 1, device: torch.device = None, ) -> torch.Tensor: latent_sample_coords = torch.meshgrid( torch.arange(0, num_frames, patch_size_t, device=device), torch.arange(0, height, patch_size, device=device), torch.arange(0, width, patch_size, device=device), indexing="ij", ) latent_sample_coords = torch.stack(latent_sample_coords, dim=0) latent_coords = latent_sample_coords.unsqueeze(0).repeat(batch_size, 1, 1, 1, 1) latent_coords = latent_coords.reshape(batch_size, -1, num_frames * height * width) return latent_coords @staticmethod def _scale_video_ids( video_ids: torch.Tensor, scale_factor: int = 32, scale_factor_t: int = 8, frame_index: int = 0, device: torch.device = None, ) -> torch.Tensor: scaled_latent_coords = ( video_ids * torch.tensor([scale_factor_t, scale_factor, scale_factor], device=video_ids.device)[None, :, None] ) scaled_latent_coords[:, 0] = (scaled_latent_coords[:, 0] + 1 - scale_factor_t).clamp(min=0) scaled_latent_coords[:, 0] += frame_index return scaled_latent_coords @staticmethod # Copied from diffusers.pipelines.ltx.pipeline_ltx.LTXPipeline._pack_latents def _pack_latents(latents: 
torch.Tensor, patch_size: int = 1, patch_size_t: int = 1) -> torch.Tensor: # Unpacked latents of shape are [B, C, F, H, W] are patched into tokens of shape [B, C, F // p_t, p_t, H // p, p, W // p, p]. # The patch dimensions are then permuted and collapsed into the channel dimension of shape: # [B, F // p_t * H // p * W // p, C * p_t * p * p] (an ndim=3 tensor). # dim=0 is the batch size, dim=1 is the effective video sequence length, dim=2 is the effective number of input features batch_size, num_channels, num_frames, height, width = latents.shape post_patch_num_frames = num_frames // patch_size_t post_patch_height = height // patch_size post_patch_width = width // patch_size latents = latents.reshape( batch_size, -1, post_patch_num_frames, patch_size_t, post_patch_height, patch_size, post_patch_width, patch_size, ) latents = latents.permute(0, 2, 4, 6, 1, 3, 5, 7).flatten(4, 7).flatten(1, 3) return latents @staticmethod # Copied from diffusers.pipelines.ltx.pipeline_ltx.LTXPipeline._unpack_latents def _unpack_latents( latents: torch.Tensor, num_frames: int, height: int, width: int, patch_size: int = 1, patch_size_t: int = 1 ) -> torch.Tensor: # Packed latents of shape [B, S, D] (S is the effective video sequence length, D is the effective feature dimensions) # are unpacked and reshaped into a video tensor of shape [B, C, F, H, W]. This is the inverse operation of # what happens in the `_pack_latents` method. 
        batch_size = latents.size(0)
        latents = latents.reshape(batch_size, num_frames, height, width, -1, patch_size_t, patch_size, patch_size)
        # Inverse permutation of `_pack_latents`: restores [B, C, F, H, W].
        latents = latents.permute(0, 4, 1, 5, 2, 6, 3, 7).flatten(6, 7).flatten(4, 5).flatten(2, 3)
        return latents

    @staticmethod
    # Copied from diffusers.pipelines.ltx.pipeline_ltx.LTXPipeline._normalize_latents
    def _normalize_latents(
        latents: torch.Tensor, latents_mean: torch.Tensor, latents_std: torch.Tensor, scaling_factor: float = 1.0
    ) -> torch.Tensor:
        # Normalize latents across the channel dimension [B, C, F, H, W]
        latents_mean = latents_mean.view(1, -1, 1, 1, 1).to(latents.device, latents.dtype)
        latents_std = latents_std.view(1, -1, 1, 1, 1).to(latents.device, latents.dtype)
        latents = (latents - latents_mean) * scaling_factor / latents_std
        return latents

    @staticmethod
    # Copied from diffusers.pipelines.ltx.pipeline_ltx.LTXPipeline._denormalize_latents
    def _denormalize_latents(
        latents: torch.Tensor, latents_mean: torch.Tensor, latents_std: torch.Tensor, scaling_factor: float = 1.0
    ) -> torch.Tensor:
        # Denormalize latents across the channel dimension [B, C, F, H, W]
        # Exact inverse of `_normalize_latents`.
        latents_mean = latents_mean.view(1, -1, 1, 1, 1).to(latents.device, latents.dtype)
        latents_std = latents_std.view(1, -1, 1, 1, 1).to(latents.device, latents.dtype)
        latents = latents * latents_std / scaling_factor + latents_mean
        return latents

    def trim_conditioning_sequence(self, start_frame: int, sequence_num_frames: int, target_num_frames: int):
        """
        Trim a conditioning sequence to the allowed number of frames.

        Args:
            start_frame (int): The target frame number of the first frame in the sequence.
            sequence_num_frames (int): The number of frames in the sequence.
            target_num_frames (int): The target number of frames in the generated video.
        Returns:
            int: updated sequence length
        """
        scale_factor = self.vae_temporal_compression_ratio
        num_frames = min(sequence_num_frames, target_num_frames - start_frame)
        # Trim down to a multiple of temporal_scale_factor frames plus 1
        num_frames = (num_frames - 1) // scale_factor * scale_factor + 1
        return num_frames

    @staticmethod
    def add_noise_to_image_conditioning_latents(
        t: float,
        init_latents: torch.Tensor,
        latents: torch.Tensor,
        noise_scale: float,
        conditioning_mask: torch.Tensor,
        generator,
        eps=1e-6,
    ):
        """
        Add timestep-dependent noise to the hard-conditioning latents. This helps with motion continuity, especially
        when conditioned on a single frame.
        """
        noise = randn_tensor(
            latents.shape,
            generator=generator,
            device=latents.device,
            dtype=latents.dtype,
        )
        # Add noise only to hard-conditioning latents (conditioning_mask = 1.0)
        need_to_noise = (conditioning_mask > 1.0 - eps).unsqueeze(-1)
        # Noise magnitude grows quadratically with the (normalized) timestep `t`.
        noised_latents = init_latents + noise_scale * noise * (t**2)
        latents = torch.where(need_to_noise, noised_latents, latents)
        return latents

    def prepare_latents(
        self,
        conditions: list[torch.Tensor] | None = None,
        condition_strength: list[float] | None = None,
        condition_frame_index: list[int] | None = None,
        batch_size: int = 1,
        num_channels_latents: int = 128,
        height: int = 512,
        width: int = 704,
        num_frames: int = 161,
        num_prefix_latent_frames: int = 2,
        sigma: torch.Tensor | None = None,
        latents: torch.Tensor | None = None,
        generator: torch.Generator | None = None,
        device: torch.device | None = None,
        dtype: torch.dtype | None = None,
    ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, int]:
        # Builds the initial packed latent sequence, blending in any conditioning
        # images/videos. Returns (latents, conditioning_mask, video_ids, num_extra_tokens).
        # NOTE(review): `conditions` is assumed to be a list here (`len(conditions)` below) —
        # callers in `__call__` always pass a list, despite the `None` default.
        num_latent_frames = (num_frames - 1) // self.vae_temporal_compression_ratio + 1
        latent_height = height // self.vae_spatial_compression_ratio
        latent_width = width // self.vae_spatial_compression_ratio

        shape = (batch_size, num_channels_latents, num_latent_frames, latent_height, latent_width)

        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        if latents is not None and sigma is not None:
            if latents.shape != shape:
                raise ValueError(
                    f"Latents shape {latents.shape} does not match expected shape {shape}. Please check the input."
                )
            # Video-to-video: interpolate the supplied latents toward noise according to `sigma`.
            latents = latents.to(device=device, dtype=dtype)
            sigma = sigma.to(device=device, dtype=dtype)
            latents = sigma * noise + (1 - sigma) * latents
        else:
            latents = noise

        if len(conditions) > 0:
            # Per-latent-frame conditioning strength; 0 where no conditioning applies.
            condition_latent_frames_mask = torch.zeros(
                (batch_size, num_latent_frames), device=device, dtype=torch.float32
            )

            extra_conditioning_latents = []
            extra_conditioning_video_ids = []
            extra_conditioning_mask = []
            extra_conditioning_num_latents = 0
            for data, strength, frame_index in zip(conditions, condition_strength, condition_frame_index):
                # Encode each conditioning image/video into normalized latent space.
                condition_latents = retrieve_latents(self.vae.encode(data), generator=generator)
                condition_latents = self._normalize_latents(
                    condition_latents, self.vae.latents_mean, self.vae.latents_std
                ).to(device, dtype=dtype)

                num_data_frames = data.size(2)
                num_cond_frames = condition_latents.size(2)

                if frame_index == 0:
                    # Conditioning starts at the first frame: blend directly into the latent prefix.
                    latents[:, :, :num_cond_frames] = torch.lerp(
                        latents[:, :, :num_cond_frames], condition_latents, strength
                    )
                    condition_latent_frames_mask[:, :num_cond_frames] = strength

                else:
                    if num_data_frames > 1:
                        if num_cond_frames < num_prefix_latent_frames:
                            raise ValueError(
                                f"Number of latent frames must be at least {num_prefix_latent_frames} but got {num_data_frames}."
                            )

                        if num_cond_frames > num_prefix_latent_frames:
                            # Blend the non-prefix part of the conditioning latents into the
                            # video latents at the requested frame offset.
                            start_frame = frame_index // self.vae_temporal_compression_ratio + num_prefix_latent_frames
                            end_frame = start_frame + num_cond_frames - num_prefix_latent_frames
                            latents[:, :, start_frame:end_frame] = torch.lerp(
                                latents[:, :, start_frame:end_frame],
                                condition_latents[:, :, num_prefix_latent_frames:],
                                strength,
                            )
                            condition_latent_frames_mask[:, start_frame:end_frame] = strength
                            condition_latents = condition_latents[:, :, :num_prefix_latent_frames]

                    # The remaining prefix latents become extra tokens appended to the sequence,
                    # noised according to `strength` (strength=1 keeps them clean).
                    noise = randn_tensor(condition_latents.shape, generator=generator, device=device, dtype=dtype)
                    condition_latents = torch.lerp(noise, condition_latents, strength)

                    condition_video_ids = self._prepare_video_ids(
                        batch_size,
                        condition_latents.size(2),
                        latent_height,
                        latent_width,
                        patch_size=self.transformer_spatial_patch_size,
                        patch_size_t=self.transformer_temporal_patch_size,
                        device=device,
                    )
                    condition_video_ids = self._scale_video_ids(
                        condition_video_ids,
                        scale_factor=self.vae_spatial_compression_ratio,
                        scale_factor_t=self.vae_temporal_compression_ratio,
                        frame_index=frame_index,
                        device=device,
                    )
                    condition_latents = self._pack_latents(
                        condition_latents,
                        self.transformer_spatial_patch_size,
                        self.transformer_temporal_patch_size,
                    )
                    condition_conditioning_mask = torch.full(
                        condition_latents.shape[:2], strength, device=device, dtype=dtype
                    )

                    extra_conditioning_latents.append(condition_latents)
                    extra_conditioning_video_ids.append(condition_video_ids)
                    extra_conditioning_mask.append(condition_conditioning_mask)
                    extra_conditioning_num_latents += condition_latents.size(1)

        video_ids = self._prepare_video_ids(
            batch_size,
            num_latent_frames,
            latent_height,
            latent_width,
            patch_size_t=self.transformer_temporal_patch_size,
            patch_size=self.transformer_spatial_patch_size,
            device=device,
        )
        if len(conditions) > 0:
            # Per-token strength, gathered from the per-frame mask via each token's temporal coordinate.
            conditioning_mask = condition_latent_frames_mask.gather(1, video_ids[:, 0])
        else:
            conditioning_mask, extra_conditioning_num_latents = None, 0
        video_ids = self._scale_video_ids(
            video_ids,
            scale_factor=self.vae_spatial_compression_ratio,
            scale_factor_t=self.vae_temporal_compression_ratio,
            frame_index=0,
            device=device,
        )
        latents = self._pack_latents(
            latents, self.transformer_spatial_patch_size, self.transformer_temporal_patch_size
        )

        if len(conditions) > 0 and len(extra_conditioning_latents) > 0:
            # Prepend the extra conditioning tokens so they are attended to alongside the video tokens.
            latents = torch.cat([*extra_conditioning_latents, latents], dim=1)
            video_ids = torch.cat([*extra_conditioning_video_ids, video_ids], dim=2)
            conditioning_mask = torch.cat([*extra_conditioning_mask, conditioning_mask], dim=1)

        return latents, conditioning_mask, video_ids, extra_conditioning_num_latents

    def get_timesteps(self, sigmas, timesteps, num_inference_steps, strength):
        # Keep only the last `strength` fraction of the schedule (video-to-video editing:
        # lower strength skips the earliest/noisiest steps).
        num_steps = min(int(num_inference_steps * strength), num_inference_steps)
        start_index = max(num_inference_steps - num_steps, 0)
        sigmas = sigmas[start_index:]
        timesteps = timesteps[start_index:]
        return sigmas, timesteps, num_inference_steps - start_index

    @property
    def guidance_scale(self):
        # Set at the start of `__call__`.
        return self._guidance_scale

    @property
    def guidance_rescale(self):
        return self._guidance_rescale

    @property
    def do_classifier_free_guidance(self):
        # CFG is active only for guidance scales strictly above 1.
        return self._guidance_scale > 1.0

    @property
    def num_timesteps(self):
        return self._num_timesteps

    @property
    def current_timestep(self):
        return self._current_timestep

    @property
    def attention_kwargs(self):
        return self._attention_kwargs

    @property
    def interrupt(self):
        return self._interrupt

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        conditions: LTXVideoCondition | list[LTXVideoCondition] = None,
        image: PipelineImageInput | list[PipelineImageInput] = None,
        video: list[PipelineImageInput] = None,
        frame_index: int | list[int] = 0,
        strength: float | list[float] = 1.0,
        denoise_strength: float = 1.0,
        prompt: str | list[str] = None,
        negative_prompt: str | list[str] | None = None,
        height: int = 512,
        width: int = 704,
        num_frames: int = 161,
        frame_rate: int = 25,
        num_inference_steps: int = 50,
        timesteps: list[int] = None,
guidance_scale: float = 3, guidance_rescale: float = 0.0, image_cond_noise_scale: float = 0.15, num_videos_per_prompt: int | None = 1, generator: torch.Generator | list[torch.Generator] | None = None, latents: torch.Tensor | None = None, prompt_embeds: torch.Tensor | None = None, prompt_attention_mask: torch.Tensor | None = None, negative_prompt_embeds: torch.Tensor | None = None, negative_prompt_attention_mask: torch.Tensor | None = None, decode_timestep: float | list[float] = 0.0, decode_noise_scale: float | list[float] | None = None, output_type: str | None = "pil", return_dict: bool = True, attention_kwargs: dict[str, Any] | None = None, callback_on_step_end: Callable[[int, int], None] | None = None, callback_on_step_end_tensor_inputs: list[str] = ["latents"], max_sequence_length: int = 256, ): r""" Function invoked when calling the pipeline for generation. Args: conditions (`list[LTXVideoCondition], *optional*`): The list of frame-conditioning items for the video generation.If not provided, conditions will be created using `image`, `video`, `frame_index` and `strength`. image (`PipelineImageInput` or `list[PipelineImageInput]`, *optional*): The image or images to condition the video generation. If not provided, one has to pass `video` or `conditions`. video (`list[PipelineImageInput]`, *optional*): The video to condition the video generation. If not provided, one has to pass `image` or `conditions`. frame_index (`int` or `list[int]`, *optional*): The frame index or frame indices at which the image or video will conditionally effect the video generation. If not provided, one has to pass `conditions`. strength (`float` or `list[float]`, *optional*): The strength or strengths of the conditioning effect. If not provided, one has to pass `conditions`. denoise_strength (`float`, defaults to `1.0`): The strength of the noise added to the latents for editing. 
Higher strength leads to more noise added to the latents, therefore leading to more differences between
                original video and generated video. This is useful for video-to-video editing.
            prompt (`str` or `list[str]`, *optional*):
                The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
                instead.
            height (`int`, defaults to `512`):
                The height in pixels of the generated image. This is set to 512 by default for the best results.
            width (`int`, defaults to `704`):
                The width in pixels of the generated image. This is set to 704 by default for the best results.
            num_frames (`int`, defaults to `161`):
                The number of video frames to generate
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            timesteps (`list[int]`, *optional*):
                Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument
                in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
                passed will be used. Must be in descending order.
            guidance_scale (`float`, defaults to `3`):
                Guidance scale as defined in [Classifier-Free Diffusion
                Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2.
                of [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting
                `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to
                the text `prompt`, usually at the expense of lower image quality.
            guidance_rescale (`float`, *optional*, defaults to 0.0):
                Guidance rescale factor proposed by [Common Diffusion Noise Schedules and Sample Steps are
                Flawed](https://huggingface.co/papers/2305.08891) `guidance_scale` is defined as `φ` in equation 16. of
                [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://huggingface.co/papers/2305.08891).
Guidance rescale factor should fix overexposure when using zero terminal SNR.
            num_videos_per_prompt (`int`, *optional*, defaults to 1):
                The number of videos to generate per prompt.
            generator (`torch.Generator` or `list[torch.Generator]`, *optional*):
                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
                to make generation deterministic.
            latents (`torch.Tensor`, *optional*):
                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
                tensor will be generated by sampling using the supplied random `generator`.
            prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                provided, text embeddings will be generated from `prompt` input argument.
            prompt_attention_mask (`torch.Tensor`, *optional*):
                Pre-generated attention mask for text embeddings.
            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated negative text embeddings. If not provided, negative_prompt_embeds will be generated from
                `negative_prompt` input argument.
            negative_prompt_attention_mask (`torch.FloatTensor`, *optional*):
                Pre-generated attention mask for negative text embeddings.
            decode_timestep (`float`, defaults to `0.0`):
                The timestep at which generated video is decoded.
            decode_noise_scale (`float`, defaults to `None`):
                The interpolation factor between random noise and denoised latents at the decode timestep.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generate image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.ltx.LTXPipelineOutput`] instead of a plain tuple.
attention_kwargs (`dict`, *optional*):
                A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
                `self.processor` in
                [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
            callback_on_step_end (`Callable`, *optional*):
                A function that calls at the end of each denoising steps during the inference. The function is called
                with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
                callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
                `callback_on_step_end_tensor_inputs`.
            callback_on_step_end_tensor_inputs (`list`, *optional*):
                The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
                will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
                `._callback_tensor_inputs` attribute of your pipeline class.
            max_sequence_length (`int` defaults to `256`):
                Maximum sequence length to use with the `prompt`.

        Examples:

        Returns:
            [`~pipelines.ltx.LTXPipelineOutput`] or `tuple`:
                If `return_dict` is `True`, [`~pipelines.ltx.LTXPipelineOutput`] is returned, otherwise a `tuple` is
                returned where the first element is a list with the generated images.
        """

        if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)):
            callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs

        # 1. Check inputs. 
Raise error if not correct self.check_inputs( prompt=prompt, conditions=conditions, image=image, video=video, frame_index=frame_index, strength=strength, denoise_strength=denoise_strength, height=height, width=width, callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, prompt_attention_mask=prompt_attention_mask, negative_prompt_attention_mask=negative_prompt_attention_mask, ) self._guidance_scale = guidance_scale self._guidance_rescale = guidance_rescale self._attention_kwargs = attention_kwargs self._interrupt = False self._current_timestep = None # 2. Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if conditions is not None: if not isinstance(conditions, list): conditions = [conditions] strength = [condition.strength for condition in conditions] frame_index = [condition.frame_index for condition in conditions] image = [condition.image for condition in conditions] video = [condition.video for condition in conditions] elif image is not None or video is not None: if not isinstance(image, list): image = [image] num_conditions = 1 elif isinstance(image, list): num_conditions = len(image) if not isinstance(video, list): video = [video] num_conditions = 1 elif isinstance(video, list): num_conditions = len(video) if not isinstance(frame_index, list): frame_index = [frame_index] * num_conditions if not isinstance(strength, list): strength = [strength] * num_conditions device = self._execution_device vae_dtype = self.vae.dtype # 3. 
Prepare text embeddings & conditioning image/video ( prompt_embeds, prompt_attention_mask, negative_prompt_embeds, negative_prompt_attention_mask, ) = self.encode_prompt( prompt=prompt, negative_prompt=negative_prompt, do_classifier_free_guidance=self.do_classifier_free_guidance, num_videos_per_prompt=num_videos_per_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, prompt_attention_mask=prompt_attention_mask, negative_prompt_attention_mask=negative_prompt_attention_mask, max_sequence_length=max_sequence_length, device=device, ) if self.do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) prompt_attention_mask = torch.cat([negative_prompt_attention_mask, prompt_attention_mask], dim=0) conditioning_tensors = [] is_conditioning_image_or_video = image is not None or video is not None if is_conditioning_image_or_video: for condition_image, condition_video, condition_frame_index, condition_strength in zip( image, video, frame_index, strength ): if condition_image is not None: condition_tensor = ( self.video_processor.preprocess(condition_image, height, width) .unsqueeze(2) .to(device, dtype=vae_dtype) ) elif condition_video is not None: condition_tensor = self.video_processor.preprocess_video(condition_video, height, width) num_frames_input = condition_tensor.size(2) num_frames_output = self.trim_conditioning_sequence( condition_frame_index, num_frames_input, num_frames ) condition_tensor = condition_tensor[:, :, :num_frames_output] condition_tensor = condition_tensor.to(device, dtype=vae_dtype) else: raise ValueError("Either `image` or `video` must be provided for conditioning.") if condition_tensor.size(2) % self.vae_temporal_compression_ratio != 1: raise ValueError( f"Number of frames in the video must be of the form (k * {self.vae_temporal_compression_ratio} + 1) " f"but got {condition_tensor.size(2)} frames." ) conditioning_tensors.append(condition_tensor) # 4. 
Prepare timesteps latent_num_frames = (num_frames - 1) // self.vae_temporal_compression_ratio + 1 latent_height = height // self.vae_spatial_compression_ratio latent_width = width // self.vae_spatial_compression_ratio if timesteps is None: sigmas = linear_quadratic_schedule(num_inference_steps) timesteps = sigmas * 1000 if XLA_AVAILABLE: timestep_device = "cpu" else: timestep_device = device timesteps, num_inference_steps = retrieve_timesteps( self.scheduler, num_inference_steps, timestep_device, timesteps, ) sigmas = self.scheduler.sigmas num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) latent_sigma = None if denoise_strength < 1: sigmas, timesteps, num_inference_steps = self.get_timesteps( sigmas, timesteps, num_inference_steps, denoise_strength ) latent_sigma = sigmas[:1].repeat(batch_size * num_videos_per_prompt) self._num_timesteps = len(timesteps) # 5. Prepare latent variables num_channels_latents = self.transformer.config.in_channels latents, conditioning_mask, video_coords, extra_conditioning_num_latents = self.prepare_latents( conditioning_tensors, strength, frame_index, batch_size=batch_size * num_videos_per_prompt, num_channels_latents=num_channels_latents, height=height, width=width, num_frames=num_frames, sigma=latent_sigma, latents=latents, generator=generator, device=device, dtype=torch.float32, ) video_coords = video_coords.float() video_coords[:, 0] = video_coords[:, 0] * (1.0 / frame_rate) init_latents = latents.clone() if is_conditioning_image_or_video else None if self.do_classifier_free_guidance: video_coords = torch.cat([video_coords, video_coords], dim=0) # 6. 
Denoising loop with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): if self.interrupt: continue self._current_timestep = t if image_cond_noise_scale > 0 and init_latents is not None: # Add timestep-dependent noise to the hard-conditioning latents # This helps with motion continuity, especially when conditioned on a single frame latents = self.add_noise_to_image_conditioning_latents( t / 1000.0, init_latents, latents, image_cond_noise_scale, conditioning_mask, generator, ) latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents if is_conditioning_image_or_video: conditioning_mask_model_input = ( torch.cat([conditioning_mask, conditioning_mask]) if self.do_classifier_free_guidance else conditioning_mask ) latent_model_input = latent_model_input.to(prompt_embeds.dtype) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML timestep = t.expand(latent_model_input.shape[0]).unsqueeze(-1).float() if is_conditioning_image_or_video: timestep = torch.min(timestep, (1 - conditioning_mask_model_input) * 1000.0) with self.transformer.cache_context("cond_uncond"): noise_pred = self.transformer( hidden_states=latent_model_input, encoder_hidden_states=prompt_embeds, timestep=timestep, encoder_attention_mask=prompt_attention_mask, video_coords=video_coords, attention_kwargs=attention_kwargs, return_dict=False, )[0] if self.do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) timestep, _ = timestep.chunk(2) if self.guidance_rescale > 0: # Based on 3.4. 
in https://huggingface.co/papers/2305.08891 noise_pred = rescale_noise_cfg( noise_pred, noise_pred_text, guidance_rescale=self.guidance_rescale ) denoised_latents = self.scheduler.step( -noise_pred, t, latents, per_token_timesteps=timestep, return_dict=False )[0] if is_conditioning_image_or_video: tokens_to_denoise_mask = (t / 1000 - 1e-6 < (1.0 - conditioning_mask)).unsqueeze(-1) latents = torch.where(tokens_to_denoise_mask, denoised_latents, latents) else: latents = denoised_latents if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop("latents", latents) prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if XLA_AVAILABLE: xm.mark_step() if is_conditioning_image_or_video: latents = latents[:, extra_conditioning_num_latents:] latents = self._unpack_latents( latents, latent_num_frames, latent_height, latent_width, self.transformer_spatial_patch_size, self.transformer_temporal_patch_size, ) if output_type == "latent": video = latents else: latents = self._denormalize_latents( latents, self.vae.latents_mean, self.vae.latents_std, self.vae.config.scaling_factor ) latents = latents.to(prompt_embeds.dtype) if not self.vae.config.timestep_conditioning: timestep = None else: noise = randn_tensor(latents.shape, generator=generator, device=device, dtype=latents.dtype) if not isinstance(decode_timestep, list): decode_timestep = [decode_timestep] * batch_size if decode_noise_scale is None: decode_noise_scale = decode_timestep elif not isinstance(decode_noise_scale, list): decode_noise_scale = [decode_noise_scale] * batch_size timestep = torch.tensor(decode_timestep, device=device, dtype=latents.dtype) decode_noise_scale = 
torch.tensor(decode_noise_scale, device=device, dtype=latents.dtype)[ :, None, None, None, None ] latents = (1 - decode_noise_scale) * latents + decode_noise_scale * noise video = self.vae.decode(latents, timestep, return_dict=False)[0] video = self.video_processor.postprocess_video(video, output_type=output_type) # Offload all models self.maybe_free_model_hooks() if not return_dict: return (video,) return LTXPipelineOutput(frames=video)
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/pipelines/ltx/pipeline_ltx_condition.py", "license": "Apache License 2.0", "lines": 1137, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:tests/pipelines/ltx/test_ltx_condition.py
# Copyright 2025 The HuggingFace Team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import unittest import numpy as np import torch from transformers import AutoConfig, AutoTokenizer, T5EncoderModel from diffusers import ( AutoencoderKLLTXVideo, FlowMatchEulerDiscreteScheduler, LTXConditionPipeline, LTXVideoTransformer3DModel, ) from diffusers.pipelines.ltx.pipeline_ltx_condition import LTXVideoCondition from ...testing_utils import enable_full_determinism, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin, to_np enable_full_determinism() class LTXConditionPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = LTXConditionPipeline params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs"} batch_params = TEXT_TO_IMAGE_BATCH_PARAMS.union({"image"}) image_params = TEXT_TO_IMAGE_IMAGE_PARAMS image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS required_optional_params = frozenset( [ "num_inference_steps", "generator", "latents", "return_dict", "callback_on_step_end", "callback_on_step_end_tensor_inputs", ] ) test_xformers_attention = False def get_dummy_components(self): torch.manual_seed(0) transformer = LTXVideoTransformer3DModel( in_channels=8, out_channels=8, patch_size=1, patch_size_t=1, num_attention_heads=4, attention_head_dim=8, cross_attention_dim=32, num_layers=1, caption_channels=32, ) torch.manual_seed(0) vae = 
AutoencoderKLLTXVideo( in_channels=3, out_channels=3, latent_channels=8, block_out_channels=(8, 8, 8, 8), decoder_block_out_channels=(8, 8, 8, 8), layers_per_block=(1, 1, 1, 1, 1), decoder_layers_per_block=(1, 1, 1, 1, 1), spatio_temporal_scaling=(True, True, False, False), decoder_spatio_temporal_scaling=(True, True, False, False), decoder_inject_noise=(False, False, False, False, False), upsample_residual=(False, False, False, False), upsample_factor=(1, 1, 1, 1), timestep_conditioning=False, patch_size=1, patch_size_t=1, encoder_causal=True, decoder_causal=False, ) vae.use_framewise_encoding = False vae.use_framewise_decoding = False torch.manual_seed(0) scheduler = FlowMatchEulerDiscreteScheduler() config = AutoConfig.from_pretrained("hf-internal-testing/tiny-random-t5") text_encoder = T5EncoderModel(config) tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") components = { "transformer": transformer, "vae": vae, "scheduler": scheduler, "text_encoder": text_encoder, "tokenizer": tokenizer, } return components def get_dummy_inputs(self, device, seed=0, use_conditions=False): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) image = torch.randn((1, 3, 32, 32), generator=generator, device=device) if use_conditions: conditions = LTXVideoCondition( image=image, ) else: conditions = None inputs = { "conditions": conditions, "image": None if use_conditions else image, "prompt": "dance monkey", "negative_prompt": "", "generator": generator, "num_inference_steps": 2, "guidance_scale": 3.0, "height": 32, "width": 32, # 8 * k + 1 is the recommendation "num_frames": 9, "max_sequence_length": 16, "output_type": "pt", } return inputs def test_inference(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) inputs2 = 
self.get_dummy_inputs(device, use_conditions=True) video = pipe(**inputs).frames generated_video = video[0] video2 = pipe(**inputs2).frames generated_video2 = video2[0] self.assertEqual(generated_video.shape, (9, 3, 32, 32)) max_diff = np.abs(generated_video - generated_video2).max() self.assertLessEqual(max_diff, 1e-3) def test_callback_inputs(self): sig = inspect.signature(self.pipeline_class.__call__) has_callback_tensor_inputs = "callback_on_step_end_tensor_inputs" in sig.parameters has_callback_step_end = "callback_on_step_end" in sig.parameters if not (has_callback_tensor_inputs and has_callback_step_end): return components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) self.assertTrue( hasattr(pipe, "_callback_tensor_inputs"), f" {self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs", ) def callback_inputs_subset(pipe, i, t, callback_kwargs): # iterate over callback args for tensor_name, tensor_value in callback_kwargs.items(): # check that we're only passing in allowed tensor inputs assert tensor_name in pipe._callback_tensor_inputs return callback_kwargs def callback_inputs_all(pipe, i, t, callback_kwargs): for tensor_name in pipe._callback_tensor_inputs: assert tensor_name in callback_kwargs # iterate over callback args for tensor_name, tensor_value in callback_kwargs.items(): # check that we're only passing in allowed tensor inputs assert tensor_name in pipe._callback_tensor_inputs return callback_kwargs inputs = self.get_dummy_inputs(torch_device) # Test passing in a subset inputs["callback_on_step_end"] = callback_inputs_subset inputs["callback_on_step_end_tensor_inputs"] = ["latents"] output = pipe(**inputs)[0] # Test passing in a everything inputs["callback_on_step_end"] = callback_inputs_all inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs 
output = pipe(**inputs)[0] def callback_inputs_change_tensor(pipe, i, t, callback_kwargs): is_last = i == (pipe.num_timesteps - 1) if is_last: callback_kwargs["latents"] = torch.zeros_like(callback_kwargs["latents"]) return callback_kwargs inputs["callback_on_step_end"] = callback_inputs_change_tensor inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs output = pipe(**inputs)[0] assert output.abs().sum() < 1e10 def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(batch_size=3, expected_max_diff=1e-3) def test_attention_slicing_forward_pass( self, test_max_difference=True, test_mean_pixel_difference=True, expected_max_diff=1e-3 ): if not self.test_attention_slicing: return components = self.get_dummy_components() pipe = self.pipeline_class(**components) for component in pipe.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) generator_device = "cpu" inputs = self.get_dummy_inputs(generator_device) output_without_slicing = pipe(**inputs)[0] pipe.enable_attention_slicing(slice_size=1) inputs = self.get_dummy_inputs(generator_device) output_with_slicing1 = pipe(**inputs)[0] pipe.enable_attention_slicing(slice_size=2) inputs = self.get_dummy_inputs(generator_device) output_with_slicing2 = pipe(**inputs)[0] if test_max_difference: max_diff1 = np.abs(to_np(output_with_slicing1) - to_np(output_without_slicing)).max() max_diff2 = np.abs(to_np(output_with_slicing2) - to_np(output_without_slicing)).max() self.assertLess( max(max_diff1, max_diff2), expected_max_diff, "Attention slicing should not affect the inference results", ) def test_vae_tiling(self, expected_diff_max: float = 0.2): generator_device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to("cpu") pipe.set_progress_bar_config(disable=None) # Without tiling inputs = 
self.get_dummy_inputs(generator_device) inputs["height"] = inputs["width"] = 128 output_without_tiling = pipe(**inputs)[0] # With tiling pipe.vae.enable_tiling( tile_sample_min_height=96, tile_sample_min_width=96, tile_sample_stride_height=64, tile_sample_stride_width=64, ) inputs = self.get_dummy_inputs(generator_device) inputs["height"] = inputs["width"] = 128 output_with_tiling = pipe(**inputs)[0] self.assertLess( (to_np(output_without_tiling) - to_np(output_with_tiling)).max(), expected_diff_max, "VAE tiling should not affect the inference results", )
{ "repo_id": "huggingface/diffusers", "file_path": "tests/pipelines/ltx/test_ltx_condition.py", "license": "Apache License 2.0", "lines": 239, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/diffusers:examples/cogview4-control/train_control_cogview4.py
#!/usr/bin/env python # coding=utf-8 # Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import copy import logging import math import os import random import shutil from contextlib import nullcontext from pathlib import Path import accelerate import numpy as np import torch import transformers from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import DistributedType, ProjectConfiguration, set_seed from datasets import load_dataset from huggingface_hub import create_repo, upload_folder from packaging import version from PIL import Image from torchvision import transforms from tqdm.auto import tqdm import diffusers from diffusers import ( AutoencoderKL, CogView4ControlPipeline, CogView4Transformer2DModel, FlowMatchEulerDiscreteScheduler, ) from diffusers.optimization import get_scheduler from diffusers.training_utils import ( compute_density_for_timestep_sampling, compute_loss_weighting_for_sd3, free_memory, ) from diffusers.utils import check_min_version, is_wandb_available, load_image, make_image_grid from diffusers.utils.hub_utils import load_or_create_model_card, populate_model_card from diffusers.utils.torch_utils import is_compiled_module if is_wandb_available(): import wandb # Will error if the minimal version of diffusers is not installed. Remove at your own risks. 
check_min_version("0.37.0.dev0") logger = get_logger(__name__) NORM_LAYER_PREFIXES = ["norm_q", "norm_k", "norm_added_q", "norm_added_k"] def encode_images(pixels: torch.Tensor, vae: torch.nn.Module, weight_dtype): pixel_latents = vae.encode(pixels.to(vae.dtype)).latent_dist.sample() pixel_latents = (pixel_latents - vae.config.shift_factor) * vae.config.scaling_factor return pixel_latents.to(weight_dtype) def log_validation(cogview4_transformer, args, accelerator, weight_dtype, step, is_final_validation=False): logger.info("Running validation... ") if not is_final_validation: cogview4_transformer = accelerator.unwrap_model(cogview4_transformer) pipeline = CogView4ControlPipeline.from_pretrained( args.pretrained_model_name_or_path, transformer=cogview4_transformer, torch_dtype=weight_dtype, ) else: transformer = CogView4Transformer2DModel.from_pretrained(args.output_dir, torch_dtype=weight_dtype) pipeline = CogView4ControlPipeline.from_pretrained( args.pretrained_model_name_or_path, transformer=transformer, torch_dtype=weight_dtype, ) pipeline.to(accelerator.device) pipeline.set_progress_bar_config(disable=True) if args.seed is None: generator = None else: generator = torch.Generator(device=accelerator.device).manual_seed(args.seed) if len(args.validation_image) == len(args.validation_prompt): validation_images = args.validation_image validation_prompts = args.validation_prompt elif len(args.validation_image) == 1: validation_images = args.validation_image * len(args.validation_prompt) validation_prompts = args.validation_prompt elif len(args.validation_prompt) == 1: validation_images = args.validation_image validation_prompts = args.validation_prompt * len(args.validation_image) else: raise ValueError( "number of `args.validation_image` and `args.validation_prompt` should be checked in `parse_args`" ) image_logs = [] if is_final_validation or torch.backends.mps.is_available(): autocast_ctx = nullcontext() else: autocast_ctx = torch.autocast(accelerator.device.type, 
weight_dtype) for validation_prompt, validation_image in zip(validation_prompts, validation_images): validation_image = load_image(validation_image) # maybe need to inference on 1024 to get a good image validation_image = validation_image.resize((args.resolution, args.resolution)) images = [] for _ in range(args.num_validation_images): with autocast_ctx: image = pipeline( prompt=validation_prompt, control_image=validation_image, num_inference_steps=50, guidance_scale=args.guidance_scale, max_sequence_length=args.max_sequence_length, generator=generator, height=args.resolution, width=args.resolution, ).images[0] image = image.resize((args.resolution, args.resolution)) images.append(image) image_logs.append( {"validation_image": validation_image, "images": images, "validation_prompt": validation_prompt} ) tracker_key = "test" if is_final_validation else "validation" for tracker in accelerator.trackers: if tracker.name == "tensorboard": for log in image_logs: images = log["images"] validation_prompt = log["validation_prompt"] validation_image = log["validation_image"] formatted_images = [] formatted_images.append(np.asarray(validation_image)) for image in images: formatted_images.append(np.asarray(image)) formatted_images = np.stack(formatted_images) tracker.writer.add_images(validation_prompt, formatted_images, step, dataformats="NHWC") elif tracker.name == "wandb": formatted_images = [] for log in image_logs: images = log["images"] validation_prompt = log["validation_prompt"] validation_image = log["validation_image"] formatted_images.append(wandb.Image(validation_image, caption="Conditioning")) for image in images: image = wandb.Image(image, caption=validation_prompt) formatted_images.append(image) tracker.log({tracker_key: formatted_images}) else: logger.warning(f"image logging not implemented for {tracker.name}") del pipeline free_memory() return image_logs def save_model_card(repo_id: str, image_logs=None, base_model=str, repo_folder=None): img_str = "" if 
image_logs is not None: img_str = "You can find some example images below.\n\n" for i, log in enumerate(image_logs): images = log["images"] validation_prompt = log["validation_prompt"] validation_image = log["validation_image"] validation_image.save(os.path.join(repo_folder, "image_control.png")) img_str += f"prompt: {validation_prompt}\n" images = [validation_image] + images make_image_grid(images, 1, len(images)).save(os.path.join(repo_folder, f"images_{i}.png")) img_str += f"![images_{i})](./images_{i}.png)\n" model_description = f""" # cogview4-control-{repo_id} These are Control weights trained on {base_model} with new type of conditioning. {img_str} ## License Please adhere to the licensing terms as described [here](https://huggingface.co/THUDM/CogView4-6b/blob/main/LICENSE.md) """ model_card = load_or_create_model_card( repo_id_or_path=repo_id, from_training=True, license="other", base_model=base_model, model_description=model_description, inference=True, ) tags = [ "cogview4", "cogview4-diffusers", "text-to-image", "diffusers", "control", "diffusers-training", ] model_card = populate_model_card(model_card, tags=tags) model_card.save(os.path.join(repo_folder, "README.md")) def parse_args(input_args=None): parser = argparse.ArgumentParser(description="Simple example of a CogView4 Control training script.") parser.add_argument( "--pretrained_model_name_or_path", type=str, default=None, required=True, help="Path to pretrained model or model identifier from huggingface.co/models.", ) parser.add_argument( "--variant", type=str, default=None, help="Variant of the model files of the pretrained model identifier from huggingface.co/models, 'e.g.' 
fp16", ) parser.add_argument( "--revision", type=str, default=None, required=False, help="Revision of pretrained model identifier from huggingface.co/models.", ) parser.add_argument( "--output_dir", type=str, default="cogview4-control", help="The output directory where the model predictions and checkpoints will be written.", ) parser.add_argument( "--cache_dir", type=str, default=None, help="The directory where the downloaded models and datasets will be stored.", ) parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") parser.add_argument( "--resolution", type=int, default=1024, help=( "The resolution for input images, all the images in the train/validation dataset will be resized to this" " resolution" ), ) parser.add_argument( "--max_sequence_length", type=int, default=128, help="The maximum sequence length for the prompt." ) parser.add_argument( "--train_batch_size", type=int, default=4, help="Batch size (per device) for the training dataloader." ) parser.add_argument("--num_train_epochs", type=int, default=1) parser.add_argument( "--max_train_steps", type=int, default=None, help="Total number of training steps to perform. If provided, overrides num_train_epochs.", ) parser.add_argument( "--checkpointing_steps", type=int, default=500, help=( "Save a checkpoint of the training state every X updates. Checkpoints can be used for resuming training via `--resume_from_checkpoint`. " "In the case that the checkpoint is better than the final trained model, the checkpoint can also be used for inference." "Using a checkpoint for inference requires separate loading of the original pipeline and the individual checkpointed model components." "See https://huggingface.co/docs/diffusers/main/en/training/dreambooth#performing-inference-using-a-saved-checkpoint for step by step" "instructions." 
), ) parser.add_argument( "--checkpoints_total_limit", type=int, default=None, help=("Max number of checkpoints to store."), ) parser.add_argument( "--resume_from_checkpoint", type=str, default=None, help=( "Whether training should be resumed from a previous checkpoint. Use a path saved by" ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.' ), ) parser.add_argument( "--proportion_empty_prompts", type=float, default=0, help="Proportion of image prompts to be replaced with empty strings. Defaults to 0 (no prompt replacement).", ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument( "--gradient_checkpointing", action="store_true", help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.", ) parser.add_argument( "--learning_rate", type=float, default=5e-6, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument( "--scale_lr", action="store_true", default=False, help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.", ) parser.add_argument( "--lr_scheduler", type=str, default="constant", help=( 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' ' "constant", "constant_with_warmup"]' ), ) parser.add_argument( "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler." ) parser.add_argument( "--lr_num_cycles", type=int, default=1, help="Number of hard resets of the lr in cosine_with_restarts scheduler.", ) parser.add_argument("--lr_power", type=float, default=1.0, help="Power factor of the polynomial scheduler.") parser.add_argument( "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes." 
) parser.add_argument( "--dataloader_num_workers", type=int, default=0, help=( "Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process." ), ) parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.") parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.") parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.") parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.") parser.add_argument( "--hub_model_id", type=str, default=None, help="The name of the repository to keep in sync with the local `output_dir`.", ) parser.add_argument( "--logging_dir", type=str, default="logs", help=( "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to" " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***." ), ) parser.add_argument( "--allow_tf32", action="store_true", help=( "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see" " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices" ), ) parser.add_argument( "--report_to", type=str, default="tensorboard", help=( 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`' ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.' ), ) parser.add_argument( "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16"], help=( "Whether to use mixed precision. 
Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >=" " 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the" " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config." ), ) parser.add_argument( "--dataset_name", type=str, default=None, help=( "The name of the Dataset (from the HuggingFace hub) to train on (could be your own, possibly private," " dataset). It can also be a path pointing to a local copy of a dataset in your filesystem," " or to a folder containing files that 🤗 Datasets can understand." ), ) parser.add_argument( "--dataset_config_name", type=str, default=None, help="The config of the Dataset, leave as None if there's only one config.", ) parser.add_argument( "--image_column", type=str, default="image", help="The column of the dataset containing the target image." ) parser.add_argument( "--conditioning_image_column", type=str, default="conditioning_image", help="The column of the dataset containing the control conditioning image.", ) parser.add_argument( "--caption_column", type=str, default="text", help="The column of the dataset containing a caption or a list of captions.", ) parser.add_argument("--log_dataset_samples", action="store_true", help="Whether to log somple dataset samples.") parser.add_argument( "--max_train_samples", type=int, default=None, help=( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ), ) parser.add_argument( "--validation_prompt", type=str, default=None, nargs="+", help=( "A set of prompts evaluated every `--validation_steps` and logged to `--report_to`." " Provide either a matching number of `--validation_image`s, a single `--validation_image`" " to be used with all prompts, or a single prompt that will be used with all `--validation_image`s." 
), ) parser.add_argument( "--validation_image", type=str, default=None, nargs="+", help=( "A set of paths to the control conditioning image be evaluated every `--validation_steps`" " and logged to `--report_to`. Provide either a matching number of `--validation_prompt`s, a" " a single `--validation_prompt` to be used with all `--validation_image`s, or a single" " `--validation_image` that will be used with all `--validation_prompt`s." ), ) parser.add_argument( "--num_validation_images", type=int, default=1, help="Number of images to be generated for each `--validation_image`, `--validation_prompt` pair", ) parser.add_argument( "--validation_steps", type=int, default=100, help=( "Run validation every X steps. Validation consists of running the prompt" " `args.validation_prompt` multiple times: `args.num_validation_images`" " and logging the images." ), ) parser.add_argument( "--tracker_project_name", type=str, default="cogview4_train_control", help=( "The `project_name` argument passed to Accelerator.init_trackers for" " more information see https://huggingface.co/docs/accelerate/v0.17.0/en/package_reference/accelerator#accelerate.Accelerator" ), ) parser.add_argument( "--jsonl_for_train", type=str, default=None, help="Path to the jsonl file containing the training data.", ) parser.add_argument( "--only_target_transformer_blocks", action="store_true", help="If we should only target the transformer blocks to train along with the input layer (`x_embedder`).", ) parser.add_argument( "--guidance_scale", type=float, default=3.5, help="the guidance scale used for transformer.", ) parser.add_argument( "--upcast_before_saving", action="store_true", help=( "Whether to upcast the trained transformer layers to float32 before saving (at the end of training). 
" "Defaults to precision dtype used for training to save memory" ), ) parser.add_argument( "--weighting_scheme", type=str, default="none", choices=["sigma_sqrt", "logit_normal", "mode", "cosmap", "none"], help=('We default to the "none" weighting scheme for uniform sampling and uniform loss'), ) parser.add_argument( "--logit_mean", type=float, default=0.0, help="mean to use when using the `'logit_normal'` weighting scheme." ) parser.add_argument( "--logit_std", type=float, default=1.0, help="std to use when using the `'logit_normal'` weighting scheme." ) parser.add_argument( "--mode_scale", type=float, default=1.29, help="Scale of mode weighting scheme. Only effective when using the `'mode'` as the `weighting_scheme`.", ) parser.add_argument( "--offload", action="store_true", help="Whether to offload the VAE and the text encoders to CPU when they are not used.", ) if input_args is not None: args = parser.parse_args(input_args) else: args = parser.parse_args() if args.dataset_name is None and args.jsonl_for_train is None: raise ValueError("Specify either `--dataset_name` or `--jsonl_for_train`") if args.dataset_name is not None and args.jsonl_for_train is not None: raise ValueError("Specify only one of `--dataset_name` or `--jsonl_for_train`") if args.proportion_empty_prompts < 0 or args.proportion_empty_prompts > 1: raise ValueError("`--proportion_empty_prompts` must be in the range [0, 1].") if args.validation_prompt is not None and args.validation_image is None: raise ValueError("`--validation_image` must be set if `--validation_prompt` is set") if args.validation_prompt is None and args.validation_image is not None: raise ValueError("`--validation_prompt` must be set if `--validation_image` is set") if ( args.validation_image is not None and args.validation_prompt is not None and len(args.validation_image) != 1 and len(args.validation_prompt) != 1 and len(args.validation_image) != len(args.validation_prompt) ): raise ValueError( "Must provide either 1 
`--validation_image`, 1 `--validation_prompt`," " or the same number of `--validation_prompt`s and `--validation_image`s" ) if args.resolution % 8 != 0: raise ValueError( "`--resolution` must be divisible by 8 for consistently sized encoded images between the VAE and the cogview4 transformer." ) return args def get_train_dataset(args, accelerator): dataset = None if args.dataset_name is not None: # Downloading and loading a dataset from the hub. dataset = load_dataset( args.dataset_name, args.dataset_config_name, cache_dir=args.cache_dir, ) if args.jsonl_for_train is not None: # load from json dataset = load_dataset("json", data_files=args.jsonl_for_train, cache_dir=args.cache_dir) dataset = dataset.flatten_indices() # Preprocessing the datasets. # We need to tokenize inputs and targets. column_names = dataset["train"].column_names # 6. Get the column names for input/target. if args.image_column is None: image_column = column_names[0] logger.info(f"image column defaulting to {image_column}") else: image_column = args.image_column if image_column not in column_names: raise ValueError( f"`--image_column` value '{args.image_column}' not found in dataset columns. Dataset columns are: {', '.join(column_names)}" ) if args.caption_column is None: caption_column = column_names[1] logger.info(f"caption column defaulting to {caption_column}") else: caption_column = args.caption_column if caption_column not in column_names: raise ValueError( f"`--caption_column` value '{args.caption_column}' not found in dataset columns. Dataset columns are: {', '.join(column_names)}" ) if args.conditioning_image_column is None: conditioning_image_column = column_names[2] logger.info(f"conditioning image column defaulting to {conditioning_image_column}") else: conditioning_image_column = args.conditioning_image_column if conditioning_image_column not in column_names: raise ValueError( f"`--conditioning_image_column` value '{args.conditioning_image_column}' not found in dataset columns. 
Dataset columns are: {', '.join(column_names)}" ) with accelerator.main_process_first(): train_dataset = dataset["train"].shuffle(seed=args.seed) if args.max_train_samples is not None: train_dataset = train_dataset.select(range(args.max_train_samples)) return train_dataset def prepare_train_dataset(dataset, accelerator): image_transforms = transforms.Compose( [ transforms.Resize((args.resolution, args.resolution), interpolation=transforms.InterpolationMode.BILINEAR), transforms.ToTensor(), transforms.Lambda(lambda x: x * 2 - 1), ] ) def preprocess_train(examples): images = [ (image.convert("RGB") if not isinstance(image, str) else Image.open(image).convert("RGB")) for image in examples[args.image_column] ] images = [image_transforms(image) for image in images] conditioning_images = [ (image.convert("RGB") if not isinstance(image, str) else Image.open(image).convert("RGB")) for image in examples[args.conditioning_image_column] ] conditioning_images = [image_transforms(image) for image in conditioning_images] examples["pixel_values"] = images examples["conditioning_pixel_values"] = conditioning_images is_caption_list = isinstance(examples[args.caption_column][0], list) if is_caption_list: examples["captions"] = [max(example, key=len) for example in examples[args.caption_column]] else: examples["captions"] = list(examples[args.caption_column]) return examples with accelerator.main_process_first(): dataset = dataset.with_transform(preprocess_train) return dataset def collate_fn(examples): pixel_values = torch.stack([example["pixel_values"] for example in examples]) pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float() conditioning_pixel_values = torch.stack([example["conditioning_pixel_values"] for example in examples]) conditioning_pixel_values = conditioning_pixel_values.to(memory_format=torch.contiguous_format).float() captions = [example["captions"] for example in examples] return {"pixel_values": pixel_values, "conditioning_pixel_values": 
conditioning_pixel_values, "captions": captions} def main(args): if args.report_to == "wandb" and args.hub_token is not None: raise ValueError( "You cannot use both --report_to=wandb and --hub_token due to a security risk of exposing your token." " Please use `hf auth login` to authenticate with the Hub." ) logging_out_dir = Path(args.output_dir, args.logging_dir) if torch.backends.mps.is_available() and args.mixed_precision == "bf16": # due to pytorch#99272, MPS does not yet support bfloat16. raise ValueError( "Mixed precision training with bfloat16 is not supported on MPS. Please use fp16 (recommended) or fp32 instead." ) accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=str(logging_out_dir)) accelerator = Accelerator( gradient_accumulation_steps=args.gradient_accumulation_steps, mixed_precision=args.mixed_precision, log_with=args.report_to, project_config=accelerator_project_config, ) # Disable AMP for MPS. A technique for accelerating machine learning computations on iOS and macOS devices. if torch.backends.mps.is_available(): logger.info("MPS is enabled. Disabling AMP.") accelerator.native_amp = False # Make one log on every process with the configuration for debugging. logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", # DEBUG, INFO, WARNING, ERROR, CRITICAL level=logging.INFO, ) logger.info(accelerator.state, main_process_only=False) if accelerator.is_local_main_process: transformers.utils.logging.set_verbosity_warning() diffusers.utils.logging.set_verbosity_info() else: transformers.utils.logging.set_verbosity_error() diffusers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. 
if args.seed is not None: set_seed(args.seed) # Handle the repository creation if accelerator.is_main_process: if args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) if args.push_to_hub: repo_id = create_repo( repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token ).repo_id # Load models. We will load the text encoders later in a pipeline to compute # embeddings. vae = AutoencoderKL.from_pretrained( args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision, variant=args.variant, ) cogview4_transformer = CogView4Transformer2DModel.from_pretrained( args.pretrained_model_name_or_path, subfolder="transformer", revision=args.revision, variant=args.variant, ) logger.info("All models loaded successfully") noise_scheduler = FlowMatchEulerDiscreteScheduler.from_pretrained( args.pretrained_model_name_or_path, subfolder="scheduler", ) noise_scheduler_copy = copy.deepcopy(noise_scheduler) if not args.only_target_transformer_blocks: cogview4_transformer.requires_grad_(True) vae.requires_grad_(False) # cast down and move to the CPU weight_dtype = torch.float32 if accelerator.mixed_precision == "fp16": weight_dtype = torch.float16 elif accelerator.mixed_precision == "bf16": weight_dtype = torch.bfloat16 # let's not move the VAE to the GPU yet. vae.to(dtype=torch.float32) # keep the VAE in float32. 
# enable image inputs with torch.no_grad(): patch_size = cogview4_transformer.config.patch_size initial_input_channels = cogview4_transformer.config.in_channels * patch_size**2 new_linear = torch.nn.Linear( cogview4_transformer.patch_embed.proj.in_features * 2, cogview4_transformer.patch_embed.proj.out_features, bias=cogview4_transformer.patch_embed.proj.bias is not None, dtype=cogview4_transformer.dtype, device=cogview4_transformer.device, ) new_linear.weight.zero_() new_linear.weight[:, :initial_input_channels].copy_(cogview4_transformer.patch_embed.proj.weight) if cogview4_transformer.patch_embed.proj.bias is not None: new_linear.bias.copy_(cogview4_transformer.patch_embed.proj.bias) cogview4_transformer.patch_embed.proj = new_linear assert torch.all(cogview4_transformer.patch_embed.proj.weight[:, initial_input_channels:].data == 0) cogview4_transformer.register_to_config( in_channels=cogview4_transformer.config.in_channels * 2, out_channels=cogview4_transformer.config.in_channels ) if args.only_target_transformer_blocks: cogview4_transformer.patch_embed.proj.requires_grad_(True) for name, module in cogview4_transformer.named_modules(): if "transformer_blocks" in name: module.requires_grad_(True) else: module.requirs_grad_(False) def unwrap_model(model): model = accelerator.unwrap_model(model) model = model._orig_mod if is_compiled_module(model) else model return model # `accelerate` 0.16.0 will have better support for customized saving if version.parse(accelerate.__version__) >= version.parse("0.16.0"): def save_model_hook(models, weights, output_dir): if accelerator.is_main_process: for model in models: if isinstance(unwrap_model(model), type(unwrap_model(cogview4_transformer))): model = unwrap_model(model) model.save_pretrained(os.path.join(output_dir, "transformer")) else: raise ValueError(f"unexpected save model: {model.__class__}") # make sure to pop weight so that corresponding model is not saved again if weights: weights.pop() def load_model_hook(models, 
input_dir): transformer_ = None if not accelerator.distributed_type == DistributedType.DEEPSPEED: while len(models) > 0: model = models.pop() if isinstance(unwrap_model(model), type(unwrap_model(cogview4_transformer))): transformer_ = model # noqa: F841 else: raise ValueError(f"unexpected save model: {unwrap_model(model).__class__}") else: transformer_ = CogView4Transformer2DModel.from_pretrained(input_dir, subfolder="transformer") # noqa: F841 accelerator.register_save_state_pre_hook(save_model_hook) accelerator.register_load_state_pre_hook(load_model_hook) if args.gradient_checkpointing: cogview4_transformer.enable_gradient_checkpointing() # Enable TF32 for faster training on Ampere GPUs, # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices if args.allow_tf32: torch.backends.cuda.matmul.allow_tf32 = True if args.scale_lr: args.learning_rate = ( args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes ) # Use 8-bit Adam for lower memory usage or to fine-tune the model in 16GB GPUs if args.use_8bit_adam: try: import bitsandbytes as bnb except ImportError: raise ImportError( "To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`." ) optimizer_class = bnb.optim.AdamW8bit else: optimizer_class = torch.optim.AdamW # Optimization parameters optimizer = optimizer_class( cogview4_transformer.parameters(), lr=args.learning_rate, betas=(args.adam_beta1, args.adam_beta2), weight_decay=args.adam_weight_decay, eps=args.adam_epsilon, ) # Prepare dataset and dataloader. train_dataset = get_train_dataset(args, accelerator) train_dataset = prepare_train_dataset(train_dataset, accelerator) train_dataloader = torch.utils.data.DataLoader( train_dataset, shuffle=True, collate_fn=collate_fn, batch_size=args.train_batch_size, num_workers=args.dataloader_num_workers, ) # Scheduler and math around the number of training steps. 
# Check the PR https://github.com/huggingface/diffusers/pull/8312 for detailed explanation.
if args.max_train_steps is None:
    # Estimate the post-sharding dataloader length so the LR scheduler sees the right horizon
    # before `accelerator.prepare` has actually sharded the dataloader.
    len_train_dataloader_after_sharding = math.ceil(len(train_dataloader) / accelerator.num_processes)
    num_update_steps_per_epoch = math.ceil(len_train_dataloader_after_sharding / args.gradient_accumulation_steps)
    num_training_steps_for_scheduler = (
        args.num_train_epochs * num_update_steps_per_epoch * accelerator.num_processes
    )
else:
    num_training_steps_for_scheduler = args.max_train_steps * accelerator.num_processes

lr_scheduler = get_scheduler(
    args.lr_scheduler,
    optimizer=optimizer,
    num_warmup_steps=args.lr_warmup_steps * accelerator.num_processes,
    # FIX: was `args.max_train_steps * accelerator.num_processes`, which raises a TypeError when
    # --max_train_steps is unset (None) and silently discarded the value computed just above.
    num_training_steps=num_training_steps_for_scheduler,
    num_cycles=args.lr_num_cycles,
    power=args.lr_power,
)

# Prepare everything with our `accelerator`.
cogview4_transformer, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
    cogview4_transformer, optimizer, train_dataloader, lr_scheduler
)

# We need to recalculate our total training steps as the size of the training dataloader may have changed.
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
if args.max_train_steps is None:
    args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
    # FIX: nested inside the `is None` branch so `len_train_dataloader_after_sharding` is always
    # defined when the warning fires (it is only computed in that branch above).
    if num_training_steps_for_scheduler != args.max_train_steps * accelerator.num_processes:
        logger.warning(
            f"The length of the 'train_dataloader' after 'accelerator.prepare' ({len(train_dataloader)}) does not match "
            f"the expected length ({len_train_dataloader_after_sharding}) when the learning rate scheduler was created. "
            f"This inconsistency may result in the learning rate scheduler not functioning properly."
        )
# Afterwards we recalculate our number of training epochs
args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)

# We need to initialize the trackers we use, and also store our configuration.
# The trackers initializes automatically on the main process. if accelerator.is_main_process: tracker_config = dict(vars(args)) # tensorboard cannot handle list types for config tracker_config.pop("validation_prompt") tracker_config.pop("validation_image") accelerator.init_trackers(args.tracker_project_name, config=tracker_config) # Train! total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps logger.info("***** Running training *****") logger.info(f" Num examples = {len(train_dataset)}") logger.info(f" Num batches each epoch = {len(train_dataloader)}") logger.info(f" Num Epochs = {args.num_train_epochs}") logger.info(f" Instantaneous batch size per device = {args.train_batch_size}") logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}") logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") logger.info(f" Total optimization steps = {args.max_train_steps}") global_step = 0 first_epoch = 0 # Create a pipeline for text encoding. We will move this pipeline to GPU/CPU as needed. text_encoding_pipeline = CogView4ControlPipeline.from_pretrained( args.pretrained_model_name_or_path, transformer=None, vae=None, torch_dtype=weight_dtype ) tokenizer = text_encoding_pipeline.tokenizer # Potentially load in the weights and states from a previous save if args.resume_from_checkpoint: if args.resume_from_checkpoint != "latest": path = os.path.basename(args.resume_from_checkpoint) else: # Get the most recent checkpoint dirs = os.listdir(args.output_dir) dirs = [d for d in dirs if d.startswith("checkpoint")] dirs = sorted(dirs, key=lambda x: int(x.split("-")[1])) path = dirs[-1] if len(dirs) > 0 else None if path is None: logger.info(f"Checkpoint '{args.resume_from_checkpoint}' does not exist. 
Starting a new training run.") args.resume_from_checkpoint = None initial_global_step = 0 else: logger.info(f"Resuming from checkpoint {path}") accelerator.load_state(os.path.join(args.output_dir, path)) global_step = int(path.split("-")[1]) initial_global_step = global_step first_epoch = global_step // num_update_steps_per_epoch else: initial_global_step = 0 if accelerator.is_main_process and args.report_to == "wandb" and args.log_dataset_samples: logger.info("Logging some dataset samples.") formatted_images = [] formatted_control_images = [] all_prompts = [] for i, batch in enumerate(train_dataloader): images = (batch["pixel_values"] + 1) / 2 control_images = (batch["conditioning_pixel_values"] + 1) / 2 prompts = batch["captions"] if len(formatted_images) > 10: break for img, control_img, prompt in zip(images, control_images, prompts): formatted_images.append(img) formatted_control_images.append(control_img) all_prompts.append(prompt) logged_artifacts = [] for img, control_img, prompt in zip(formatted_images, formatted_control_images, all_prompts): logged_artifacts.append(wandb.Image(control_img, caption="Conditioning")) logged_artifacts.append(wandb.Image(img, caption=prompt)) wandb_tracker = [tracker for tracker in accelerator.trackers if tracker.name == "wandb"] wandb_tracker[0].log({"dataset_samples": logged_artifacts}) progress_bar = tqdm( range(0, args.max_train_steps), initial=initial_global_step, desc="Steps", # Only show the progress bar once on each machine. 
disable=not accelerator.is_local_main_process, ) for epoch in range(first_epoch, args.num_train_epochs): cogview4_transformer.train() for step, batch in enumerate(train_dataloader): with accelerator.accumulate(cogview4_transformer): # Convert images to latent space # vae encode prompts = batch["captions"] attention_mask = tokenizer( prompts, padding="longest", # not use max length max_length=args.max_sequence_length, truncation=True, add_special_tokens=True, return_tensors="pt", ).attention_mask.float() pixel_latents = encode_images(batch["pixel_values"], vae.to(accelerator.device), weight_dtype) control_latents = encode_images( batch["conditioning_pixel_values"], vae.to(accelerator.device), weight_dtype ) if args.offload: vae.cpu() # Sample a random timestep for each image # for weighting schemes where we sample timesteps non-uniformly bsz = pixel_latents.shape[0] noise = torch.randn_like(pixel_latents, device=accelerator.device, dtype=weight_dtype) u = compute_density_for_timestep_sampling( weighting_scheme=args.weighting_scheme, batch_size=bsz, logit_mean=args.logit_mean, logit_std=args.logit_std, mode_scale=args.mode_scale, ) # Add noise according for cogview4 indices = (u * noise_scheduler_copy.config.num_train_timesteps).long() timesteps = noise_scheduler_copy.timesteps[indices].to(device=pixel_latents.device) sigmas = noise_scheduler_copy.sigmas[indices].to(device=pixel_latents.device) captions = batch["captions"] image_seq_lens = torch.tensor( pixel_latents.shape[2] * pixel_latents.shape[3] // patch_size**2, dtype=pixel_latents.dtype, device=pixel_latents.device, ) # H * W / VAE patch_size mu = torch.sqrt(image_seq_lens / 256) mu = mu * 0.75 + 0.25 scale_factors = mu / (mu + (1 / sigmas - 1) ** 1.0).to( dtype=pixel_latents.dtype, device=pixel_latents.device ) scale_factors = scale_factors.view(len(batch["captions"]), 1, 1, 1) noisy_model_input = (1.0 - scale_factors) * pixel_latents + scale_factors * noise concatenated_noisy_model_input = 
torch.cat([noisy_model_input, control_latents], dim=1)
# FIX: use the accelerator-selected device instead of the hard-coded "cuda" string so the
# script also runs on MPS / CPU, which main() otherwise explicitly supports and validates.
text_encoding_pipeline = text_encoding_pipeline.to(accelerator.device)
with torch.no_grad():
    (
        prompt_embeds,
        pooled_prompt_embeds,
    ) = text_encoding_pipeline.encode_prompt(captions, "")
# CogView4 conditions on original/target resolutions and crop offsets; repeat per batch sample.
original_size = (args.resolution, args.resolution)
original_size = torch.tensor([original_size], dtype=prompt_embeds.dtype, device=prompt_embeds.device)
target_size = (args.resolution, args.resolution)
target_size = torch.tensor([target_size], dtype=prompt_embeds.dtype, device=prompt_embeds.device)
target_size = target_size.repeat(len(batch["captions"]), 1)
original_size = original_size.repeat(len(batch["captions"]), 1)
crops_coords_top_left = torch.tensor([(0, 0)], dtype=prompt_embeds.dtype, device=prompt_embeds.device)
crops_coords_top_left = crops_coords_top_left.repeat(len(batch["captions"]), 1)

# this could be optimized by not having to do any text encoding and just
# doing zeros on specified shapes for `prompt_embeds` and `pooled_prompt_embeds`
if args.proportion_empty_prompts and random.random() < args.proportion_empty_prompts:
    # Here, we directly pass 16 pad tokens from pooled_prompt_embeds to prompt_embeds.
    prompt_embeds = pooled_prompt_embeds
if args.offload:
    text_encoding_pipeline = text_encoding_pipeline.to("cpu")
# Predict.
noise_pred_cond = cogview4_transformer( hidden_states=concatenated_noisy_model_input, encoder_hidden_states=prompt_embeds, timestep=timesteps, original_size=original_size, target_size=target_size, crop_coords=crops_coords_top_left, return_dict=False, attention_mask=attention_mask, )[0] # these weighting schemes use a uniform timestep sampling # and instead post-weight the loss weighting = compute_loss_weighting_for_sd3(weighting_scheme=args.weighting_scheme, sigmas=sigmas) # flow-matching loss target = noise - pixel_latents weighting = weighting.view(len(batch["captions"]), 1, 1, 1) loss = torch.mean( (weighting.float() * (noise_pred_cond.float() - target.float()) ** 2).reshape(target.shape[0], -1), 1, ) loss = loss.mean() accelerator.backward(loss) if accelerator.sync_gradients: params_to_clip = cogview4_transformer.parameters() accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm) optimizer.step() lr_scheduler.step() optimizer.zero_grad() # Checks if the accelerator has performed an optimization step behind the scenes if accelerator.sync_gradients: progress_bar.update(1) global_step += 1 # DeepSpeed requires saving weights on every device; saving weights only on the main process would cause issues. 
if accelerator.distributed_type == DistributedType.DEEPSPEED or accelerator.is_main_process: if global_step % args.checkpointing_steps == 0: # _before_ saving state, check if this save would set us over the `checkpoints_total_limit` if args.checkpoints_total_limit is not None: checkpoints = os.listdir(args.output_dir) checkpoints = [d for d in checkpoints if d.startswith("checkpoint")] checkpoints = sorted(checkpoints, key=lambda x: int(x.split("-")[1])) # before we save the new checkpoint, we need to have at _most_ `checkpoints_total_limit - 1` checkpoints if len(checkpoints) >= args.checkpoints_total_limit: num_to_remove = len(checkpoints) - args.checkpoints_total_limit + 1 removing_checkpoints = checkpoints[0:num_to_remove] logger.info( f"{len(checkpoints)} checkpoints already exist, removing {len(removing_checkpoints)} checkpoints" ) logger.info(f"removing checkpoints: {', '.join(removing_checkpoints)}") for removing_checkpoint in removing_checkpoints: removing_checkpoint = os.path.join(args.output_dir, removing_checkpoint) shutil.rmtree(removing_checkpoint) save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}") accelerator.save_state(save_path) logger.info(f"Saved state to {save_path}") if args.validation_prompt is not None and global_step % args.validation_steps == 0: image_logs = log_validation( cogview4_transformer=cogview4_transformer, args=args, accelerator=accelerator, weight_dtype=weight_dtype, step=global_step, ) logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]} progress_bar.set_postfix(**logs) accelerator.log(logs, step=global_step) if global_step >= args.max_train_steps: break # Create the pipeline using using the trained modules and save it. 
accelerator.wait_for_everyone() if accelerator.is_main_process: cogview4_transformer = unwrap_model(cogview4_transformer) if args.upcast_before_saving: cogview4_transformer.to(torch.float32) cogview4_transformer.save_pretrained(args.output_dir) del cogview4_transformer del text_encoding_pipeline del vae free_memory() # Run a final round of validation. image_logs = None if args.validation_prompt is not None: image_logs = log_validation( cogview4_transformer=None, args=args, accelerator=accelerator, weight_dtype=weight_dtype, step=global_step, is_final_validation=True, ) if args.push_to_hub: save_model_card( repo_id, image_logs=image_logs, base_model=args.pretrained_model_name_or_path, repo_folder=args.output_dir, ) upload_folder( repo_id=repo_id, folder_path=args.output_dir, commit_message="End of training", ignore_patterns=["step_*", "epoch_*", "checkpoint-*"], ) accelerator.end_training() if __name__ == "__main__": args = parse_args() main(args)
{ "repo_id": "huggingface/diffusers", "file_path": "examples/cogview4-control/train_control_cogview4.py", "license": "Apache License 2.0", "lines": 1101, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:src/diffusers/pipelines/cogview4/pipeline_cogview4_control.py
# Copyright 2025 The CogVideoX team, Tsinghua University & ZhipuAI and The HuggingFace Team. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect from typing import Any, Callable import numpy as np import torch from transformers import AutoTokenizer, GlmModel from ...callbacks import MultiPipelineCallbacks, PipelineCallback from ...image_processor import PipelineImageInput, VaeImageProcessor from ...models import AutoencoderKL, CogView4Transformer2DModel from ...pipelines.pipeline_utils import DiffusionPipeline from ...schedulers import FlowMatchEulerDiscreteScheduler from ...utils import is_torch_xla_available, logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from .pipeline_output import CogView4PipelineOutput if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```python >>> import torch >>> from diffusers import CogView4ControlPipeline >>> pipe = CogView4ControlPipeline.from_pretrained("THUDM/CogView4-6B-Control", torch_dtype=torch.bfloat16) >>> control_image = load_image( ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" ... 
) >>> prompt = "A bird in space" >>> image = pipe(prompt, control_image=control_image, height=1024, width=1024, guidance_scale=3.5).images[0] >>> image.save("cogview4-control.png") ``` """ # Copied from diffusers.pipelines.cogview4.pipeline_cogview4.calculate_shift def calculate_shift( image_seq_len, base_seq_len: int = 256, base_shift: float = 0.25, max_shift: float = 0.75, ) -> float: m = (image_seq_len / base_seq_len) ** 0.5 mu = m * max_shift + base_shift return mu # Copied from diffusers.pipelines.cogview4.pipeline_cogview4.retrieve_timesteps def retrieve_timesteps( scheduler, num_inference_steps: int | None = None, device: str | torch.device | None = None, timesteps: list[int] | None = None, sigmas: list[float] | None = None, **kwargs, ): r""" Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. Args: scheduler (`SchedulerMixin`): The scheduler to get timesteps from. num_inference_steps (`int`): The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` must be `None`. device (`str` or `torch.device`, *optional*): The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. timesteps (`list[int]`, *optional*): Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, `num_inference_steps` and `sigmas` must be `None`. sigmas (`list[float]`, *optional*): Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, `num_inference_steps` and `timesteps` must be `None`. Returns: `tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the second element is the number of inference steps. 
""" accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) accepts_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if timesteps is not None and sigmas is not None: if not accepts_timesteps and not accepts_sigmas: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" timestep or sigma schedules. Please check whether you are using the correct scheduler." ) scheduler.set_timesteps(timesteps=timesteps, sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif timesteps is not None and sigmas is None: if not accepts_timesteps: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" timestep schedules. Please check whether you are using the correct scheduler." ) scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif timesteps is None and sigmas is not None: if not accepts_sigmas: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" sigmas schedules. Please check whether you are using the correct scheduler." ) scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return timesteps, num_inference_steps class CogView4ControlPipeline(DiffusionPipeline): r""" Pipeline for text-to-image generation using CogView4. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) 
Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. text_encoder ([`GLMModel`]): Frozen text-encoder. CogView4 uses [glm-4-9b-hf](https://huggingface.co/THUDM/glm-4-9b-hf). tokenizer (`PreTrainedTokenizer`): Tokenizer of class [PreTrainedTokenizer](https://huggingface.co/docs/transformers/main/en/main_classes/tokenizer#transformers.PreTrainedTokenizer). transformer ([`CogView4Transformer2DModel`]): A text conditioned `CogView4Transformer2DModel` to denoise the encoded image latents. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `transformer` to denoise the encoded image latents. """ _optional_components = [] model_cpu_offload_seq = "text_encoder->transformer->vae" _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"] def __init__( self, tokenizer: AutoTokenizer, text_encoder: GlmModel, vae: AutoencoderKL, transformer: CogView4Transformer2DModel, scheduler: FlowMatchEulerDiscreteScheduler, ): super().__init__() self.register_modules( tokenizer=tokenizer, text_encoder=text_encoder, vae=vae, transformer=transformer, scheduler=scheduler ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8 self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) # Copied from diffusers.pipelines.cogview4.pipeline_cogview4.CogView4Pipeline._get_glm_embeds def _get_glm_embeds( self, prompt: str | list[str] = None, max_sequence_length: int = 1024, device: torch.device | None = None, dtype: torch.dtype | None = None, ): device = device or self._execution_device dtype = dtype or self.text_encoder.dtype prompt = [prompt] if isinstance(prompt, str) else prompt text_inputs = self.tokenizer( prompt, padding="longest", # not use max length max_length=max_sequence_length, truncation=True, add_special_tokens=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids 
untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, max_sequence_length - 1 : -1]) logger.warning( "The following part of your input was truncated because `max_sequence_length` is set to " f" {max_sequence_length} tokens: {removed_text}" ) current_length = text_input_ids.shape[1] pad_length = (16 - (current_length % 16)) % 16 if pad_length > 0: pad_ids = torch.full( (text_input_ids.shape[0], pad_length), fill_value=self.tokenizer.pad_token_id, dtype=text_input_ids.dtype, device=text_input_ids.device, ) text_input_ids = torch.cat([pad_ids, text_input_ids], dim=1) prompt_embeds = self.text_encoder(text_input_ids.to(device), output_hidden_states=True).hidden_states[-2] prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) return prompt_embeds # Copied from diffusers.pipelines.cogview4.pipeline_cogview4.CogView4Pipeline.encode_prompt def encode_prompt( self, prompt: str | list[str], negative_prompt: str | list[str] | None = None, do_classifier_free_guidance: bool = True, num_images_per_prompt: int = 1, prompt_embeds: torch.Tensor | None = None, negative_prompt_embeds: torch.Tensor | None = None, device: torch.device | None = None, dtype: torch.dtype | None = None, max_sequence_length: int = 1024, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `list[str]`, *optional*): prompt to be encoded negative_prompt (`str` or `list[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). do_classifier_free_guidance (`bool`, *optional*, defaults to `True`): Whether to use classifier free guidance or not. 
num_images_per_prompt (`int`, *optional*, defaults to 1): Number of images that should be generated per prompt. torch device to place the resulting embeddings on prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. device: (`torch.device`, *optional*): torch device dtype: (`torch.dtype`, *optional*): torch dtype max_sequence_length (`int`, defaults to `1024`): Maximum sequence length in encoded prompt. Can be set to other values but may lead to poorer results. """ device = device or self._execution_device prompt = [prompt] if isinstance(prompt, str) else prompt if prompt is not None: batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: prompt_embeds = self._get_glm_embeds(prompt, max_sequence_length, device, dtype) seq_len = prompt_embeds.size(1) prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance and negative_prompt_embeds is None: negative_prompt = negative_prompt or "" negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt if prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. 
Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) negative_prompt_embeds = self._get_glm_embeds(negative_prompt, max_sequence_length, device, dtype) seq_len = negative_prompt_embeds.size(1) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) return prompt_embeds, negative_prompt_embeds def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): if latents is not None: return latents.to(device) shape = ( batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor, ) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
) latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) return latents def prepare_image( self, image, width, height, batch_size, num_images_per_prompt, device, dtype, do_classifier_free_guidance=False, guess_mode=False, ): if isinstance(image, torch.Tensor): pass else: image = self.image_processor.preprocess(image, height=height, width=width) image_batch_size = image.shape[0] if image_batch_size == 1: repeat_by = batch_size else: # image batch size is the same as prompt batch size repeat_by = num_images_per_prompt image = image.repeat_interleave(repeat_by, dim=0, output_size=image.shape[0] * repeat_by) image = image.to(device=device, dtype=dtype) if do_classifier_free_guidance and not guess_mode: image = torch.cat([image] * 2) return image def check_inputs( self, prompt, height, width, negative_prompt, callback_on_step_end_tensor_inputs, prompt_embeds=None, negative_prompt_embeds=None, ): if height % 16 != 0 or width % 16 != 0: raise ValueError(f"`height` and `width` have to be divisible by 16 but are {height} and {width}.") if callback_on_step_end_tensor_inputs is not None and not all( k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs ): raise ValueError( f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" ) if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." 
) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." ) @property def guidance_scale(self): return self._guidance_scale # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1` # corresponds to doing no classifier free guidance. 
@property def do_classifier_free_guidance(self): return self._guidance_scale > 1 @property def num_timesteps(self): return self._num_timesteps @property def attention_kwargs(self): return self._attention_kwargs @property def current_timestep(self): return self._current_timestep @property def interrupt(self): return self._interrupt @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, prompt: str | list[str] | None = None, negative_prompt: str | list[str] | None = None, control_image: PipelineImageInput = None, height: int | None = None, width: int | None = None, num_inference_steps: int = 50, timesteps: list[int] | None = None, sigmas: list[float] | None = None, guidance_scale: float = 5.0, num_images_per_prompt: int = 1, generator: torch.Generator | list[torch.Generator] | None = None, latents: torch.FloatTensor | None = None, prompt_embeds: torch.FloatTensor | None = None, negative_prompt_embeds: torch.FloatTensor | None = None, original_size: tuple[int, int] | None = None, crops_coords_top_left: tuple[int, int] = (0, 0), output_type: str = "pil", return_dict: bool = True, attention_kwargs: dict[str, Any] | None = None, callback_on_step_end: Callable[[int, int], None] | PipelineCallback | MultiPipelineCallbacks | None = None, callback_on_step_end_tensor_inputs: list[str] = ["latents"], max_sequence_length: int = 1024, ) -> CogView4PipelineOutput | tuple: """ Function invoked when calling the pipeline for generation. Args: prompt (`str` or `list[str]`, *optional*): The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. negative_prompt (`str` or `list[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). 
height (`int`, *optional*, defaults to self.transformer.config.sample_size * self.vae_scale_factor): The height in pixels of the generated image. If not provided, it is set to 1024. width (`int`, *optional*, defaults to self.transformer.config.sample_size * self.vae_scale_factor): The width in pixels of the generated image. If not provided it is set to 1024. num_inference_steps (`int`, *optional*, defaults to `50`): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. timesteps (`list[int]`, *optional*): Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed will be used. Must be in descending order. sigmas (`list[float]`, *optional*): Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed will be used. guidance_scale (`float`, *optional*, defaults to `5.0`): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. num_images_per_prompt (`int`, *optional*, defaults to `1`): The number of images to generate per prompt. generator (`torch.Generator` or `list[torch.Generator]`, *optional*): One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. 
latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. original_size (`tuple[int]`, *optional*, defaults to (1024, 1024)): If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled. `original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). crops_coords_top_left (`tuple[int]`, *optional*, defaults to (0, 0)): `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. 
return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.pipeline_CogView4.CogView4PipelineOutput`] instead of a plain tuple. attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). callback_on_step_end (`Callable`, *optional*): A function that calls at the end of each denoising steps during the inference. The function is called with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by `callback_on_step_end_tensor_inputs`. callback_on_step_end_tensor_inputs (`list`, *optional*): The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the `._callback_tensor_inputs` attribute of your pipeline class. max_sequence_length (`int`, defaults to `224`): Maximum sequence length in encoded prompt. Can be set to other values but may lead to poorer results. Examples: Returns: [`~pipelines.cogview4.pipeline_CogView4.CogView4PipelineOutput`] or `tuple`: [`~pipelines.cogview4.pipeline_CogView4.CogView4PipelineOutput`] if `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is a list with the generated images. 
""" if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs height = height or self.transformer.config.sample_size * self.vae_scale_factor width = width or self.transformer.config.sample_size * self.vae_scale_factor original_size = original_size or (height, width) target_size = (height, width) # Check inputs. Raise error if not correct self.check_inputs( prompt, height, width, negative_prompt, callback_on_step_end_tensor_inputs, prompt_embeds, negative_prompt_embeds, ) self._guidance_scale = guidance_scale self._attention_kwargs = attention_kwargs self._current_timestep = None self._interrupt = False # Default call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device # Encode input prompt prompt_embeds, negative_prompt_embeds = self.encode_prompt( prompt, negative_prompt, self.do_classifier_free_guidance, num_images_per_prompt=num_images_per_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, max_sequence_length=max_sequence_length, device=device, ) # Prepare latents latent_channels = self.transformer.config.in_channels // 2 control_image = self.prepare_image( image=control_image, width=width, height=height, batch_size=batch_size * num_images_per_prompt, num_images_per_prompt=num_images_per_prompt, device=device, dtype=self.vae.dtype, ) height, width = control_image.shape[-2:] vae_shift_factor = 0 control_image = self.vae.encode(control_image).latent_dist.sample() control_image = (control_image - vae_shift_factor) * self.vae.config.scaling_factor latents = self.prepare_latents( batch_size * num_images_per_prompt, latent_channels, height, width, torch.float32, device, generator, latents, ) # Prepare additional timestep conditions original_size = 
torch.tensor([original_size], dtype=prompt_embeds.dtype, device=device) target_size = torch.tensor([target_size], dtype=prompt_embeds.dtype, device=device) crops_coords_top_left = torch.tensor([crops_coords_top_left], dtype=prompt_embeds.dtype, device=device) original_size = original_size.repeat(batch_size * num_images_per_prompt, 1) target_size = target_size.repeat(batch_size * num_images_per_prompt, 1) crops_coords_top_left = crops_coords_top_left.repeat(batch_size * num_images_per_prompt, 1) # Prepare timesteps image_seq_len = ((height // self.vae_scale_factor) * (width // self.vae_scale_factor)) // ( self.transformer.config.patch_size**2 ) timesteps = ( np.linspace(self.scheduler.config.num_train_timesteps, 1.0, num_inference_steps) if timesteps is None else np.array(timesteps) ) timesteps = timesteps.astype(np.int64).astype(np.float32) sigmas = timesteps / self.scheduler.config.num_train_timesteps if sigmas is None else sigmas mu = calculate_shift( image_seq_len, self.scheduler.config.get("base_image_seq_len", 256), self.scheduler.config.get("base_shift", 0.25), self.scheduler.config.get("max_shift", 0.75), ) if XLA_AVAILABLE: timestep_device = "cpu" else: timestep_device = device timesteps, num_inference_steps = retrieve_timesteps( self.scheduler, num_inference_steps, timestep_device, timesteps, sigmas, mu=mu ) self._num_timesteps = len(timesteps) # Denoising loop transformer_dtype = self.transformer.dtype num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): if self.interrupt: continue self._current_timestep = t latent_model_input = torch.cat([latents, control_image], dim=1).to(transformer_dtype) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML timestep = t.expand(latents.shape[0]) noise_pred_cond = self.transformer( hidden_states=latent_model_input, encoder_hidden_states=prompt_embeds, 
timestep=timestep, original_size=original_size, target_size=target_size, crop_coords=crops_coords_top_left, attention_kwargs=attention_kwargs, return_dict=False, )[0] # perform guidance if self.do_classifier_free_guidance: noise_pred_uncond = self.transformer( hidden_states=latent_model_input, encoder_hidden_states=negative_prompt_embeds, timestep=timestep, original_size=original_size, target_size=target_size, crop_coords=crops_coords_top_left, attention_kwargs=attention_kwargs, return_dict=False, )[0] noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_cond - noise_pred_uncond) else: noise_pred = noise_pred_cond latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0] # call the callback, if provided if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, self.scheduler.sigmas[i], callback_kwargs) latents = callback_outputs.pop("latents", latents) prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if XLA_AVAILABLE: xm.mark_step() self._current_timestep = None if not output_type == "latent": latents = latents.to(self.vae.dtype) / self.vae.config.scaling_factor image = self.vae.decode(latents, return_dict=False, generator=generator)[0] else: image = latents image = self.image_processor.postprocess(image, output_type=output_type) # Offload all models self.maybe_free_model_hooks() if not return_dict: return (image,) return CogView4PipelineOutput(images=image)
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/pipelines/cogview4/pipeline_cogview4_control.py", "license": "Apache License 2.0", "lines": 643, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:tests/remote/test_remote_encode.py
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import PIL.Image import torch from diffusers.utils import load_image from diffusers.utils.constants import ( DECODE_ENDPOINT_FLUX, DECODE_ENDPOINT_SD_V1, DECODE_ENDPOINT_SD_XL, ENCODE_ENDPOINT_FLUX, ENCODE_ENDPOINT_SD_V1, ENCODE_ENDPOINT_SD_XL, ) from diffusers.utils.remote_utils import ( remote_decode, remote_encode, ) from ..testing_utils import ( enable_full_determinism, slow, ) enable_full_determinism() IMAGE = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/astronaut.jpg?download=true" class RemoteAutoencoderKLEncodeMixin: channels: int = None endpoint: str = None decode_endpoint: str = None dtype: torch.dtype = None scaling_factor: float = None shift_factor: float = None image: PIL.Image.Image = None def get_dummy_inputs(self): if self.image is None: self.image = load_image(IMAGE) inputs = { "endpoint": self.endpoint, "image": self.image, "scaling_factor": self.scaling_factor, "shift_factor": self.shift_factor, } return inputs def test_image_input(self): inputs = self.get_dummy_inputs() height, width = inputs["image"].height, inputs["image"].width output = remote_encode(**inputs) self.assertEqual(list(output.shape), [1, self.channels, height // 8, width // 8]) decoded = remote_decode( tensor=output, endpoint=self.decode_endpoint, scaling_factor=self.scaling_factor, shift_factor=self.shift_factor, image_format="png", ) 
self.assertEqual(decoded.height, height) self.assertEqual(decoded.width, width) # image_slice = torch.from_numpy(np.array(inputs["image"])[0, -3:, -3:].flatten()) # decoded_slice = torch.from_numpy(np.array(decoded)[0, -3:, -3:].flatten()) # TODO: how to test this? encode->decode is lossy. expected slice of encoded latent? class RemoteAutoencoderKLSDv1Tests( RemoteAutoencoderKLEncodeMixin, unittest.TestCase, ): channels = 4 endpoint = ENCODE_ENDPOINT_SD_V1 decode_endpoint = DECODE_ENDPOINT_SD_V1 dtype = torch.float16 scaling_factor = 0.18215 shift_factor = None class RemoteAutoencoderKLSDXLTests( RemoteAutoencoderKLEncodeMixin, unittest.TestCase, ): channels = 4 endpoint = ENCODE_ENDPOINT_SD_XL decode_endpoint = DECODE_ENDPOINT_SD_XL dtype = torch.float16 scaling_factor = 0.13025 shift_factor = None class RemoteAutoencoderKLFluxTests( RemoteAutoencoderKLEncodeMixin, unittest.TestCase, ): channels = 16 endpoint = ENCODE_ENDPOINT_FLUX decode_endpoint = DECODE_ENDPOINT_FLUX dtype = torch.bfloat16 scaling_factor = 0.3611 shift_factor = 0.1159 class RemoteAutoencoderKLEncodeSlowTestMixin: channels: int = 4 endpoint: str = None decode_endpoint: str = None dtype: torch.dtype = None scaling_factor: float = None shift_factor: float = None image: PIL.Image.Image = None def get_dummy_inputs(self): if self.image is None: self.image = load_image(IMAGE) inputs = { "endpoint": self.endpoint, "image": self.image, "scaling_factor": self.scaling_factor, "shift_factor": self.shift_factor, } return inputs def test_multi_res(self): inputs = self.get_dummy_inputs() for height in { 320, 512, 640, 704, 896, 1024, 1208, 1384, 1536, 1608, 1864, 2048, }: for width in { 320, 512, 640, 704, 896, 1024, 1208, 1384, 1536, 1608, 1864, 2048, }: inputs["image"] = inputs["image"].resize( ( width, height, ) ) output = remote_encode(**inputs) self.assertEqual(list(output.shape), [1, self.channels, height // 8, width // 8]) decoded = remote_decode( tensor=output, endpoint=self.decode_endpoint, 
scaling_factor=self.scaling_factor, shift_factor=self.shift_factor, image_format="png", ) self.assertEqual(decoded.height, height) self.assertEqual(decoded.width, width) decoded.save(f"test_multi_res_{height}_{width}.png") @slow class RemoteAutoencoderKLSDv1SlowTests( RemoteAutoencoderKLEncodeSlowTestMixin, unittest.TestCase, ): endpoint = ENCODE_ENDPOINT_SD_V1 decode_endpoint = DECODE_ENDPOINT_SD_V1 dtype = torch.float16 scaling_factor = 0.18215 shift_factor = None @slow class RemoteAutoencoderKLSDXLSlowTests( RemoteAutoencoderKLEncodeSlowTestMixin, unittest.TestCase, ): endpoint = ENCODE_ENDPOINT_SD_XL decode_endpoint = DECODE_ENDPOINT_SD_XL dtype = torch.float16 scaling_factor = 0.13025 shift_factor = None @slow class RemoteAutoencoderKLFluxSlowTests( RemoteAutoencoderKLEncodeSlowTestMixin, unittest.TestCase, ): channels = 16 endpoint = ENCODE_ENDPOINT_FLUX decode_endpoint = DECODE_ENDPOINT_FLUX dtype = torch.bfloat16 scaling_factor = 0.3611 shift_factor = 0.1159
{ "repo_id": "huggingface/diffusers", "file_path": "tests/remote/test_remote_encode.py", "license": "Apache License 2.0", "lines": 198, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/diffusers:examples/research_projects/anytext/anytext.py
# Copyright 2025 The HuggingFace Team. All rights reserved. # Copyright (c) Alibaba, Inc. and its affiliates. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Based on [AnyText: Multilingual Visual Text Generation And Editing](https://huggingface.co/papers/2311.03054). # Authors: Yuxiang Tuo, Wangmeng Xiang, Jun-Yan He, Yifeng Geng, Xuansong Xie # Code: https://github.com/tyxsspa/AnyText with Apache-2.0 license # # Adapted to Diffusers by [M. Tolga Cangöz](https://github.com/tolgacangoz). import inspect import math import os import re import sys import unicodedata from functools import partial from typing import Any, Callable, Dict, List, Optional, Tuple, Union import cv2 import numpy as np import PIL.Image import torch import torch.nn.functional as F from huggingface_hub import hf_hub_download from ocr_recog.RecModel import RecModel from PIL import Image, ImageDraw, ImageFont from safetensors.torch import load_file from skimage.transform._geometric import _umeyama as get_sym_mat from torch import nn from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection from transformers.modeling_attn_mask_utils import _create_4d_causal_attention_mask, _prepare_4d_attention_mask from diffusers.callbacks import MultiPipelineCallbacks, PipelineCallback from diffusers.configuration_utils import ConfigMixin, register_to_config from diffusers.image_processor import PipelineImageInput, VaeImageProcessor from diffusers.loaders import ( 
FromSingleFileMixin, IPAdapterMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin, ) from diffusers.models import AutoencoderKL, ControlNetModel, ImageProjection, UNet2DConditionModel from diffusers.models.lora import adjust_lora_scale_text_encoder from diffusers.models.modeling_utils import ModelMixin from diffusers.pipelines.controlnet.multicontrolnet import MultiControlNetModel from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin from diffusers.pipelines.stable_diffusion.pipeline_output import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.schedulers import KarrasDiffusionSchedulers from diffusers.utils import ( USE_PEFT_BACKEND, deprecate, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers, ) from diffusers.utils.constants import HF_MODULES_CACHE from diffusers.utils.torch_utils import is_compiled_module, is_torch_version, randn_tensor class Checker: def __init__(self): pass def _is_chinese_char(self, cp): """Checks whether CP is the codepoint of a CJK character.""" # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. 
if ( (cp >= 0x4E00 and cp <= 0x9FFF) or (cp >= 0x3400 and cp <= 0x4DBF) or (cp >= 0x20000 and cp <= 0x2A6DF) or (cp >= 0x2A700 and cp <= 0x2B73F) or (cp >= 0x2B740 and cp <= 0x2B81F) or (cp >= 0x2B820 and cp <= 0x2CEAF) or (cp >= 0xF900 and cp <= 0xFAFF) or (cp >= 0x2F800 and cp <= 0x2FA1F) ): return True return False def _clean_text(self, text): """Performs invalid character removal and whitespace cleanup on text.""" output = [] for char in text: cp = ord(char) if cp == 0 or cp == 0xFFFD or self._is_control(char): continue if self._is_whitespace(char): output.append(" ") else: output.append(char) return "".join(output) def _is_control(self, char): """Checks whether `chars` is a control character.""" # These are technically control characters but we count them as whitespace # characters. if char == "\t" or char == "\n" or char == "\r": return False cat = unicodedata.category(char) if cat in ("Cc", "Cf"): return True return False def _is_whitespace(self, char): """Checks whether `chars` is a whitespace character.""" # \t, \n, and \r are technically control characters but we treat them # as whitespace since they are generally considered as such. 
if char == " " or char == "\t" or char == "\n" or char == "\r": return True cat = unicodedata.category(char) if cat == "Zs": return True return False checker = Checker() PLACE_HOLDER = "*" logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py >>> # This example requires the `anytext_controlnet.py` file: >>> # !git clone --depth 1 https://github.com/huggingface/diffusers.git >>> # %cd diffusers/examples/research_projects/anytext >>> # Let's choose a font file shared by an HF staff: >>> # !wget https://huggingface.co/spaces/ysharma/TranslateQuotesInImageForwards/resolve/main/arial-unicode-ms.ttf >>> import torch >>> from diffusers import DiffusionPipeline >>> from anytext_controlnet import AnyTextControlNetModel >>> from diffusers.utils import load_image >>> anytext_controlnet = AnyTextControlNetModel.from_pretrained("tolgacangoz/anytext-controlnet", torch_dtype=torch.float16, ... variant="fp16",) >>> pipe = DiffusionPipeline.from_pretrained("tolgacangoz/anytext", font_path="arial-unicode-ms.ttf", ... controlnet=anytext_controlnet, torch_dtype=torch.float16, ... trust_remote_code=False, # One needs to give permission to run this pipeline's code ... ).to("cuda") >>> # generate image >>> prompt = 'photo of caramel macchiato coffee on the table, top-down perspective, with "Any" "Text" written on it using cream' >>> draw_pos = load_image("https://raw.githubusercontent.com/tyxsspa/AnyText/refs/heads/main/example_images/gen9.png") >>> # There are two modes: "generate" and "edit". "edit" mode requires `ori_image` parameter for the image to be edited. >>> image = pipe(prompt, num_inference_steps=20, mode="generate", draw_pos=draw_pos, ... 
).images[0] >>> image ``` """ def get_clip_token_for_string(tokenizer, string): batch_encoding = tokenizer( string, truncation=True, max_length=77, return_length=True, return_overflowing_tokens=False, padding="max_length", return_tensors="pt", ) tokens = batch_encoding["input_ids"] assert torch.count_nonzero(tokens - 49407) == 2, ( f"String '{string}' maps to more than a single token. Please use another string" ) return tokens[0, 1] def get_recog_emb(encoder, img_list): _img_list = [(img.repeat(1, 3, 1, 1) * 255)[0] for img in img_list] encoder.predictor.eval() _, preds_neck = encoder.pred_imglist(_img_list, show_debug=False) return preds_neck class EmbeddingManager(ModelMixin, ConfigMixin): @register_to_config def __init__( self, embedder, placeholder_string="*", use_fp16=False, token_dim=768, get_recog_emb=None, ): super().__init__() get_token_for_string = partial(get_clip_token_for_string, embedder.tokenizer) self.proj = nn.Linear(40 * 64, token_dim) proj_dir = hf_hub_download( repo_id="tolgacangoz/anytext", filename="text_embedding_module/proj.safetensors", cache_dir=HF_MODULES_CACHE, ) self.proj.load_state_dict(load_file(proj_dir, device=str(embedder.device))) if use_fp16: self.proj = self.proj.to(dtype=torch.float16) self.placeholder_token = get_token_for_string(placeholder_string) @torch.no_grad() def encode_text(self, text_info): if self.config.get_recog_emb is None: self.config.get_recog_emb = partial(get_recog_emb, self.recog) gline_list = [] for i in range(len(text_info["n_lines"])): # sample index in a batch n_lines = text_info["n_lines"][i] for j in range(n_lines): # line gline_list += [text_info["gly_line"][j][i : i + 1]] if len(gline_list) > 0: recog_emb = self.config.get_recog_emb(gline_list) enc_glyph = self.proj(recog_emb.reshape(recog_emb.shape[0], -1).to(self.proj.weight.dtype)) self.text_embs_all = [] n_idx = 0 for i in range(len(text_info["n_lines"])): # sample index in a batch n_lines = text_info["n_lines"][i] text_embs = [] for j in 
range(n_lines): # line text_embs += [enc_glyph[n_idx : n_idx + 1]] n_idx += 1 self.text_embs_all += [text_embs] @torch.no_grad() def forward( self, tokenized_text, embedded_text, ): b, device = tokenized_text.shape[0], tokenized_text.device for i in range(b): idx = tokenized_text[i] == self.placeholder_token.to(device) if sum(idx) > 0: if i >= len(self.text_embs_all): logger.warning("truncation for log images...") break text_emb = torch.cat(self.text_embs_all[i], dim=0) if sum(idx) != len(text_emb): logger.warning("truncation for long caption...") text_emb = text_emb.to(embedded_text.device) embedded_text[i][idx] = text_emb[: sum(idx)] return embedded_text def embedding_parameters(self): return self.parameters() sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))) def min_bounding_rect(img): ret, thresh = cv2.threshold(img, 127, 255, 0) contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) if len(contours) == 0: print("Bad contours, using fake bbox...") return np.array([[0, 0], [100, 0], [100, 100], [0, 100]]) max_contour = max(contours, key=cv2.contourArea) rect = cv2.minAreaRect(max_contour) box = cv2.boxPoints(rect) box = np.int0(box) # sort x_sorted = sorted(box, key=lambda x: x[0]) left = x_sorted[:2] right = x_sorted[2:] left = sorted(left, key=lambda x: x[1]) (tl, bl) = left right = sorted(right, key=lambda x: x[1]) (tr, br) = right if tl[1] > bl[1]: (tl, bl) = (bl, tl) if tr[1] > br[1]: (tr, br) = (br, tr) return np.array([tl, tr, br, bl]) def adjust_image(box, img): pts1 = np.float32([box[0], box[1], box[2], box[3]]) width = max(np.linalg.norm(pts1[0] - pts1[1]), np.linalg.norm(pts1[2] - pts1[3])) height = max(np.linalg.norm(pts1[0] - pts1[3]), np.linalg.norm(pts1[1] - pts1[2])) pts2 = np.float32([[0, 0], [width, 0], [width, height], [0, height]]) # get transform matrix M = get_sym_mat(pts1, pts2, estimate_scale=True) C, H, W = img.shape T = np.array([[2 / W, 0, -1], [0, 2 / H, -1], [0, 0, 
1]]) theta = np.linalg.inv(T @ M @ np.linalg.inv(T)) theta = torch.from_numpy(theta[:2, :]).unsqueeze(0).type(torch.float32).to(img.device) grid = F.affine_grid(theta, torch.Size([1, C, H, W]), align_corners=True) result = F.grid_sample(img.unsqueeze(0), grid, align_corners=True) result = torch.clamp(result.squeeze(0), 0, 255) # crop result = result[:, : int(height), : int(width)] return result def crop_image(src_img, mask): box = min_bounding_rect(mask) result = adjust_image(box, src_img) if len(result.shape) == 2: result = torch.stack([result] * 3, axis=-1) return result def create_predictor(model_lang="ch", device="cpu", use_fp16=False): model_dir = hf_hub_download( repo_id="tolgacangoz/anytext", filename="text_embedding_module/OCR/ppv3_rec.pth", cache_dir=HF_MODULES_CACHE, ) if not os.path.exists(model_dir): raise ValueError("not find model file path {}".format(model_dir)) if model_lang == "ch": n_class = 6625 elif model_lang == "en": n_class = 97 else: raise ValueError(f"Unsupported OCR recog model_lang: {model_lang}") rec_config = { "in_channels": 3, "backbone": {"type": "MobileNetV1Enhance", "scale": 0.5, "last_conv_stride": [1, 2], "last_pool_type": "avg"}, "neck": { "type": "SequenceEncoder", "encoder_type": "svtr", "dims": 64, "depth": 2, "hidden_dims": 120, "use_guide": True, }, "head": {"type": "CTCHead", "fc_decay": 0.00001, "out_channels": n_class, "return_feats": True}, } rec_model = RecModel(rec_config) state_dict = torch.load(model_dir, map_location=device) rec_model.load_state_dict(state_dict) return rec_model def _check_image_file(path): img_end = ("tiff", "tif", "bmp", "rgb", "jpg", "png", "jpeg") return path.lower().endswith(tuple(img_end)) def get_image_file_list(img_file): imgs_lists = [] if img_file is None or not os.path.exists(img_file): raise Exception("not found any img file in {}".format(img_file)) if os.path.isfile(img_file) and _check_image_file(img_file): imgs_lists.append(img_file) elif os.path.isdir(img_file): for single_file in 
os.listdir(img_file): file_path = os.path.join(img_file, single_file) if os.path.isfile(file_path) and _check_image_file(file_path): imgs_lists.append(file_path) if len(imgs_lists) == 0: raise Exception("not found any img file in {}".format(img_file)) imgs_lists = sorted(imgs_lists) return imgs_lists class TextRecognizer(object): def __init__(self, args, predictor): self.rec_image_shape = [int(v) for v in args["rec_image_shape"].split(",")] self.rec_batch_num = args["rec_batch_num"] self.predictor = predictor self.chars = self.get_char_dict(args["rec_char_dict_path"]) self.char2id = {x: i for i, x in enumerate(self.chars)} self.is_onnx = not isinstance(self.predictor, torch.nn.Module) self.use_fp16 = args["use_fp16"] # img: CHW def resize_norm_img(self, img, max_wh_ratio): imgC, imgH, imgW = self.rec_image_shape assert imgC == img.shape[0] imgW = int((imgH * max_wh_ratio)) h, w = img.shape[1:] ratio = w / float(h) if math.ceil(imgH * ratio) > imgW: resized_w = imgW else: resized_w = int(math.ceil(imgH * ratio)) resized_image = torch.nn.functional.interpolate( img.unsqueeze(0), size=(imgH, resized_w), mode="bilinear", align_corners=True, ) resized_image /= 255.0 resized_image -= 0.5 resized_image /= 0.5 padding_im = torch.zeros((imgC, imgH, imgW), dtype=torch.float32).to(img.device) padding_im[:, :, 0:resized_w] = resized_image[0] return padding_im # img_list: list of tensors with shape chw 0-255 def pred_imglist(self, img_list, show_debug=False): img_num = len(img_list) assert img_num > 0 # Calculate the aspect ratio of all text bars width_list = [] for img in img_list: width_list.append(img.shape[2] / float(img.shape[1])) # Sorting can speed up the recognition process indices = torch.from_numpy(np.argsort(np.array(width_list))) batch_num = self.rec_batch_num preds_all = [None] * img_num preds_neck_all = [None] * img_num for beg_img_no in range(0, img_num, batch_num): end_img_no = min(img_num, beg_img_no + batch_num) norm_img_batch = [] imgC, imgH, imgW = 
self.rec_image_shape[:3] max_wh_ratio = imgW / imgH for ino in range(beg_img_no, end_img_no): h, w = img_list[indices[ino]].shape[1:] if h > w * 1.2: img = img_list[indices[ino]] img = torch.transpose(img, 1, 2).flip(dims=[1]) img_list[indices[ino]] = img h, w = img.shape[1:] # wh_ratio = w * 1.0 / h # max_wh_ratio = max(max_wh_ratio, wh_ratio) # comment to not use different ratio for ino in range(beg_img_no, end_img_no): norm_img = self.resize_norm_img(img_list[indices[ino]], max_wh_ratio) if self.use_fp16: norm_img = norm_img.half() norm_img = norm_img.unsqueeze(0) norm_img_batch.append(norm_img) norm_img_batch = torch.cat(norm_img_batch, dim=0) if show_debug: for i in range(len(norm_img_batch)): _img = norm_img_batch[i].permute(1, 2, 0).detach().cpu().numpy() _img = (_img + 0.5) * 255 _img = _img[:, :, ::-1] file_name = f"{indices[beg_img_no + i]}" if os.path.exists(file_name + ".jpg"): file_name += "_2" # ori image cv2.imwrite(file_name + ".jpg", _img) if self.is_onnx: input_dict = {} input_dict[self.predictor.get_inputs()[0].name] = norm_img_batch.detach().cpu().numpy() outputs = self.predictor.run(None, input_dict) preds = {} preds["ctc"] = torch.from_numpy(outputs[0]) preds["ctc_neck"] = [torch.zeros(1)] * img_num else: preds = self.predictor(norm_img_batch.to(next(self.predictor.parameters()).device)) for rno in range(preds["ctc"].shape[0]): preds_all[indices[beg_img_no + rno]] = preds["ctc"][rno] preds_neck_all[indices[beg_img_no + rno]] = preds["ctc_neck"][rno] return torch.stack(preds_all, dim=0), torch.stack(preds_neck_all, dim=0) def get_char_dict(self, character_dict_path): character_str = [] with open(character_dict_path, "rb") as fin: lines = fin.readlines() for line in lines: line = line.decode("utf-8").strip("\n").strip("\r\n") character_str.append(line) dict_character = list(character_str) dict_character = ["sos"] + dict_character + [" "] # eos is space return dict_character def get_text(self, order): char_list = [self.chars[text_id] for text_id 
in order]
        return "".join(char_list)

    def decode(self, mat):
        # Greedy CTC decoding: take the argmax class per timestep, then keep only
        # positions that differ from their predecessor (collapsing repeats) and
        # are not the blank token (index 0).
        text_index = mat.detach().cpu().numpy().argmax(axis=1)
        ignored_tokens = [0]
        selection = np.ones(len(text_index), dtype=bool)
        selection[1:] = text_index[1:] != text_index[:-1]
        for ignored_token in ignored_tokens:
            selection &= text_index != ignored_token
        # Returns (kept class ids, their timestep indices).
        return text_index[selection], np.where(selection)[0]

    def get_ctcloss(self, preds, gt_text, weight):
        # Per-sample CTC loss between prediction logits `preds` (layout N, T, C —
        # permuted to T, N, C below) and ground-truth strings `gt_text`,
        # scaled by the per-sample `weight`.
        if not isinstance(weight, torch.Tensor):
            weight = torch.tensor(weight).to(preds.device)
        ctc_loss = torch.nn.CTCLoss(reduction="none")
        log_probs = preds.log_softmax(dim=2).permute(1, 0, 2)  # NTC-->TNC
        targets = []
        target_lengths = []
        for t in gt_text:
            # Characters missing from the dictionary map to the last class index.
            targets += [self.char2id.get(i, len(self.chars) - 1) for i in t]
            target_lengths += [len(t)]
        targets = torch.tensor(targets).to(preds.device)
        target_lengths = torch.tensor(target_lengths).to(preds.device)
        input_lengths = torch.tensor([log_probs.shape[0]] * (log_probs.shape[1])).to(preds.device)
        loss = ctc_loss(log_probs, targets, input_lengths, target_lengths)
        # Length-normalize, then apply the per-sample weight.
        loss = loss / input_lengths * weight
        return loss


class AbstractEncoder(nn.Module):
    # Minimal interface class: subclasses must implement `encode`.
    def __init__(self):
        super().__init__()

    def encode(self, *args, **kwargs):
        raise NotImplementedError


class FrozenCLIPEmbedderT3(AbstractEncoder, ModelMixin, ConfigMixin):
    """Uses the CLIP transformer encoder for text (from Hugging Face)"""

    @register_to_config
    def __init__(
        self,
        device="cpu",
        max_length=77,
        freeze=True,
        use_fp16=False,
        variant: str | None = None,
    ):
        super().__init__()
        self.tokenizer = CLIPTokenizer.from_pretrained("tolgacangoz/anytext", subfolder="tokenizer")
        self.transformer = CLIPTextModel.from_pretrained(
            "tolgacangoz/anytext",
            subfolder="text_encoder",
            torch_dtype=torch.float16 if use_fp16 else torch.float32,
            variant="fp16" if use_fp16 else None,
        )
        if freeze:
            self.freeze()

        # Replacement forward for the CLIP embeddings module: lets an optional
        # `embedding_manager` rewrite the token embeddings before the positional
        # embeddings are added (bound to the module further below).
        def embedding_forward(
            self,
            input_ids=None,
            position_ids=None,
            inputs_embeds=None,
            embedding_manager=None,
        ):
            seq_length = input_ids.shape[-1] if input_ids is not None else
inputs_embeds.shape[-2] if position_ids is None: position_ids = self.position_ids[:, :seq_length] if inputs_embeds is None: inputs_embeds = self.token_embedding(input_ids) if embedding_manager is not None: inputs_embeds = embedding_manager(input_ids, inputs_embeds) position_embeddings = self.position_embedding(position_ids) embeddings = inputs_embeds + position_embeddings return embeddings self.transformer.text_model.embeddings.forward = embedding_forward.__get__( self.transformer.text_model.embeddings ) def encoder_forward( self, inputs_embeds, attention_mask=None, causal_attention_mask=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None hidden_states = inputs_embeds for idx, encoder_layer in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) layer_outputs = encoder_layer( hidden_states, attention_mask, causal_attention_mask, output_attentions=output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) return hidden_states self.transformer.text_model.encoder.forward = encoder_forward.__get__(self.transformer.text_model.encoder) def text_encoder_forward( self, input_ids=None, attention_mask=None, position_ids=None, output_attentions=None, output_hidden_states=None, return_dict=None, embedding_manager=None, ): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( 
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is None: raise ValueError("You have to specify either input_ids") input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) hidden_states = self.embeddings( input_ids=input_ids, position_ids=position_ids, embedding_manager=embedding_manager ) # CLIP's text model uses causal mask, prepare it here. # https://github.com/openai/CLIP/blob/cfcffb90e69f37bf2ff1e988237a0fbe41f33c04/clip/model.py#L324 causal_attention_mask = _create_4d_causal_attention_mask( input_shape, hidden_states.dtype, device=hidden_states.device ) # expand attention_mask if attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] attention_mask = _prepare_4d_attention_mask(attention_mask, hidden_states.dtype) last_hidden_state = self.encoder( inputs_embeds=hidden_states, attention_mask=attention_mask, causal_attention_mask=causal_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) last_hidden_state = self.final_layer_norm(last_hidden_state) return last_hidden_state self.transformer.text_model.forward = text_encoder_forward.__get__(self.transformer.text_model) def transformer_forward( self, input_ids=None, attention_mask=None, position_ids=None, output_attentions=None, output_hidden_states=None, return_dict=None, embedding_manager=None, ): return self.text_model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, embedding_manager=embedding_manager, ) self.transformer.forward = transformer_forward.__get__(self.transformer) def freeze(self): self.transformer = self.transformer.eval() for param in self.parameters(): param.requires_grad = False def forward(self, 
text, **kwargs): batch_encoding = self.tokenizer( text, truncation=False, max_length=self.config.max_length, return_length=True, return_overflowing_tokens=False, padding="longest", return_tensors="pt", ) input_ids = batch_encoding["input_ids"] tokens_list = self.split_chunks(input_ids) z_list = [] for tokens in tokens_list: tokens = tokens.to(self.device) _z = self.transformer(input_ids=tokens, **kwargs) z_list += [_z] return torch.cat(z_list, dim=1) def encode(self, text, **kwargs): return self(text, **kwargs) def split_chunks(self, input_ids, chunk_size=75): tokens_list = [] bs, n = input_ids.shape id_start = input_ids[:, 0].unsqueeze(1) # dim --> [bs, 1] id_end = input_ids[:, -1].unsqueeze(1) if n == 2: # empty caption tokens_list.append(torch.cat((id_start,) + (id_end,) * (chunk_size + 1), dim=1)) trimmed_encoding = input_ids[:, 1:-1] num_full_groups = (n - 2) // chunk_size for i in range(num_full_groups): group = trimmed_encoding[:, i * chunk_size : (i + 1) * chunk_size] group_pad = torch.cat((id_start, group, id_end), dim=1) tokens_list.append(group_pad) remaining_columns = (n - 2) % chunk_size if remaining_columns > 0: remaining_group = trimmed_encoding[:, -remaining_columns:] padding_columns = chunk_size - remaining_group.shape[1] padding = id_end.expand(bs, padding_columns) remaining_group_pad = torch.cat((id_start, remaining_group, padding, id_end), dim=1) tokens_list.append(remaining_group_pad) return tokens_list class TextEmbeddingModule(ModelMixin, ConfigMixin): @register_to_config def __init__(self, font_path, use_fp16=False, device="cpu"): super().__init__() font = ImageFont.truetype(font_path, 60) self.frozen_CLIP_embedder_t3 = FrozenCLIPEmbedderT3(device=device, use_fp16=use_fp16) self.embedding_manager = EmbeddingManager(self.frozen_CLIP_embedder_t3, use_fp16=use_fp16) self.text_predictor = create_predictor(device=device, use_fp16=use_fp16).eval() args = { "rec_image_shape": "3, 48, 320", "rec_batch_num": 6, "rec_char_dict_path": hf_hub_download( 
repo_id="tolgacangoz/anytext", filename="text_embedding_module/OCR/ppocr_keys_v1.txt", cache_dir=HF_MODULES_CACHE, ), "use_fp16": use_fp16, } self.embedding_manager.recog = TextRecognizer(args, self.text_predictor) self.register_to_config(font=font) @torch.no_grad() def forward( self, prompt, texts, negative_prompt, num_images_per_prompt, mode, draw_pos, sort_priority="↕", max_chars=77, revise_pos=False, h=512, w=512, ): if prompt is None and texts is None: raise ValueError("Prompt or texts must be provided!") # preprocess pos_imgs(if numpy, make sure it's white pos in black bg) if draw_pos is None: pos_imgs = np.zeros((w, h, 1)) if isinstance(draw_pos, PIL.Image.Image): pos_imgs = np.array(draw_pos)[..., ::-1] pos_imgs = 255 - pos_imgs elif isinstance(draw_pos, str): draw_pos = cv2.imread(draw_pos)[..., ::-1] if draw_pos is None: raise ValueError(f"Can't read draw_pos image from {draw_pos}!") pos_imgs = 255 - draw_pos elif isinstance(draw_pos, torch.Tensor): pos_imgs = draw_pos.cpu().numpy() else: if not isinstance(draw_pos, np.ndarray): raise ValueError(f"Unknown format of draw_pos: {type(draw_pos)}") if mode == "edit": pos_imgs = cv2.resize(pos_imgs, (w, h)) pos_imgs = pos_imgs[..., 0:1] pos_imgs = cv2.convertScaleAbs(pos_imgs) _, pos_imgs = cv2.threshold(pos_imgs, 254, 255, cv2.THRESH_BINARY) # separate pos_imgs pos_imgs = self.separate_pos_imgs(pos_imgs, sort_priority) if len(pos_imgs) == 0: pos_imgs = [np.zeros((h, w, 1))] n_lines = len(texts) if len(pos_imgs) < n_lines: if n_lines == 1 and texts[0] == " ": pass # text-to-image without text else: raise ValueError( f"Found {len(pos_imgs)} positions that < needed {n_lines} from prompt, check and try again!" ) elif len(pos_imgs) > n_lines: str_warning = f"Warning: found {len(pos_imgs)} positions that > needed {n_lines} from prompt." 
logger.warning(str_warning) # get pre_pos, poly_list, hint that needed for anytext pre_pos = [] poly_list = [] for input_pos in pos_imgs: if input_pos.mean() != 0: input_pos = input_pos[..., np.newaxis] if len(input_pos.shape) == 2 else input_pos poly, pos_img = self.find_polygon(input_pos) pre_pos += [pos_img / 255.0] poly_list += [poly] else: pre_pos += [np.zeros((h, w, 1))] poly_list += [None] np_hint = np.sum(pre_pos, axis=0).clip(0, 1) # prepare info dict text_info = {} text_info["glyphs"] = [] text_info["gly_line"] = [] text_info["positions"] = [] text_info["n_lines"] = [len(texts)] * num_images_per_prompt for i in range(len(texts)): text = texts[i] if len(text) > max_chars: str_warning = f'"{text}" length > max_chars: {max_chars}, will be cut off...' logger.warning(str_warning) text = text[:max_chars] gly_scale = 2 if pre_pos[i].mean() != 0: gly_line = self.draw_glyph(self.config.font, text) glyphs = self.draw_glyph2( self.config.font, text, poly_list[i], scale=gly_scale, width=w, height=h, add_space=False ) if revise_pos: resize_gly = cv2.resize(glyphs, (pre_pos[i].shape[1], pre_pos[i].shape[0])) new_pos = cv2.morphologyEx( (resize_gly * 255).astype(np.uint8), cv2.MORPH_CLOSE, kernel=np.ones((resize_gly.shape[0] // 10, resize_gly.shape[1] // 10), dtype=np.uint8), iterations=1, ) new_pos = new_pos[..., np.newaxis] if len(new_pos.shape) == 2 else new_pos contours, _ = cv2.findContours(new_pos, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE) if len(contours) != 1: str_warning = f"Fail to revise position {i} to bounding rect, remain position unchanged..." 
logger.warning(str_warning) else: rect = cv2.minAreaRect(contours[0]) poly = np.int0(cv2.boxPoints(rect)) pre_pos[i] = cv2.drawContours(new_pos, [poly], -1, 255, -1) / 255.0 else: glyphs = np.zeros((h * gly_scale, w * gly_scale, 1)) gly_line = np.zeros((80, 512, 1)) pos = pre_pos[i] text_info["glyphs"] += [self.arr2tensor(glyphs, num_images_per_prompt)] text_info["gly_line"] += [self.arr2tensor(gly_line, num_images_per_prompt)] text_info["positions"] += [self.arr2tensor(pos, num_images_per_prompt)] self.embedding_manager.encode_text(text_info) prompt_embeds = self.frozen_CLIP_embedder_t3.encode([prompt], embedding_manager=self.embedding_manager) self.embedding_manager.encode_text(text_info) negative_prompt_embeds = self.frozen_CLIP_embedder_t3.encode( [negative_prompt or ""], embedding_manager=self.embedding_manager ) return prompt_embeds, negative_prompt_embeds, text_info, np_hint def arr2tensor(self, arr, bs): arr = np.transpose(arr, (2, 0, 1)) _arr = torch.from_numpy(arr.copy()).float().cpu() if self.config.use_fp16: _arr = _arr.half() _arr = torch.stack([_arr for _ in range(bs)], dim=0) return _arr def separate_pos_imgs(self, img, sort_priority, gap=102): num_labels, labels, stats, centroids = cv2.connectedComponentsWithStats(img) components = [] for label in range(1, num_labels): component = np.zeros_like(img) component[labels == label] = 255 components.append((component, centroids[label])) if sort_priority == "↕": fir, sec = 1, 0 # top-down first elif sort_priority == "↔": fir, sec = 0, 1 # left-right first else: raise ValueError(f"Unknown sort_priority: {sort_priority}") components.sort(key=lambda c: (c[1][fir] // gap, c[1][sec] // gap)) sorted_components = [c[0] for c in components] return sorted_components def find_polygon(self, image, min_rect=False): contours, hierarchy = cv2.findContours(image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE) max_contour = max(contours, key=cv2.contourArea) # get contour with max area if min_rect: # get minimum enclosing 
rectangle rect = cv2.minAreaRect(max_contour) poly = np.int0(cv2.boxPoints(rect)) else: # get approximate polygon epsilon = 0.01 * cv2.arcLength(max_contour, True) poly = cv2.approxPolyDP(max_contour, epsilon, True) n, _, xy = poly.shape poly = poly.reshape(n, xy) cv2.drawContours(image, [poly], -1, 255, -1) return poly, image def draw_glyph(self, font, text): g_size = 50 W, H = (512, 80) new_font = font.font_variant(size=g_size) img = Image.new(mode="1", size=(W, H), color=0) draw = ImageDraw.Draw(img) left, top, right, bottom = new_font.getbbox(text) text_width = max(right - left, 5) text_height = max(bottom - top, 5) ratio = min(W * 0.9 / text_width, H * 0.9 / text_height) new_font = font.font_variant(size=int(g_size * ratio)) left, top, right, bottom = new_font.getbbox(text) text_width = right - left text_height = bottom - top x = (img.width - text_width) // 2 y = (img.height - text_height) // 2 - top // 2 draw.text((x, y), text, font=new_font, fill="white") img = np.expand_dims(np.array(img), axis=2).astype(np.float64) return img def draw_glyph2(self, font, text, polygon, vertAng=10, scale=1, width=512, height=512, add_space=True): enlarge_polygon = polygon * scale rect = cv2.minAreaRect(enlarge_polygon) box = cv2.boxPoints(rect) box = np.int0(box) w, h = rect[1] angle = rect[2] if angle < -45: angle += 90 angle = -angle if w < h: angle += 90 vert = False if abs(angle) % 90 < vertAng or abs(90 - abs(angle) % 90) % 90 < vertAng: _w = max(box[:, 0]) - min(box[:, 0]) _h = max(box[:, 1]) - min(box[:, 1]) if _h >= _w: vert = True angle = 0 img = np.zeros((height * scale, width * scale, 3), np.uint8) img = Image.fromarray(img) # infer font size image4ratio = Image.new("RGB", img.size, "white") draw = ImageDraw.Draw(image4ratio) _, _, _tw, _th = draw.textbbox(xy=(0, 0), text=text, font=font) text_w = min(w, h) * (_tw / _th) if text_w <= max(w, h): # add space if len(text) > 1 and not vert and add_space: for i in range(1, 100): text_space = self.insert_spaces(text, i) 
_, _, _tw2, _th2 = draw.textbbox(xy=(0, 0), text=text_space, font=font) if min(w, h) * (_tw2 / _th2) > max(w, h): break text = self.insert_spaces(text, i - 1) font_size = min(w, h) * 0.80 else: shrink = 0.75 if vert else 0.85 font_size = min(w, h) / (text_w / max(w, h)) * shrink new_font = font.font_variant(size=int(font_size)) left, top, right, bottom = new_font.getbbox(text) text_width = right - left text_height = bottom - top layer = Image.new("RGBA", img.size, (0, 0, 0, 0)) draw = ImageDraw.Draw(layer) if not vert: draw.text( (rect[0][0] - text_width // 2, rect[0][1] - text_height // 2 - top), text, font=new_font, fill=(255, 255, 255, 255), ) else: x_s = min(box[:, 0]) + _w // 2 - text_height // 2 y_s = min(box[:, 1]) for c in text: draw.text((x_s, y_s), c, font=new_font, fill=(255, 255, 255, 255)) _, _t, _, _b = new_font.getbbox(c) y_s += _b rotated_layer = layer.rotate(angle, expand=1, center=(rect[0][0], rect[0][1])) x_offset = int((img.width - rotated_layer.width) / 2) y_offset = int((img.height - rotated_layer.height) / 2) img.paste(rotated_layer, (x_offset, y_offset), rotated_layer) img = np.expand_dims(np.array(img.convert("1")), axis=2).astype(np.float64) return img def insert_spaces(self, string, nSpace): if nSpace == 0: return string new_string = "" for char in string: new_string += char + " " * nSpace return new_string[:-nSpace] # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents def retrieve_latents( encoder_output: torch.Tensor, generator: torch.Generator | None = None, sample_mode: str = "sample" ): if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": return encoder_output.latent_dist.sample(generator) elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": return encoder_output.latent_dist.mode() elif hasattr(encoder_output, "latents"): return encoder_output.latents else: raise AttributeError("Could not access latents of provided encoder_output") class 
AuxiliaryLatentModule(ModelMixin, ConfigMixin): @register_to_config def __init__( self, vae, device="cpu", ): super().__init__() @torch.no_grad() def forward( self, text_info, mode, draw_pos, ori_image, num_images_per_prompt, np_hint, h=512, w=512, ): if mode == "generate": edit_image = np.ones((h, w, 3)) * 127.5 # empty mask image elif mode == "edit": if draw_pos is None or ori_image is None: raise ValueError("Reference image and position image are needed for text editing!") if isinstance(ori_image, str): ori_image = cv2.imread(ori_image)[..., ::-1] if ori_image is None: raise ValueError(f"Can't read ori_image image from {ori_image}!") elif isinstance(ori_image, torch.Tensor): ori_image = ori_image.cpu().numpy() elif isinstance(ori_image, PIL.Image.Image): ori_image = np.array(ori_image.convert("RGB")) else: if not isinstance(ori_image, np.ndarray): raise ValueError(f"Unknown format of ori_image: {type(ori_image)}") edit_image = ori_image.clip(1, 255) # for mask reason edit_image = self.check_channels(edit_image) edit_image = self.resize_image( edit_image, max_length=768 ) # make w h multiple of 64, resize if w or h > max_length # get masked_x masked_img = ((edit_image.astype(np.float32) / 127.5) - 1.0) * (1 - np_hint) masked_img = np.transpose(masked_img, (2, 0, 1)) device = next(self.config.vae.parameters()).device dtype = next(self.config.vae.parameters()).dtype masked_img = torch.from_numpy(masked_img.copy()).float().to(device) if dtype == torch.float16: masked_img = masked_img.half() masked_x = ( retrieve_latents(self.config.vae.encode(masked_img[None, ...])) * self.config.vae.config.scaling_factor ).detach() if dtype == torch.float16: masked_x = masked_x.half() text_info["masked_x"] = torch.cat([masked_x for _ in range(num_images_per_prompt)], dim=0) glyphs = torch.cat(text_info["glyphs"], dim=1).sum(dim=1, keepdim=True) positions = torch.cat(text_info["positions"], dim=1).sum(dim=1, keepdim=True) return glyphs, positions, text_info def check_channels(self, 
image):
        # Normalize to a 3-channel image: promote grayscale, drop channels past the third.
        channels = image.shape[2] if len(image.shape) == 3 else 1
        if channels == 1:
            image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
        elif channels > 3:
            image = image[:, :, :3]
        return image

    def resize_image(self, img, max_length=768):
        # Downscale so the longest side is <= max_length, then snap both sides
        # down to multiples of 64 (presumably to match the model's downsampling
        # strides — TODO confirm against the UNet/VAE config).
        height, width = img.shape[:2]
        max_dimension = max(height, width)

        if max_dimension > max_length:
            scale_factor = max_length / max_dimension
            new_width = int(round(width * scale_factor))
            new_height = int(round(height * scale_factor))
            new_size = (new_width, new_height)
            img = cv2.resize(img, new_size)
        height, width = img.shape[:2]
        img = cv2.resize(img, (width - (width % 64), height - (height % 64)))
        return img

    def insert_spaces(self, string, nSpace):
        # Insert `nSpace` spaces between every pair of adjacent characters.
        if nSpace == 0:
            return string
        new_string = ""
        for char in string:
            new_string += char + " " * nSpace
        return new_string[:-nSpace]


# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps
def retrieve_timesteps(
    scheduler,
    num_inference_steps: Optional[int] = None,
    device: Optional[Union[str, torch.device]] = None,
    timesteps: Optional[List[int]] = None,
    sigmas: Optional[List[float]] = None,
    **kwargs,
):
    """
    Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
    custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.

    Args:
        scheduler (`SchedulerMixin`):
            The scheduler to get timesteps from.
        num_inference_steps (`int`):
            The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps`
            must be `None`.
        device (`str` or `torch.device`, *optional*):
            The device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
        timesteps (`List[int]`, *optional*):
            Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed,
            `num_inference_steps` and `sigmas` must be `None`.
        sigmas (`List[float]`, *optional*):
            Custom sigmas used to override the timestep spacing strategy of the scheduler.
If `sigmas` is passed, `num_inference_steps` and `timesteps` must be `None`. Returns: `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the second element is the number of inference steps. """ if timesteps is not None and sigmas is not None: raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") if timesteps is not None: accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" timestep schedules. Please check whether you are using the correct scheduler." ) scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif sigmas is not None: accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accept_sigmas: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" sigmas schedules. Please check whether you are using the correct scheduler." ) scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return timesteps, num_inference_steps class AnyTextPipeline( DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, StableDiffusionLoraLoaderMixin, IPAdapterMixin, FromSingleFileMixin, ): r""" Pipeline for text-to-image generation using Stable Diffusion with ControlNet guidance. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). 
The pipeline also inherits the following loading methods: - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings - [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for loading LoRA weights - [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for saving LoRA weights - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. text_encoder ([`~transformers.CLIPTextModel`]): Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). tokenizer ([`~transformers.CLIPTokenizer`]): A `CLIPTokenizer` to tokenize text. unet ([`UNet2DConditionModel`]): A `UNet2DConditionModel` to denoise the encoded image latents. controlnet ([`ControlNetModel`] or `List[ControlNetModel]`): Provides additional conditioning to the `unet` during the denoising process. If you set multiple ControlNets as a list, the outputs from each ControlNet are added together to create one combined additional conditioning. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. safety_checker ([`StableDiffusionSafetyChecker`]): Classification module that estimates whether generated images could be considered offensive or harmful. Please refer to the [model card](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5) for more details about a model's potential harms. feature_extractor ([`~transformers.CLIPImageProcessor`]): A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. 
""" model_cpu_offload_seq = "text_encoder->image_encoder->unet->vae" _optional_components = ["safety_checker", "feature_extractor", "image_encoder"] _exclude_from_cpu_offload = ["safety_checker"] _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel], MultiControlNetModel], scheduler: KarrasDiffusionSchedulers, safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, font_path: str = None, text_embedding_module: Optional[TextEmbeddingModule] = None, auxiliary_latent_module: Optional[AuxiliaryLatentModule] = None, trust_remote_code: bool = False, image_encoder: CLIPVisionModelWithProjection = None, requires_safety_checker: bool = True, ): super().__init__() if font_path is None: raise ValueError("font_path is required!") text_embedding_module = TextEmbeddingModule(font_path=font_path, use_fp16=unet.dtype == torch.float16) auxiliary_latent_module = AuxiliaryLatentModule(vae=vae) if safety_checker is None and requires_safety_checker: logger.warning( f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" " results in services or applications open to the public. Both the diffusers team and Hugging Face" " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" " it only for use-cases that involve analyzing network behavior or auditing its results. For more" " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." 
) if safety_checker is not None and feature_extractor is None: raise ValueError( "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." ) if isinstance(controlnet, (list, tuple)): controlnet = MultiControlNetModel(controlnet) self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, controlnet=controlnet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, image_encoder=image_encoder, text_embedding_module=text_embedding_module, auxiliary_latent_module=auxiliary_latent_module, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True) self.control_image_processor = VaeImageProcessor( vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False ) self.register_to_config(requires_safety_checker=requires_safety_checker) def modify_prompt(self, prompt): prompt = prompt.replace("“", '"') prompt = prompt.replace("”", '"') p = '"(.*?)"' strs = re.findall(p, prompt) if len(strs) == 0: strs = [" "] else: for s in strs: prompt = prompt.replace(f'"{s}"', f" {PLACE_HOLDER} ", 1) if self.is_chinese(prompt): if self.trans_pipe is None: return None, None old_prompt = prompt prompt = self.trans_pipe(input=prompt + " .")["translation"][:-1] print(f"Translate: {old_prompt} --> {prompt}") return prompt, strs def is_chinese(self, text): text = checker._clean_text(text) for char in text: cp = ord(char) if checker._is_chinese_char(cp): return True return False # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt def _encode_prompt( self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor] = None, 
        negative_prompt_embeds: Optional[torch.Tensor] = None,
        lora_scale: Optional[float] = None,
        **kwargs,
    ):
        deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple."
        deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False)

        # Delegate to the non-deprecated API, which returns (cond_embeds, uncond_embeds).
        prompt_embeds_tuple = self.encode_prompt(
            prompt=prompt,
            device=device,
            num_images_per_prompt=num_images_per_prompt,
            do_classifier_free_guidance=do_classifier_free_guidance,
            negative_prompt=negative_prompt,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            lora_scale=lora_scale,
            **kwargs,
        )

        # concatenate for backwards comp: legacy callers expect a single [uncond; cond] tensor
        prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]])

        return prompt_embeds

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt
    def encode_prompt(
        self,
        prompt,
        device,
        num_images_per_prompt,
        do_classifier_free_guidance,
        negative_prompt=None,
        prompt_embeds: Optional[torch.Tensor] = None,
        negative_prompt_embeds: Optional[torch.Tensor] = None,
        lora_scale: Optional[float] = None,
        clip_skip: Optional[int] = None,
    ):
        r"""
        Encodes the prompt into text encoder hidden states.

        Args:
            prompt (`str` or `List[str]`, *optional*):
                prompt to be encoded
            device: (`torch.device`):
                torch device
            num_images_per_prompt (`int`):
                number of images that should be generated per prompt
            do_classifier_free_guidance (`bool`):
                whether to use classifier free guidance or not
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. If not defined, one has to pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
                less than `1`).
            prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
                If not provided, text embeddings will be generated from `prompt` input argument.
            negative_prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
                argument.
            lora_scale (`float`, *optional*):
                A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
            clip_skip (`int`, *optional*):
                Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
                the output of the pre-final layer will be used for computing the prompt embeddings.
        """
        # set lora scale so that monkey patched LoRA
        # function of text encoder can correctly access it
        if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin):
            self._lora_scale = lora_scale

            # dynamically adjust the LoRA scale
            if not USE_PEFT_BACKEND:
                adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
            else:
                scale_lora_layers(self.text_encoder, lora_scale)

        # Determine the batch size from the prompt, or from pre-computed embeddings.
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        if prompt_embeds is None:
            # textual inversion: process multi-vector tokens if necessary
            if isinstance(self, TextualInversionLoaderMixin):
                prompt = self.maybe_convert_prompt(prompt, self.tokenizer)

            text_inputs = self.tokenizer(
                prompt,
                padding="max_length",
                max_length=self.tokenizer.model_max_length,
                truncation=True,
                return_tensors="pt",
            )
            text_input_ids = text_inputs.input_ids
            # Re-tokenize without truncation to detect (and report) any clipped tail.
            untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids

            if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
                text_input_ids, untruncated_ids
            ):
                removed_text = self.tokenizer.batch_decode(
                    untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
                )
                logger.warning(
                    "The following part of your input was truncated because CLIP can only handle sequences up to"
                    f" {self.tokenizer.model_max_length} tokens: {removed_text}"
                )

            # Pass an attention mask only when the text encoder's config opts into it.
            if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
                attention_mask = text_inputs.attention_mask.to(device)
            else:
                attention_mask = None

            if clip_skip is None:
                prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask)
                prompt_embeds = prompt_embeds[0]
            else:
                prompt_embeds = self.text_encoder(
                    text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True
                )
                # Access the `hidden_states` first, that contains a tuple of
                # all the hidden states from the encoder layers. Then index into
                # the tuple to access the hidden states from the desired layer.
                prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)]
                # We also need to apply the final LayerNorm here to not mess with the
                # representations. The `last_hidden_states` that we typically use for
                # obtaining the final prompt representations passes through the LayerNorm
                # layer.
                prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds)

        # Resolve the target dtype: prefer the text encoder's, fall back to the unet's.
        if self.text_encoder is not None:
            prompt_embeds_dtype = self.text_encoder.dtype
        elif self.unet is not None:
            prompt_embeds_dtype = self.unet.dtype
        else:
            prompt_embeds_dtype = prompt_embeds.dtype

        prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)

        bs_embed, seq_len, _ = prompt_embeds.shape
        # duplicate text embeddings for each generation per prompt, using mps friendly method
        prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
        prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance and negative_prompt_embeds is None:
            uncond_tokens: List[str]
            if negative_prompt is None:
                # Default negative prompt: one empty string per batch element.
                uncond_tokens = [""] * batch_size
            elif prompt is not None and type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            # textual inversion: process multi-vector tokens if necessary
            if isinstance(self, TextualInversionLoaderMixin):
                uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)

            # Pad the unconditional input to the same sequence length as the conditional one.
            max_length = prompt_embeds.shape[1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )

            if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
                attention_mask = uncond_input.attention_mask.to(device)
            else:
                attention_mask = None

            negative_prompt_embeds = self.text_encoder(
                uncond_input.input_ids.to(device),
                attention_mask=attention_mask,
            )
            negative_prompt_embeds = negative_prompt_embeds[0]

        if do_classifier_free_guidance:
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]

            negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)

            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

        if self.text_encoder is not None:
            if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND:
                # Retrieve the original scale by scaling back the LoRA layers
                unscale_lora_layers(self.text_encoder, lora_scale)

        return prompt_embeds, negative_prompt_embeds

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image
    def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None):
        dtype = next(self.image_encoder.parameters()).dtype

        if not isinstance(image, torch.Tensor):
            image = self.feature_extractor(image, return_tensors="pt").pixel_values

        image = image.to(device=device, dtype=dtype)
        if output_hidden_states:
            # Penultimate hidden state is what IP-Adapter projection layers expect.
            image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2]
            image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
            # Unconditional branch: encode an all-zeros image of the same shape.
            uncond_image_enc_hidden_states = self.image_encoder(
                torch.zeros_like(image), output_hidden_states=True
            ).hidden_states[-2]
            uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(
                num_images_per_prompt, dim=0
            )
            return image_enc_hidden_states, uncond_image_enc_hidden_states
        else:
            image_embeds = self.image_encoder(image).image_embeds
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            uncond_image_embeds = torch.zeros_like(image_embeds)

            return image_embeds, uncond_image_embeds

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds
    def prepare_ip_adapter_image_embeds(
        self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance
    ):
        image_embeds = []
        if do_classifier_free_guidance:
            negative_image_embeds = []
        if ip_adapter_image_embeds is None:
            # Encode the raw IP-Adapter image(s); one image per installed adapter is required.
            if not isinstance(ip_adapter_image, list):
                ip_adapter_image = [ip_adapter_image]

            if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers):
                raise ValueError(
                    f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters."
                )

            for single_ip_adapter_image, image_proj_layer in zip(
                ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers
            ):
                # `ImageProjection` layers consume pooled embeds; everything else consumes hidden states.
                output_hidden_state = not isinstance(image_proj_layer, ImageProjection)
                single_image_embeds, single_negative_image_embeds = self.encode_image(
                    single_ip_adapter_image, device, 1, output_hidden_state
                )

                image_embeds.append(single_image_embeds[None, :])
                if do_classifier_free_guidance:
                    negative_image_embeds.append(single_negative_image_embeds[None, :])
        else:
            # Pre-computed embeds: under CFG each entry packs [negative; positive] — split them.
            for single_image_embeds in ip_adapter_image_embeds:
                if do_classifier_free_guidance:
                    single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2)
                    negative_image_embeds.append(single_negative_image_embeds)
                image_embeds.append(single_image_embeds)

        ip_adapter_image_embeds = []
        for i, single_image_embeds in enumerate(image_embeds):
            # Tile per requested image count, then re-stack [negative; positive] for CFG.
            single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0)
            if do_classifier_free_guidance:
                single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0)
                single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0)

            single_image_embeds = single_image_embeds.to(device=device)
            ip_adapter_image_embeds.append(single_image_embeds)

        return ip_adapter_image_embeds

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker
    def run_safety_checker(self, image, device, dtype):
        if self.safety_checker is None:
            has_nsfw_concept = None
        else:
            # The safety checker's feature extractor needs PIL input, whatever `image`'s current form.
            if torch.is_tensor(image):
                feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
            else:
                feature_extractor_input = self.image_processor.numpy_to_pil(image)
            safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device)
            image, has_nsfw_concept = self.safety_checker(
                images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
            )
        return image, has_nsfw_concept

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents
    def decode_latents(self, latents):
        deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead"
        deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False)

        # Undo the VAE scaling, decode, then map from [-1, 1] to [0, 1].
        latents = 1 / self.vae.config.scaling_factor * latents
        image = self.vae.decode(latents, return_dict=False)[0]
        image = (image / 2 + 0.5).clamp(0, 1)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        return image

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
    def prepare_extra_step_kwargs(self, generator, eta):
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://huggingface.co/papers/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs def check_inputs( self, prompt, # image, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, ip_adapter_image=None, ip_adapter_image_embeds=None, controlnet_conditioning_scale=1.0, control_guidance_start=0.0, control_guidance_end=1.0, callback_on_step_end_tensor_inputs=None, ): if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) if callback_on_step_end_tensor_inputs is not None and not all( k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs ): raise ValueError( f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" ) if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." 
) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." ) # Check `image` is_compiled = hasattr(F, "scaled_dot_product_attention") and isinstance( self.controlnet, torch._dynamo.eval_frame.OptimizedModule ) # Check `controlnet_conditioning_scale` if ( isinstance(self.controlnet, ControlNetModel) or is_compiled and isinstance(self.controlnet._orig_mod, ControlNetModel) ): if not isinstance(controlnet_conditioning_scale, float): print(controlnet_conditioning_scale) raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.") elif ( isinstance(self.controlnet, MultiControlNetModel) or is_compiled and isinstance(self.controlnet._orig_mod, MultiControlNetModel) ): if isinstance(controlnet_conditioning_scale, list): if any(isinstance(i, list) for i in controlnet_conditioning_scale): raise ValueError( "A single batch of varying conditioning scale settings (e.g. [[1.0, 0.5], [0.2, 0.8]]) is not supported at the moment. " "The conditioning scale must be fixed across the batch." 
) elif isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len( self.controlnet.nets ): raise ValueError( "For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have" " the same length as the number of controlnets" ) else: assert False if not isinstance(control_guidance_start, (tuple, list)): control_guidance_start = [control_guidance_start] if not isinstance(control_guidance_end, (tuple, list)): control_guidance_end = [control_guidance_end] if len(control_guidance_start) != len(control_guidance_end): raise ValueError( f"`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list." ) if isinstance(self.controlnet, MultiControlNetModel): if len(control_guidance_start) != len(self.controlnet.nets): raise ValueError( f"`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {len(self.controlnet.nets)} controlnets available. Make sure to provide {len(self.controlnet.nets)}." ) for start, end in zip(control_guidance_start, control_guidance_end): if start >= end: raise ValueError( f"control guidance start: {start} cannot be larger or equal to control guidance end: {end}." ) if start < 0.0: raise ValueError(f"control guidance start: {start} can't be smaller than 0.") if end > 1.0: raise ValueError(f"control guidance end: {end} can't be larger than 1.0.") if ip_adapter_image is not None and ip_adapter_image_embeds is not None: raise ValueError( "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined." 
            )

        if ip_adapter_image_embeds is not None:
            if not isinstance(ip_adapter_image_embeds, list):
                raise ValueError(
                    f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}"
                )
            elif ip_adapter_image_embeds[0].ndim not in [3, 4]:
                raise ValueError(
                    f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D"
                )

    def check_image(self, image, prompt, prompt_embeds):
        # Accept a single PIL image / tensor / ndarray, or a homogeneous list of one of those.
        image_is_pil = isinstance(image, PIL.Image.Image)
        image_is_tensor = isinstance(image, torch.Tensor)
        image_is_np = isinstance(image, np.ndarray)
        image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image)
        image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor)
        image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray)

        if (
            not image_is_pil
            and not image_is_tensor
            and not image_is_np
            and not image_is_pil_list
            and not image_is_tensor_list
            and not image_is_np_list
        ):
            raise TypeError(
                f"image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}"
            )

        if image_is_pil:
            image_batch_size = 1
        else:
            image_batch_size = len(image)

        if prompt is not None and isinstance(prompt, str):
            prompt_batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            prompt_batch_size = len(prompt)
        elif prompt_embeds is not None:
            prompt_batch_size = prompt_embeds.shape[0]

        # A single image broadcasts over the prompt batch; otherwise the sizes must match.
        if image_batch_size != 1 and image_batch_size != prompt_batch_size:
            raise ValueError(
                f"If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}"
            )

    def prepare_image(
        self,
        image,
        width,
        height,
        batch_size,
        num_images_per_prompt,
        device,
        dtype,
        do_classifier_free_guidance=False,
        guess_mode=False,
    ):
        # Resize/convert the ControlNet conditioning image and tile it to the effective batch size.
        image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32)
        image_batch_size = image.shape[0]

        if image_batch_size == 1:
            repeat_by = batch_size
        else:
            # image batch size is the same as prompt batch size
            repeat_by = num_images_per_prompt

        image = image.repeat_interleave(repeat_by, dim=0)

        image = image.to(device=device, dtype=dtype)

        # In guess mode only the conditional pass sees the controlnet image, so no doubling.
        if do_classifier_free_guidance and not guess_mode:
            image = torch.cat([image] * 2)

        return image

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
    def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
        # Latent spatial dims are the pixel dims divided by the VAE downsampling factor.
        shape = (
            batch_size,
            num_channels_latents,
            int(height) // self.vae_scale_factor,
            int(width) // self.vae_scale_factor,
        )
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            # Caller-provided latents are used as-is, only moved to the target device.
            latents = latents.to(device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        return latents

    # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding
    def get_guidance_scale_embedding(
        self, w: torch.Tensor, embedding_dim: int = 512, dtype: torch.dtype = torch.float32
    ) -> torch.Tensor:
        """
        See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298

        Args:
            w (`torch.Tensor`):
                Generate embedding vectors with a specified guidance scale to subsequently enrich timestep embeddings.
            embedding_dim (`int`, *optional*, defaults to 512):
                Dimension of the embeddings to generate.
            dtype (`torch.dtype`, *optional*, defaults to `torch.float32`):
                Data type of the generated embeddings.

        Returns:
            `torch.Tensor`: Embedding vectors with shape `(len(w), embedding_dim)`.
        """
        # NOTE(review): these `assert`s are stripped under `python -O`; kept as-is to preserve
        # the upstream `# Copied from` source verbatim.
        assert len(w.shape) == 1
        w = w * 1000.0

        # Standard sinusoidal embedding: log-spaced frequencies, then sin/cos halves.
        half_dim = embedding_dim // 2
        emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1)
        emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb)
        emb = w.to(dtype)[:, None] * emb[None, :]
        emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
        if embedding_dim % 2 == 1:  # zero pad
            emb = torch.nn.functional.pad(emb, (0, 1))
        assert emb.shape == (w.shape[0], embedding_dim)
        return emb

    @property
    def guidance_scale(self):
        # Set by `__call__`; exposed for callbacks.
        return self._guidance_scale

    @property
    def clip_skip(self):
        return self._clip_skip

    # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
    # of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1`
    # corresponds to doing no classifier free guidance.

    @property
    def do_classifier_free_guidance(self):
        # CFG is disabled when the UNet takes a guidance embedding directly
        # (`time_cond_proj_dim` set, LCM-style distilled models).
        return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None

    @property
    def cross_attention_kwargs(self):
        # kwargs forwarded to the attention processors, stored by __call__
        return self._cross_attention_kwargs

    @property
    def num_timesteps(self):
        # number of scheduler timesteps of the last (or current) run
        return self._num_timesteps

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        prompt: Union[str, List[str]] = None,
        height: Optional[int] = None,
        width: Optional[int] = None,
        num_inference_steps: int = 50,
        mode: str | None = "generate",
        draw_pos: Optional[Union[str, torch.Tensor]] = None,
        ori_image: Optional[Union[str, torch.Tensor]] = None,
        timesteps: List[int] = None,
        sigmas: List[float] = None,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.Tensor] = None,
        prompt_embeds: Optional[torch.Tensor] = None,
        negative_prompt_embeds: Optional[torch.Tensor] = None,
        ip_adapter_image: Optional[PipelineImageInput] = None,
        ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None,
        output_type: str | None = "pil",
        return_dict: bool = True,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        controlnet_conditioning_scale: Union[float, List[float]] = 1.0,
        guess_mode: bool = False,
        control_guidance_start: Union[float, List[float]] = 0.0,
        control_guidance_end: Union[float, List[float]] = 1.0,
        clip_skip: Optional[int] = None,
        callback_on_step_end: Optional[
            Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks]
        ] = None,
        # NOTE(review): mutable default list — only read here, never mutated, so it is safe in practice.
        callback_on_step_end_tensor_inputs: List[str] = ["latents"],
        **kwargs,
    ):
        r"""
        The call function to the pipeline for generation.

        Args:
            prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
            image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,:
                `List[List[torch.Tensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`):
                (Note: this pipeline's `__call__` has no `image` argument; the ControlNet condition is built internally
                from `draw_pos`/`ori_image`. This entry is retained from the pipeline this docstring was adapted from.)
                The ControlNet input condition to provide guidance to the `unet` for generation. If the type is
                specified as `torch.Tensor`, it is passed to ControlNet as is. `PIL.Image.Image` can also be accepted
                as an image. The dimensions of the output image defaults to `image`'s dimensions. If height and/or
                width are passed, `image` is resized accordingly. If multiple ControlNets are specified in `init`,
                images must be passed as a list such that each element of the list can be correctly batched for input
                to a single ControlNet. When `prompt` is a list, and if a list of images is passed for a single
                ControlNet, each will be paired with each prompt in the `prompt` list. This also applies to multiple
                ControlNets, where a list of image lists can be passed to batch for each prompt and each ControlNet.
            height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
                The height in pixels of the generated image.
            width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
                The width in pixels of the generated image.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            timesteps (`List[int]`, *optional*):
                Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument
                in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
                passed will be used. Must be in descending order.
            sigmas (`List[float]`, *optional*):
                Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in
                their `set_timesteps` method.
If not defined, the default behavior when `num_inference_steps` is passed will be used. guidance_scale (`float`, *optional*, defaults to 7.5): A higher guidance scale value encourages the model to generate images closely linked to the text `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide what to not include in image generation. If not defined, you need to pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) from the [DDIM](https://huggingface.co/papers/2010.02502) paper. Only applies to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.Tensor`, *optional*): Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor is generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, text embeddings are generated from the `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters. 
ip_adapter_image_embeds (`List[torch.Tensor]`, *optional*): Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of IP-adapters. Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should contain the negative image embedding if `do_classifier_free_guidance` is set to `True`. If not provided, embeddings are computed from the `ip_adapter_image` input argument. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generated image. Choose between `PIL.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a plain tuple. callback (`Callable`, *optional*): A function that calls every `callback_steps` steps during inference. The function is called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function is called. If not specified, the callback is called at every step. cross_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0): The outputs of the ControlNet are multiplied by `controlnet_conditioning_scale` before they are added to the residual in the original `unet`. If multiple ControlNets are specified in `init`, you can set the corresponding scale as a list. guess_mode (`bool`, *optional*, defaults to `False`): The ControlNet encoder tries to recognize the content of the input image even if you remove all prompts. A `guidance_scale` value between 3.0 and 5.0 is recommended. 
control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0): The percentage of total steps at which the ControlNet starts applying. control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0): The percentage of total steps at which the ControlNet stops applying. clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*): A function or a subclass of `PipelineCallback` or `MultiPipelineCallbacks` that is called at the end of each denoising step during the inference. with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by `callback_on_step_end_tensor_inputs`. callback_on_step_end_tensor_inputs (`List`, *optional*): The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the `._callback_tensor_inputs` attribute of your pipeline class. Examples: Returns: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated images and the second element is a list of `bool`s indicating whether the corresponding generated image contains "not-safe-for-work" (nsfw) content. 
""" callback = kwargs.pop("callback", None) callback_steps = kwargs.pop("callback_steps", None) if callback is not None: deprecate( "callback", "1.0.0", "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`", ) if callback_steps is not None: deprecate( "callback_steps", "1.0.0", "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`", ) if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet # align format for control guidance if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list): control_guidance_start = len(control_guidance_end) * [control_guidance_start] elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list): control_guidance_end = len(control_guidance_start) * [control_guidance_end] elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list): mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1 control_guidance_start, control_guidance_end = ( mult * [control_guidance_start], mult * [control_guidance_end], ) # 1. Check inputs. Raise error if not correct self.check_inputs( prompt, # image, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds, ip_adapter_image, ip_adapter_image_embeds, controlnet_conditioning_scale, control_guidance_start, control_guidance_end, callback_on_step_end_tensor_inputs, ) self._guidance_scale = guidance_scale self._clip_skip = clip_skip self._cross_attention_kwargs = cross_attention_kwargs # 2. 
Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float): controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets) global_pool_conditions = ( controlnet.config.global_pool_conditions if isinstance(controlnet, ControlNetModel) else controlnet.nets[0].config.global_pool_conditions ) guess_mode = guess_mode or global_pool_conditions prompt, texts = self.modify_prompt(prompt) # 3. Encode input prompt text_encoder_lora_scale = ( self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None ) draw_pos = draw_pos.to(device=device) if isinstance(draw_pos, torch.Tensor) else draw_pos prompt_embeds, negative_prompt_embeds, text_info, np_hint = self.text_embedding_module( prompt, texts, negative_prompt, num_images_per_prompt, mode, draw_pos, ) # For classifier free guidance, we need to do two forward passes. 
# Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes if self.do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) if ip_adapter_image is not None or ip_adapter_image_embeds is not None: image_embeds = self.prepare_ip_adapter_image_embeds( ip_adapter_image, ip_adapter_image_embeds, device, batch_size * num_images_per_prompt, self.do_classifier_free_guidance, ) # 3.5 Optionally get Guidance Scale Embedding timestep_cond = None if self.unet.config.time_cond_proj_dim is not None: guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) timestep_cond = self.get_guidance_scale_embedding( guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim ).to(device=device, dtype=latents.dtype) # 4. Prepare image if isinstance(controlnet, ControlNetModel): guided_hint = self.auxiliary_latent_module( text_info=text_info, mode=mode, draw_pos=draw_pos, ori_image=ori_image, num_images_per_prompt=num_images_per_prompt, np_hint=np_hint, ) height, width = 512, 512 else: assert False # 5. Prepare timesteps timesteps, num_inference_steps = retrieve_timesteps( self.scheduler, num_inference_steps, device, timesteps, sigmas ) self._num_timesteps = len(timesteps) # 6. Prepare latent variables num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents( batch_size * num_images_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents, ) # 7. Prepare extra step kwargs. 
TODO: Logic should ideally just be moved out of the pipeline extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # 7.1 Add image embeds for IP-Adapter added_cond_kwargs = ( {"image_embeds": image_embeds} if ip_adapter_image is not None or ip_adapter_image_embeds is not None else None ) # 7.2 Create tensor stating which controlnets to keep controlnet_keep = [] for i in range(len(timesteps)): keeps = [ 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e) for s, e in zip(control_guidance_start, control_guidance_end) ] controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps) # 8. Denoising loop num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order is_unet_compiled = is_compiled_module(self.unet) is_controlnet_compiled = is_compiled_module(self.controlnet) is_torch_higher_equal_2_1 = is_torch_version(">=", "2.1") with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): # Relevant thread: # https://dev-discuss.pytorch.org/t/cudagraphs-in-pytorch-2-0/1428 if (is_unet_compiled and is_controlnet_compiled) and is_torch_higher_equal_2_1: torch._inductor.cudagraph_mark_step_begin() # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) # controlnet(s) inference if guess_mode and self.do_classifier_free_guidance: # Infer ControlNet only for the conditional batch. 
control_model_input = latents control_model_input = self.scheduler.scale_model_input(control_model_input, t) controlnet_prompt_embeds = prompt_embeds.chunk(2)[1] else: control_model_input = latent_model_input controlnet_prompt_embeds = prompt_embeds if isinstance(controlnet_keep[i], list): cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])] else: controlnet_cond_scale = controlnet_conditioning_scale if isinstance(controlnet_cond_scale, list): controlnet_cond_scale = controlnet_cond_scale[0] cond_scale = controlnet_cond_scale * controlnet_keep[i] down_block_res_samples, mid_block_res_sample = self.controlnet( control_model_input.to(self.controlnet.dtype), t, encoder_hidden_states=controlnet_prompt_embeds, controlnet_cond=guided_hint, conditioning_scale=cond_scale, guess_mode=guess_mode, return_dict=False, ) if guess_mode and self.do_classifier_free_guidance: # Inferred ControlNet only for the conditional batch. # To apply the output of ControlNet to both the unconditional and conditional batches, # add 0 to the unconditional batch to keep it unchanged. 
down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples] mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample]) # predict the noise residual noise_pred = self.unet( latent_model_input, t, encoder_hidden_states=prompt_embeds, timestep_cond=timestep_cond, cross_attention_kwargs=self.cross_attention_kwargs, down_block_additional_residuals=down_block_res_samples, mid_block_additional_residual=mid_block_res_sample, added_cond_kwargs=added_cond_kwargs, return_dict=False, )[0] # perform guidance if self.do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop("latents", latents) prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, "order", 1) callback(step_idx, t, latents) # If we do sequential model offloading, let's offload unet and controlnet # manually for max memory savings if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: self.unet.to("cpu") self.controlnet.to("cpu") torch.cuda.empty_cache() if not output_type == "latent": image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, 
generator=generator)[ 0 ] image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) else: image = latents has_nsfw_concept = None if has_nsfw_concept is None: do_denormalize = [True] * image.shape[0] else: do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) # Offload all models self.maybe_free_model_hooks() if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) def to(self, *args, **kwargs): super().to(*args, **kwargs) self.text_embedding_module.to(*args, **kwargs) self.auxiliary_latent_module.to(*args, **kwargs) return self
{ "repo_id": "huggingface/diffusers", "file_path": "examples/research_projects/anytext/anytext.py", "license": "Apache License 2.0", "lines": 2105, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:examples/research_projects/anytext/anytext_controlnet.py
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Based on [AnyText: Multilingual Visual Text Generation And Editing](https://huggingface.co/papers/2311.03054). # Authors: Yuxiang Tuo, Wangmeng Xiang, Jun-Yan He, Yifeng Geng, Xuansong Xie # Code: https://github.com/tyxsspa/AnyText with Apache-2.0 license # # Adapted to Diffusers by [M. Tolga Cangöz](https://github.com/tolgacangoz). from typing import Any, Dict, Optional, Tuple, Union import torch from torch import nn from diffusers.configuration_utils import register_to_config from diffusers.models.controlnets.controlnet import ( ControlNetModel, ControlNetOutput, ) from diffusers.utils import logging logger = logging.get_logger(__name__) # pylint: disable=invalid-name class AnyTextControlNetConditioningEmbedding(nn.Module): """ Quoting from https://huggingface.co/papers/2302.05543: "Stable Diffusion uses a pre-processing method similar to VQ-GAN [11] to convert the entire dataset of 512 × 512 images into smaller 64 × 64 “latent images” for stabilized training. This requires ControlNets to convert image-based conditions to 64 × 64 feature space to match the convolution size. We use a tiny network E(·) of four convolution layers with 4 × 4 kernels and 2 × 2 strides (activated by ReLU, channels are 16, 32, 64, 128, initialized with Gaussian weights, trained jointly with the full model) to encode image-space conditions ... into feature maps ..." 
""" def __init__( self, conditioning_embedding_channels: int, glyph_channels=1, position_channels=1, ): super().__init__() self.glyph_block = nn.Sequential( nn.Conv2d(glyph_channels, 8, 3, padding=1), nn.SiLU(), nn.Conv2d(8, 8, 3, padding=1), nn.SiLU(), nn.Conv2d(8, 16, 3, padding=1, stride=2), nn.SiLU(), nn.Conv2d(16, 16, 3, padding=1), nn.SiLU(), nn.Conv2d(16, 32, 3, padding=1, stride=2), nn.SiLU(), nn.Conv2d(32, 32, 3, padding=1), nn.SiLU(), nn.Conv2d(32, 96, 3, padding=1, stride=2), nn.SiLU(), nn.Conv2d(96, 96, 3, padding=1), nn.SiLU(), nn.Conv2d(96, 256, 3, padding=1, stride=2), nn.SiLU(), ) self.position_block = nn.Sequential( nn.Conv2d(position_channels, 8, 3, padding=1), nn.SiLU(), nn.Conv2d(8, 8, 3, padding=1), nn.SiLU(), nn.Conv2d(8, 16, 3, padding=1, stride=2), nn.SiLU(), nn.Conv2d(16, 16, 3, padding=1), nn.SiLU(), nn.Conv2d(16, 32, 3, padding=1, stride=2), nn.SiLU(), nn.Conv2d(32, 32, 3, padding=1), nn.SiLU(), nn.Conv2d(32, 64, 3, padding=1, stride=2), nn.SiLU(), ) self.fuse_block = nn.Conv2d(256 + 64 + 4, conditioning_embedding_channels, 3, padding=1) def forward(self, glyphs, positions, text_info): glyph_embedding = self.glyph_block(glyphs.to(self.glyph_block[0].weight.device)) position_embedding = self.position_block(positions.to(self.position_block[0].weight.device)) guided_hint = self.fuse_block(torch.cat([glyph_embedding, position_embedding, text_info["masked_x"]], dim=1)) return guided_hint class AnyTextControlNetModel(ControlNetModel): """ A AnyTextControlNetModel model. Args: in_channels (`int`, defaults to 4): The number of channels in the input sample. flip_sin_to_cos (`bool`, defaults to `True`): Whether to flip the sin to cos in the time embedding. freq_shift (`int`, defaults to 0): The frequency shift to apply to the time embedding. down_block_types (`tuple[str]`, defaults to `("CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D")`): The tuple of downsample blocks to use. 
only_cross_attention (`Union[bool, Tuple[bool]]`, defaults to `False`): block_out_channels (`tuple[int]`, defaults to `(320, 640, 1280, 1280)`): The tuple of output channels for each block. layers_per_block (`int`, defaults to 2): The number of layers per block. downsample_padding (`int`, defaults to 1): The padding to use for the downsampling convolution. mid_block_scale_factor (`float`, defaults to 1): The scale factor to use for the mid block. act_fn (`str`, defaults to "silu"): The activation function to use. norm_num_groups (`int`, *optional*, defaults to 32): The number of groups to use for the normalization. If None, normalization and activation layers is skipped in post-processing. norm_eps (`float`, defaults to 1e-5): The epsilon to use for the normalization. cross_attention_dim (`int`, defaults to 1280): The dimension of the cross attention features. transformer_layers_per_block (`int` or `Tuple[int]`, *optional*, defaults to 1): The number of transformer blocks of type [`~models.attention.BasicTransformerBlock`]. Only relevant for [`~models.unet_2d_blocks.CrossAttnDownBlock2D`], [`~models.unet_2d_blocks.CrossAttnUpBlock2D`], [`~models.unet_2d_blocks.UNetMidBlock2DCrossAttn`]. encoder_hid_dim (`int`, *optional*, defaults to None): If `encoder_hid_dim_type` is defined, `encoder_hidden_states` will be projected from `encoder_hid_dim` dimension to `cross_attention_dim`. encoder_hid_dim_type (`str`, *optional*, defaults to `None`): If given, the `encoder_hidden_states` and potentially other embeddings are down-projected to text embeddings of dimension `cross_attention` according to `encoder_hid_dim_type`. attention_head_dim (`Union[int, Tuple[int]]`, defaults to 8): The dimension of the attention heads. use_linear_projection (`bool`, defaults to `False`): class_embed_type (`str`, *optional*, defaults to `None`): The type of class embedding to use which is ultimately summed with the time embeddings. 

            Choose from None, `"timestep"`, `"identity"`, `"projection"`, or `"simple_projection"`.
        addition_embed_type (`str`, *optional*, defaults to `None`):
            Configures an optional embedding which will be summed with the time embeddings. Choose from `None` or
            "text". "text" will use the `TextTimeEmbedding` layer.
        num_class_embeds (`int`, *optional*, defaults to 0):
            Input dimension of the learnable embedding matrix to be projected to `time_embed_dim`, when performing
            class conditioning with `class_embed_type` equal to `None`.
        upcast_attention (`bool`, defaults to `False`):
        resnet_time_scale_shift (`str`, defaults to `"default"`):
            Time scale shift config for ResNet blocks (see `ResnetBlock2D`). Choose from `default` or `scale_shift`.
        projection_class_embeddings_input_dim (`int`, *optional*, defaults to `None`):
            The dimension of the `class_labels` input when `class_embed_type="projection"`. Required when
            `class_embed_type="projection"`.
        controlnet_conditioning_channel_order (`str`, defaults to `"rgb"`):
            The channel order of conditional image. Will convert to `rgb` if it's `bgr`.
        conditioning_embedding_out_channels (`tuple[int]`, *optional*, defaults to `(16, 32, 96, 256)`):
            The tuple of output channel for each block in the `conditioning_embedding` layer.
        global_pool_conditions (`bool`, defaults to `False`):
            TODO(Patrick) - unused parameter.
        addition_embed_type_num_heads (`int`, defaults to 64):
            The number of heads to use for the `TextTimeEmbedding` layer.
    """

    _supports_gradient_checkpointing = True

    @register_to_config
    def __init__(
        self,
        in_channels: int = 4,
        conditioning_channels: int = 1,
        flip_sin_to_cos: bool = True,
        freq_shift: int = 0,
        down_block_types: Tuple[str, ...] = (
            "CrossAttnDownBlock2D",
            "CrossAttnDownBlock2D",
            "CrossAttnDownBlock2D",
            "DownBlock2D",
        ),
        mid_block_type: str | None = "UNetMidBlock2DCrossAttn",
        only_cross_attention: Union[bool, Tuple[bool]] = False,
        block_out_channels: Tuple[int, ...] = (320, 640, 1280, 1280),
        layers_per_block: int = 2,
        downsample_padding: int = 1,
        mid_block_scale_factor: float = 1,
        act_fn: str = "silu",
        norm_num_groups: Optional[int] = 32,
        norm_eps: float = 1e-5,
        cross_attention_dim: int = 1280,
        transformer_layers_per_block: Union[int, Tuple[int, ...]] = 1,
        encoder_hid_dim: Optional[int] = None,
        encoder_hid_dim_type: str | None = None,
        attention_head_dim: Union[int, Tuple[int, ...]] = 8,
        num_attention_heads: Optional[Union[int, Tuple[int, ...]]] = None,
        use_linear_projection: bool = False,
        class_embed_type: str | None = None,
        addition_embed_type: str | None = None,
        addition_time_embed_dim: Optional[int] = None,
        num_class_embeds: Optional[int] = None,
        upcast_attention: bool = False,
        resnet_time_scale_shift: str = "default",
        projection_class_embeddings_input_dim: Optional[int] = None,
        controlnet_conditioning_channel_order: str = "rgb",
        conditioning_embedding_out_channels: Optional[Tuple[int, ...]] = (16, 32, 96, 256),
        global_pool_conditions: bool = False,
        addition_embed_type_num_heads: int = 64,
    ):
        # Build the stock ControlNetModel with the same configuration (positional pass-through),
        # then swap its conditioning embedding for the AnyText glyph/position variant below.
        super().__init__(
            in_channels,
            conditioning_channels,
            flip_sin_to_cos,
            freq_shift,
            down_block_types,
            mid_block_type,
            only_cross_attention,
            block_out_channels,
            layers_per_block,
            downsample_padding,
            mid_block_scale_factor,
            act_fn,
            norm_num_groups,
            norm_eps,
            cross_attention_dim,
            transformer_layers_per_block,
            encoder_hid_dim,
            encoder_hid_dim_type,
            attention_head_dim,
            num_attention_heads,
            use_linear_projection,
            class_embed_type,
            addition_embed_type,
            addition_time_embed_dim,
            num_class_embeds,
            upcast_attention,
            resnet_time_scale_shift,
            projection_class_embeddings_input_dim,
            controlnet_conditioning_channel_order,
            conditioning_embedding_out_channels,
            global_pool_conditions,
            addition_embed_type_num_heads,
        )

        # control net conditioning embedding
        self.controlnet_cond_embedding = AnyTextControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0],
            glyph_channels=conditioning_channels,
            position_channels=conditioning_channels,
        )

    def forward(
        self,
        sample: torch.Tensor,
        timestep: Union[torch.Tensor, float, int],
        encoder_hidden_states: torch.Tensor,
        controlnet_cond: torch.Tensor,
        conditioning_scale: float = 1.0,
        class_labels: Optional[torch.Tensor] = None,
        timestep_cond: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        guess_mode: bool = False,
        return_dict: bool = True,
    ) -> Union[ControlNetOutput, Tuple[Tuple[torch.Tensor, ...], torch.Tensor]]:
        """
        The [`~PromptDiffusionControlNetModel`] forward method.

        Args:
            sample (`torch.Tensor`):
                The noisy input tensor.
            timestep (`Union[torch.Tensor, float, int]`):
                The number of timesteps to denoise an input.
            encoder_hidden_states (`torch.Tensor`):
                The encoder hidden states.
            #controlnet_cond (`torch.Tensor`):
            #    The conditional input tensor of shape `(batch_size, sequence_length, hidden_size)`.
            conditioning_scale (`float`, defaults to `1.0`):
                The scale factor for ControlNet outputs.
            class_labels (`torch.Tensor`, *optional*, defaults to `None`):
                Optional class labels for conditioning. Their embeddings will be summed with the timestep embeddings.
            timestep_cond (`torch.Tensor`, *optional*, defaults to `None`):
                Additional conditional embeddings for timestep. If provided, the embeddings will be summed with the
                timestep_embedding passed through the `self.time_embedding` layer to obtain the final timestep
                embeddings.
            attention_mask (`torch.Tensor`, *optional*, defaults to `None`):
                An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. If `1` the mask
                is kept, otherwise if `0` it is discarded. Mask will be converted into a bias, which adds large
                negative values to the attention scores corresponding to "discard" tokens.
            added_cond_kwargs (`dict`):
                Additional conditions for the Stable Diffusion XL UNet.
            cross_attention_kwargs (`dict[str]`, *optional*, defaults to `None`):
                A kwargs dictionary that if specified is passed along to the `AttnProcessor`.
            guess_mode (`bool`, defaults to `False`):
                In this mode, the ControlNet encoder tries its best to recognize the input content of the input even if
                you remove all prompts. A `guidance_scale` between 3.0 and 5.0 is recommended.
            return_dict (`bool`, defaults to `True`):
                Whether or not to return a [`~models.controlnet.ControlNetOutput`] instead of a plain tuple.

        Returns:
            [`~models.controlnet.ControlNetOutput`] **or** `tuple`:
                If `return_dict` is `True`, a [`~models.controlnet.ControlNetOutput`] is returned, otherwise a tuple is
                returned where the first element is the sample tensor.
        """
        # check channel order
        channel_order = self.config.controlnet_conditioning_channel_order

        if channel_order == "rgb":
            # in rgb order by default
            ...
        # elif channel_order == "bgr":
        #     controlnet_cond = torch.flip(controlnet_cond, dims=[1])
        else:
            raise ValueError(f"unknown `controlnet_conditioning_channel_order`: {channel_order}")

        # prepare attention_mask: convert a 0/1 keep-mask into an additive bias
        if attention_mask is not None:
            attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0
            attention_mask = attention_mask.unsqueeze(1)

        # 1. time
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
            # This would be a good case for the `match` statement (Python 3.10+)
            is_mps = sample.device.type == "mps"
            if isinstance(timestep, float):
                dtype = torch.float32 if is_mps else torch.float64
            else:
                dtype = torch.int32 if is_mps else torch.int64
            timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)
        elif len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps.expand(sample.shape[0])

        t_emb = self.time_proj(timesteps)

        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might actually be running in fp16. so we need to cast here.
        # there might be better ways to encapsulate this.
        t_emb = t_emb.to(dtype=sample.dtype)

        emb = self.time_embedding(t_emb, timestep_cond)
        aug_emb = None

        if self.class_embedding is not None:
            if class_labels is None:
                raise ValueError("class_labels should be provided when num_class_embeds > 0")

            if self.config.class_embed_type == "timestep":
                class_labels = self.time_proj(class_labels)

            class_emb = self.class_embedding(class_labels).to(dtype=self.dtype)
            emb = emb + class_emb

        if self.config.addition_embed_type is not None:
            if self.config.addition_embed_type == "text":
                aug_emb = self.add_embedding(encoder_hidden_states)

            elif self.config.addition_embed_type == "text_time":
                if "text_embeds" not in added_cond_kwargs:
                    raise ValueError(
                        f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `text_embeds` to be passed in `added_cond_kwargs`"
                    )
                text_embeds = added_cond_kwargs.get("text_embeds")
                if "time_ids" not in added_cond_kwargs:
                    raise ValueError(
                        f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `time_ids` to be passed in `added_cond_kwargs`"
                    )
                time_ids = added_cond_kwargs.get("time_ids")
                time_embeds = self.add_time_proj(time_ids.flatten())
                time_embeds = time_embeds.reshape((text_embeds.shape[0], -1))

                add_embeds = torch.concat([text_embeds, time_embeds], dim=-1)
                add_embeds = add_embeds.to(emb.dtype)
                aug_emb = self.add_embedding(add_embeds)

        emb = emb + aug_emb if aug_emb is not None else emb

        # 2. pre-process
        sample = self.conv_in(sample)

        # `controlnet_cond` is unpacked into (glyphs, positions, text_info) — see
        # AnyTextControlNetConditioningEmbedding.forward (hence the *-splat, unlike the base class).
        controlnet_cond = self.controlnet_cond_embedding(*controlnet_cond)
        sample = sample + controlnet_cond

        # 3. down
        down_block_res_samples = (sample,)
        for downsample_block in self.down_blocks:
            if hasattr(downsample_block, "has_cross_attention") and downsample_block.has_cross_attention:
                sample, res_samples = downsample_block(
                    hidden_states=sample,
                    temb=emb,
                    encoder_hidden_states=encoder_hidden_states,
                    attention_mask=attention_mask,
                    cross_attention_kwargs=cross_attention_kwargs,
                )
            else:
                sample, res_samples = downsample_block(hidden_states=sample, temb=emb)

            down_block_res_samples += res_samples

        # 4. mid
        if self.mid_block is not None:
            if hasattr(self.mid_block, "has_cross_attention") and self.mid_block.has_cross_attention:
                sample = self.mid_block(
                    sample,
                    emb,
                    encoder_hidden_states=encoder_hidden_states,
                    attention_mask=attention_mask,
                    cross_attention_kwargs=cross_attention_kwargs,
                )
            else:
                sample = self.mid_block(sample, emb)

        # 5. Control net blocks
        # project each residual through its zero-initialized 1x1 controlnet block
        controlnet_down_block_res_samples = ()

        for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):
            down_block_res_sample = controlnet_block(down_block_res_sample)
            controlnet_down_block_res_samples = controlnet_down_block_res_samples + (down_block_res_sample,)

        down_block_res_samples = controlnet_down_block_res_samples

        mid_block_res_sample = self.controlnet_mid_block(sample)

        # 6.
scaling if guess_mode and not self.config.global_pool_conditions: scales = torch.logspace(-1, 0, len(down_block_res_samples) + 1, device=sample.device) # 0.1 to 1.0 scales = scales * conditioning_scale down_block_res_samples = [sample * scale for sample, scale in zip(down_block_res_samples, scales)] mid_block_res_sample = mid_block_res_sample * scales[-1] # last one else: down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples] mid_block_res_sample = mid_block_res_sample * conditioning_scale if self.config.global_pool_conditions: down_block_res_samples = [ torch.mean(sample, dim=(2, 3), keepdim=True) for sample in down_block_res_samples ] mid_block_res_sample = torch.mean(mid_block_res_sample, dim=(2, 3), keepdim=True) if not return_dict: return (down_block_res_samples, mid_block_res_sample) return ControlNetOutput( down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample ) # Copied from diffusers.models.controlnet.zero_module def zero_module(module): for p in module.parameters(): nn.init.zeros_(p) return module
{ "repo_id": "huggingface/diffusers", "file_path": "examples/research_projects/anytext/anytext_controlnet.py", "license": "Apache License 2.0", "lines": 411, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:examples/research_projects/anytext/ocr_recog/RNN.py
import torch
from torch import nn

from .RecSVTR import Block


class Swish(nn.Module):
    """``x * sigmoid(x)`` activation (a.k.a. SiLU)."""

    def __init__(self):
        # Fixed from `__int__` (typo). The misspelled method was dead code —
        # `nn.Module.__init__` still ran on instantiation — so behavior is
        # unchanged; the class is now just spelled correctly.
        super(Swish, self).__init__()

    def forward(self, x):
        return x * torch.sigmoid(x)


class Im2Im(nn.Module):
    """Identity neck: passes the feature map through unchanged."""

    def __init__(self, in_channels, **kwargs):
        super().__init__()
        self.out_channels = in_channels

    def forward(self, x):
        return x


class Im2Seq(nn.Module):
    """Flatten a (B, C, H, W) feature map into a (B, H*W, C) sequence."""

    def __init__(self, in_channels, **kwargs):
        super().__init__()
        self.out_channels = in_channels

    def forward(self, x):
        B, C, H, W = x.shape
        # assert H == 1
        x = x.reshape(B, C, H * W)
        x = x.permute((0, 2, 1))
        return x


class EncoderWithRNN(nn.Module):
    """Two-layer bidirectional LSTM encoder over a (B, T, C) sequence."""

    def __init__(self, in_channels, **kwargs):
        super(EncoderWithRNN, self).__init__()
        hidden_size = kwargs.get("hidden_size", 256)
        # bidirectional -> features from both directions are concatenated
        self.out_channels = hidden_size * 2
        self.lstm = nn.LSTM(in_channels, hidden_size, bidirectional=True, num_layers=2, batch_first=True)

    def forward(self, x):
        self.lstm.flatten_parameters()
        x, _ = self.lstm(x)
        return x


class SequenceEncoder(nn.Module):
    """Reshape a CNN feature map to a sequence and optionally encode it.

    ``encoder_type`` selects "reshape" (no encoder), "rnn" (BiLSTM) or
    "svtr" (SVTR global-attention encoder, which keeps the 4-D input and
    reshapes afterwards).
    """

    def __init__(self, in_channels, encoder_type="rnn", **kwargs):
        super(SequenceEncoder, self).__init__()
        self.encoder_reshape = Im2Seq(in_channels)
        self.out_channels = self.encoder_reshape.out_channels
        self.encoder_type = encoder_type
        if encoder_type == "reshape":
            self.only_reshape = True
        else:
            support_encoder_dict = {"reshape": Im2Seq, "rnn": EncoderWithRNN, "svtr": EncoderWithSVTR}
            assert encoder_type in support_encoder_dict, "{} must in {}".format(
                encoder_type, support_encoder_dict.keys()
            )
            self.encoder = support_encoder_dict[encoder_type](self.encoder_reshape.out_channels, **kwargs)
            self.out_channels = self.encoder.out_channels
            self.only_reshape = False

    def forward(self, x):
        if self.encoder_type != "svtr":
            # reshape first, then (optionally) run the sequence encoder
            x = self.encoder_reshape(x)
            if not self.only_reshape:
                x = self.encoder(x)
            return x
        else:
            # SVTR consumes the 4-D map directly; flatten afterwards
            x = self.encoder(x)
            x = self.encoder_reshape(x)
            return x


class ConvBNLayer(nn.Module):
    """Conv2d + BatchNorm + Swish.

    NOTE: the ``act`` parameter is accepted for signature compatibility but the
    activation is hard-wired to :class:`Swish`; every call site in this file
    passes ``act="swish"``, so behavior matches.
    """

    def __init__(
        self, in_channels, out_channels, kernel_size=3, stride=1, padding=0, bias_attr=False, groups=1, act=nn.GELU
    ):
        super().__init__()
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            # weight_attr=paddle.ParamAttr(initializer=nn.initializer.KaimingUniform()),
            bias=bias_attr,
        )
        self.norm = nn.BatchNorm2d(out_channels)
        self.act = Swish()

    def forward(self, inputs):
        out = self.conv(inputs)
        out = self.norm(out)
        out = self.act(out)
        return out


class EncoderWithSVTR(nn.Module):
    """SVTR-style encoder: conv bottleneck, global-attention blocks, conv fuse.

    The input is reduced with two convs, processed by ``depth`` global
    mixing blocks, projected back, concatenated with the (optionally
    gradient-detached) input, and fused down to ``dims`` channels.
    """

    def __init__(
        self,
        in_channels,
        dims=64,  # XS
        depth=2,
        hidden_dims=120,
        use_guide=False,
        num_heads=8,
        qkv_bias=True,
        mlp_ratio=2.0,
        drop_rate=0.1,
        attn_drop_rate=0.1,
        drop_path=0.0,
        qk_scale=None,
    ):
        super(EncoderWithSVTR, self).__init__()
        self.depth = depth
        self.use_guide = use_guide
        self.conv1 = ConvBNLayer(in_channels, in_channels // 8, padding=1, act="swish")
        self.conv2 = ConvBNLayer(in_channels // 8, hidden_dims, kernel_size=1, act="swish")

        self.svtr_block = nn.ModuleList(
            [
                Block(
                    dim=hidden_dims,
                    num_heads=num_heads,
                    mixer="Global",
                    HW=None,
                    mlp_ratio=mlp_ratio,
                    qkv_bias=qkv_bias,
                    qk_scale=qk_scale,
                    drop=drop_rate,
                    act_layer="swish",
                    attn_drop=attn_drop_rate,
                    drop_path=drop_path,
                    norm_layer="nn.LayerNorm",
                    epsilon=1e-05,
                    prenorm=False,
                )
                for _ in range(depth)
            ]
        )
        self.norm = nn.LayerNorm(hidden_dims, eps=1e-6)
        self.conv3 = ConvBNLayer(hidden_dims, in_channels, kernel_size=1, act="swish")
        # last conv-nxn, the input is concat of input tensor and conv3 output tensor
        self.conv4 = ConvBNLayer(2 * in_channels, in_channels // 8, padding=1, act="swish")
        self.conv1x1 = ConvBNLayer(in_channels // 8, dims, kernel_size=1, act="swish")
        self.out_channels = dims
        self.apply(self._init_weights)

    def _init_weights(self, m):
        # weight initialization
        if isinstance(m, nn.Conv2d):
            nn.init.kaiming_normal_(m.weight, mode="fan_out")
            if m.bias is not None:
                nn.init.zeros_(m.bias)
        elif isinstance(m, nn.BatchNorm2d):
            nn.init.ones_(m.weight)
            nn.init.zeros_(m.bias)
        elif isinstance(m, nn.Linear):
            nn.init.normal_(m.weight, 0, 0.01)
            if m.bias is not None:
                nn.init.zeros_(m.bias)
        elif isinstance(m, nn.ConvTranspose2d):
            nn.init.kaiming_normal_(m.weight, mode="fan_out")
            if m.bias is not None:
                nn.init.zeros_(m.bias)
        elif isinstance(m, nn.LayerNorm):
            nn.init.ones_(m.weight)
            nn.init.zeros_(m.bias)

    def forward(self, x):
        # for use guide
        if self.use_guide:
            # Bug fix: the original Paddle leftover `z.stop_gradient = True`
            # only set an inert attribute in PyTorch. detach() actually blocks
            # gradients from flowing back through the guide branch, which is
            # what the flag was meant to do.
            z = x.clone().detach()
        else:
            z = x
        # for short cut
        h = z
        # reduce dim
        z = self.conv1(z)
        z = self.conv2(z)
        # SVTR global block
        B, C, H, W = z.shape
        z = z.flatten(2).permute(0, 2, 1)
        for blk in self.svtr_block:
            z = blk(z)
        z = self.norm(z)
        # last stage: restore (B, C, H, W), project back, fuse with shortcut
        z = z.reshape([-1, H, W, C]).permute(0, 3, 1, 2)
        z = self.conv3(z)
        z = torch.cat((h, z), dim=1)
        z = self.conv1x1(self.conv4(z))
        return z
{ "repo_id": "huggingface/diffusers", "file_path": "examples/research_projects/anytext/ocr_recog/RNN.py", "license": "Apache License 2.0", "lines": 178, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
huggingface/diffusers:examples/research_projects/anytext/ocr_recog/RecCTCHead.py
from torch import nn


class CTCHead(nn.Module):
    """Linear classification head for CTC-based text recognition.

    Projects per-timestep features to vocabulary logits, either directly
    (``mid_channels is None``) or through a bottleneck ``fc1 -> fc2``.  With
    ``return_feats=True`` the forward pass also exposes the pre-classifier
    ("neck") features.  ``fc_decay`` and ``**kwargs`` are accepted for config
    compatibility and unused here.
    """

    def __init__(
        self, in_channels, out_channels=6625, fc_decay=0.0004, mid_channels=None, return_feats=False, **kwargs
    ):
        super().__init__()
        if mid_channels is None:
            # single projection straight to the vocabulary size
            self.fc = nn.Linear(in_channels, out_channels, bias=True)
        else:
            # bottleneck: in_channels -> mid_channels -> out_channels
            self.fc1 = nn.Linear(in_channels, mid_channels, bias=True)
            self.fc2 = nn.Linear(mid_channels, out_channels, bias=True)

        self.out_channels = out_channels
        self.mid_channels = mid_channels
        self.return_feats = return_feats

    def forward(self, x, labels=None):
        if self.mid_channels is None:
            logits = self.fc(x)
        else:
            x = self.fc1(x)
            logits = self.fc2(x)

        if not self.return_feats:
            return logits
        # "ctc_neck" is the feature fed into the final classifier (the raw
        # input when there is no bottleneck).
        return {"ctc": logits, "ctc_neck": x}
{ "repo_id": "huggingface/diffusers", "file_path": "examples/research_projects/anytext/ocr_recog/RecCTCHead.py", "license": "Apache License 2.0", "lines": 39, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
huggingface/diffusers:examples/research_projects/anytext/ocr_recog/RecModel.py
import torch
from torch import nn

from .RecCTCHead import CTCHead
from .RecMv1_enhance import MobileNetV1Enhance
from .RNN import Im2Im, Im2Seq, SequenceEncoder

# Registries mapping config "type" strings to module classes.
backbone_dict = {"MobileNetV1Enhance": MobileNetV1Enhance}
neck_dict = {"SequenceEncoder": SequenceEncoder, "Im2Seq": Im2Seq, "None": Im2Im}
head_dict = {"CTCHead": CTCHead}


class RecModel(nn.Module):
    """Text-recognition model assembled from a config dict.

    ``config`` must contain ``in_channels`` plus ``backbone``/``neck``/``head``
    sub-dicts, each with a ``type`` key naming an entry in the registries
    above; the remaining keys are forwarded as constructor kwargs.  Note the
    ``type`` keys are consumed (``pop``), so the dict is mutated.
    """

    def __init__(self, config):
        super().__init__()
        assert "in_channels" in config, "in_channels must in model config"
        backbone_type = config["backbone"].pop("type")
        assert backbone_type in backbone_dict, f"backbone.type must in {backbone_dict}"
        self.backbone = backbone_dict[backbone_type](config["in_channels"], **config["backbone"])

        neck_type = config["neck"].pop("type")
        assert neck_type in neck_dict, f"neck.type must in {neck_dict}"
        self.neck = neck_dict[neck_type](self.backbone.out_channels, **config["neck"])

        head_type = config["head"].pop("type")
        assert head_type in head_dict, f"head.type must in {head_dict}"
        self.head = head_dict[head_type](self.neck.out_channels, **config["head"])

        self.name = f"RecModel_{backbone_type}_{neck_type}_{head_type}"

    def load_3rd_state_dict(self, _3rd_name, _state):
        # Delegates third-party checkpoint loading to each sub-module.
        # NOTE(review): assumes backbone/neck/head all implement
        # load_3rd_state_dict — verify before calling.
        self.backbone.load_3rd_state_dict(_3rd_name, _state)
        self.neck.load_3rd_state_dict(_3rd_name, _state)
        self.head.load_3rd_state_dict(_3rd_name, _state)

    def forward(self, x):
        # Cast once at the input boundary (this was previously done via a
        # function-local `import torch`, now hoisted to the module imports).
        x = x.to(torch.float32)
        x = self.backbone(x)
        x = self.neck(x)
        x = self.head(x)
        return x

    def encode(self, x):
        # NOTE(review): assumes the configured head exposes `ctc_encoder`;
        # the CTCHead defined in this package does not — confirm intended head.
        x = self.backbone(x)
        x = self.neck(x)
        x = self.head.ctc_encoder(x)
        return x
{ "repo_id": "huggingface/diffusers", "file_path": "examples/research_projects/anytext/ocr_recog/RecModel.py", "license": "Apache License 2.0", "lines": 37, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
huggingface/diffusers:examples/research_projects/anytext/ocr_recog/RecMv1_enhance.py
import torch
import torch.nn as nn
import torch.nn.functional as F

from .common import Activation


class ConvBNLayer(nn.Module):
    """Conv2d (no bias) + BatchNorm + optional activation.

    `channels` is accepted for config compatibility and unused; `act` names an
    activation understood by :class:`Activation`, or ``None`` for linear output.
    """

    def __init__(
        self, num_channels, filter_size, num_filters, stride, padding, channels=None, num_groups=1, act="hard_swish"
    ):
        super(ConvBNLayer, self).__init__()
        self.act = act
        self._conv = nn.Conv2d(
            in_channels=num_channels,
            out_channels=num_filters,
            kernel_size=filter_size,
            stride=stride,
            padding=padding,
            groups=num_groups,
            bias=False,
        )

        self._batch_norm = nn.BatchNorm2d(
            num_filters,
        )
        if self.act is not None:
            self._act = Activation(act_type=act, inplace=True)

    def forward(self, inputs):
        y = self._conv(inputs)
        y = self._batch_norm(y)
        if self.act is not None:
            y = self._act(y)
        return y


class DepthwiseSeparable(nn.Module):
    """Depthwise conv (grouped) followed by a 1x1 pointwise conv, with an
    optional squeeze-and-excitation block between them.

    All channel counts are scaled by `scale` (the net's width multiplier).
    """

    def __init__(
        self, num_channels, num_filters1, num_filters2, num_groups, stride, scale, dw_size=3, padding=1, use_se=False
    ):
        super(DepthwiseSeparable, self).__init__()
        self.use_se = use_se
        # depthwise: groups == channels (after scaling)
        self._depthwise_conv = ConvBNLayer(
            num_channels=num_channels,
            num_filters=int(num_filters1 * scale),
            filter_size=dw_size,
            stride=stride,
            padding=padding,
            num_groups=int(num_groups * scale),
        )
        if use_se:
            # SEModule is defined later in this file; fine, since it is only
            # referenced at instantiation time.
            self._se = SEModule(int(num_filters1 * scale))
        self._pointwise_conv = ConvBNLayer(
            num_channels=int(num_filters1 * scale),
            filter_size=1,
            num_filters=int(num_filters2 * scale),
            stride=1,
            padding=0,
        )

    def forward(self, inputs):
        y = self._depthwise_conv(inputs)
        if self.use_se:
            y = self._se(y)
        y = self._pointwise_conv(y)
        return y


class MobileNetV1Enhance(nn.Module):
    """Width-scalable MobileNetV1 backbone adapted for text recognition:
    several stages use stride ``(2, 1)`` so height is downsampled more
    aggressively than width, preserving horizontal resolution.
    """

    def __init__(self, in_channels=3, scale=0.5, last_conv_stride=1, last_pool_type="max", **kwargs):
        super().__init__()
        self.scale = scale
        self.block_list = []

        # stem: 3x3 stride-2 conv
        self.conv1 = ConvBNLayer(
            num_channels=in_channels, filter_size=3, channels=3, num_filters=int(32 * scale), stride=2, padding=1
        )

        conv2_1 = DepthwiseSeparable(
            num_channels=int(32 * scale), num_filters1=32, num_filters2=64, num_groups=32, stride=1, scale=scale
        )
        self.block_list.append(conv2_1)

        conv2_2 = DepthwiseSeparable(
            num_channels=int(64 * scale), num_filters1=64, num_filters2=128, num_groups=64, stride=1, scale=scale
        )
        self.block_list.append(conv2_2)

        conv3_1 = DepthwiseSeparable(
            num_channels=int(128 * scale), num_filters1=128, num_filters2=128, num_groups=128, stride=1, scale=scale
        )
        self.block_list.append(conv3_1)

        # height-only downsampling from here on (stride (2, 1))
        conv3_2 = DepthwiseSeparable(
            num_channels=int(128 * scale),
            num_filters1=128,
            num_filters2=256,
            num_groups=128,
            stride=(2, 1),
            scale=scale,
        )
        self.block_list.append(conv3_2)

        conv4_1 = DepthwiseSeparable(
            num_channels=int(256 * scale), num_filters1=256, num_filters2=256, num_groups=256, stride=1, scale=scale
        )
        self.block_list.append(conv4_1)

        conv4_2 = DepthwiseSeparable(
            num_channels=int(256 * scale),
            num_filters1=256,
            num_filters2=512,
            num_groups=256,
            stride=(2, 1),
            scale=scale,
        )
        self.block_list.append(conv4_2)

        # five identical 5x5 depthwise stages at 512 channels
        for _ in range(5):
            conv5 = DepthwiseSeparable(
                num_channels=int(512 * scale),
                num_filters1=512,
                num_filters2=512,
                num_groups=512,
                stride=1,
                dw_size=5,
                padding=2,
                scale=scale,
                use_se=False,
            )
            self.block_list.append(conv5)

        conv5_6 = DepthwiseSeparable(
            num_channels=int(512 * scale),
            num_filters1=512,
            num_filters2=1024,
            num_groups=512,
            stride=(2, 1),
            dw_size=5,
            padding=2,
            scale=scale,
            use_se=True,
        )
        self.block_list.append(conv5_6)

        conv6 = DepthwiseSeparable(
            num_channels=int(1024 * scale),
            num_filters1=1024,
            num_filters2=1024,
            num_groups=1024,
            stride=last_conv_stride,
            dw_size=5,
            padding=2,
            use_se=True,
            scale=scale,
        )
        self.block_list.append(conv6)

        self.block_list = nn.Sequential(*self.block_list)
        if last_pool_type == "avg":
            self.pool = nn.AvgPool2d(kernel_size=2, stride=2, padding=0)
        else:
            self.pool = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
        self.out_channels = int(1024 * scale)

    def forward(self, inputs):
        y = self.conv1(inputs)
        y = self.block_list(y)
        y = self.pool(y)
        return y


def hardsigmoid(x):
    # hard-sigmoid gate used by the SE block: relu6(x + 3) / 6 in [0, 1]
    return F.relu6(x + 3.0, inplace=True) / 6.0


class SEModule(nn.Module):
    """Squeeze-and-excitation: global average pool -> bottleneck 1x1 convs ->
    hard-sigmoid gate, multiplied element-wise onto the input.
    """

    def __init__(self, channel, reduction=4):
        super(SEModule, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.conv1 = nn.Conv2d(
            in_channels=channel, out_channels=channel // reduction, kernel_size=1, stride=1, padding=0, bias=True
        )
        self.conv2 = nn.Conv2d(
            in_channels=channel // reduction, out_channels=channel, kernel_size=1, stride=1, padding=0, bias=True
        )

    def forward(self, inputs):
        outputs = self.avg_pool(inputs)
        outputs = self.conv1(outputs)
        outputs = F.relu(outputs)
        outputs = self.conv2(outputs)
        outputs = hardsigmoid(outputs)
        x = torch.mul(inputs, outputs)

        return x
{ "repo_id": "huggingface/diffusers", "file_path": "examples/research_projects/anytext/ocr_recog/RecMv1_enhance.py", "license": "Apache License 2.0", "lines": 169, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
huggingface/diffusers:examples/research_projects/anytext/ocr_recog/RecSVTR.py
import numpy as np
import torch
import torch.nn as nn
from torch.nn import functional
from torch.nn.init import ones_, trunc_normal_, zeros_


def drop_path(x, drop_prob=0.0, training=False):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).

    the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
    See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ...
    """
    if drop_prob == 0.0 or not training:
        return x
    keep_prob = torch.tensor(1 - drop_prob)
    # one Bernoulli draw per sample, broadcast over all remaining dims
    shape = (x.size()[0],) + (1,) * (x.ndim - 1)
    # NOTE(review): random_tensor is created on the default device — likely
    # breaks if x is on CUDA; confirm intended usage (CPU only?).
    random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype)
    random_tensor = torch.floor(random_tensor)  # binarize
    # rescale survivors by 1/keep_prob so the expectation is unchanged
    output = x.divide(keep_prob) * random_tensor
    return output


class Swish(nn.Module):
    # x * sigmoid(x) activation.
    # NOTE(review): `__int__` is a typo for `__init__`; it is never called, so
    # nn.Module.__init__ still runs when Swish() is instantiated.
    def __int__(self):
        super(Swish, self).__int__()

    def forward(self, x):
        return x * torch.sigmoid(x)


class ConvBNLayer(nn.Module):
    # Conv2d + BatchNorm + activation (the `act` class is instantiated here,
    # unlike the RNN.py variant which hard-codes Swish).
    def __init__(
        self, in_channels, out_channels, kernel_size=3, stride=1, padding=0, bias_attr=False, groups=1, act=nn.GELU
    ):
        super().__init__()
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            # weight_attr=paddle.ParamAttr(initializer=nn.initializer.KaimingUniform()),
            bias=bias_attr,
        )
        self.norm = nn.BatchNorm2d(out_channels)
        self.act = act()

    def forward(self, inputs):
        out = self.conv(inputs)
        out = self.norm(out)
        out = self.act(out)
        return out


class DropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample  (when applied in main path of residual blocks)."""

    def __init__(self, drop_prob=None):
        super(DropPath, self).__init__()
        self.drop_prob = drop_prob

    def forward(self, x):
        return drop_path(x, self.drop_prob, self.training)


class Identity(nn.Module):
    # No-op module used where DropPath is disabled.
    def __init__(self):
        super(Identity, self).__init__()

    def forward(self, input):
        return input


class Mlp(nn.Module):
    # Two-layer feed-forward block; a string `act_layer` selects Swish,
    # otherwise the given activation class is instantiated.
    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.0):
        super().__init__()
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        if isinstance(act_layer, str):
            self.act = Swish()
        else:
            self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        x = self.fc1(x)
        x = self.act(x)
        x = self.drop(x)
        x = self.fc2(x)
        x = self.drop(x)
        return x


class ConvMixer(nn.Module):
    # Local mixing via a grouped conv over the restored (H, W) grid.
    def __init__(
        self,
        dim,
        num_heads=8,
        HW=(8, 25),
        local_k=(3, 3),
    ):
        super().__init__()
        self.HW = HW
        self.dim = dim
        self.local_mixer = nn.Conv2d(
            dim,
            dim,
            local_k,
            1,
            (local_k[0] // 2, local_k[1] // 2),
            groups=num_heads,
            # weight_attr=ParamAttr(initializer=KaimingNormal())
        )

    def forward(self, x):
        h = self.HW[0]
        w = self.HW[1]
        # NOTE(review): `transpose([0, 2, 1])` and `reshape([0, ...])` are
        # Paddle-style calls; torch.Tensor.transpose takes two dims and 0 is
        # not a "keep" placeholder in torch reshape. This path looks broken in
        # PyTorch — confirm ConvMixer ("Conv" mixer) is never selected.
        x = x.transpose([0, 2, 1]).reshape([0, self.dim, h, w])
        x = self.local_mixer(x)
        x = x.flatten(2).transpose([0, 2, 1])
        return x


class Attention(nn.Module):
    # Multi-head self-attention; "Local" mixer adds a precomputed additive
    # mask restricting each token to a local window on the (H, W) grid.
    def __init__(
        self,
        dim,
        num_heads=8,
        mixer="Global",
        HW=(8, 25),
        local_k=(7, 11),
        qkv_bias=False,
        qk_scale=None,
        attn_drop=0.0,
        proj_drop=0.0,
    ):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = qk_scale or head_dim**-0.5

        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)
        self.HW = HW
        if HW is not None:
            H = HW[0]
            W = HW[1]
            self.N = H * W
            self.C = dim
        if mixer == "Local" and HW is not None:
            hk = local_k[0]
            wk = local_k[1]
            # Build a (H*W, H*W) additive mask: 0 inside each token's local
            # hk x wk window, -inf outside (padding trick avoids edge cases).
            mask = torch.ones([H * W, H + hk - 1, W + wk - 1])
            for h in range(0, H):
                for w in range(0, W):
                    mask[h * W + w, h : h + hk, w : w + wk] = 0.0
            mask_paddle = mask[:, hk // 2 : H + hk // 2, wk // 2 : W + wk // 2].flatten(1)
            mask_inf = torch.full([H * W, H * W], fill_value=float("-inf"))
            mask = torch.where(mask_paddle < 1, mask_paddle, mask_inf)
            # NOTE(review): stored as a plain attribute, not register_buffer —
            # it will not follow .to(device)/.cuda(); confirm CPU-only use.
            self.mask = mask[None, None, :]
            # self.mask = mask.unsqueeze([0, 1])
        self.mixer = mixer

    def forward(self, x):
        if self.HW is not None:
            N = self.N
            C = self.C
        else:
            _, N, C = x.shape
        # (B, N, 3C) -> (3, B, heads, N, C/heads); q is pre-scaled
        qkv = self.qkv(x).reshape((-1, N, 3, self.num_heads, C // self.num_heads)).permute((2, 0, 3, 1, 4))
        q, k, v = qkv[0] * self.scale, qkv[1], qkv[2]

        attn = q.matmul(k.permute((0, 1, 3, 2)))
        if self.mixer == "Local":
            attn += self.mask
        attn = functional.softmax(attn, dim=-1)
        attn = self.attn_drop(attn)

        x = (attn.matmul(v)).permute((0, 2, 1, 3)).reshape((-1, N, C))
        x = self.proj(x)
        x = self.proj_drop(x)
        return x


class Block(nn.Module):
    # Transformer block: token mixer (attention or conv) + MLP, each with a
    # residual connection and DropPath.
    def __init__(
        self,
        dim,
        num_heads,
        mixer="Global",
        local_mixer=(7, 11),
        HW=(8, 25),
        mlp_ratio=4.0,
        qkv_bias=False,
        qk_scale=None,
        drop=0.0,
        attn_drop=0.0,
        drop_path=0.0,
        act_layer=nn.GELU,
        norm_layer="nn.LayerNorm",
        epsilon=1e-6,
        prenorm=True,
    ):
        super().__init__()
        # string norm layers are resolved with eval() (e.g. "nn.LayerNorm")
        if isinstance(norm_layer, str):
            self.norm1 = eval(norm_layer)(dim, eps=epsilon)
        else:
            self.norm1 = norm_layer(dim)
        if mixer == "Global" or mixer == "Local":
            self.mixer = Attention(
                dim,
                num_heads=num_heads,
                mixer=mixer,
                HW=HW,
                local_k=local_mixer,
                qkv_bias=qkv_bias,
                qk_scale=qk_scale,
                attn_drop=attn_drop,
                proj_drop=drop,
            )
        elif mixer == "Conv":
            self.mixer = ConvMixer(dim, num_heads=num_heads, HW=HW, local_k=local_mixer)
        else:
            raise TypeError("The mixer must be one of [Global, Local, Conv]")

        self.drop_path = DropPath(drop_path) if drop_path > 0.0 else Identity()
        if isinstance(norm_layer, str):
            self.norm2 = eval(norm_layer)(dim, eps=epsilon)
        else:
            self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp_ratio = mlp_ratio
        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
        self.prenorm = prenorm

    def forward(self, x):
        # NOTE: despite the flag's name, the `prenorm=True` branch normalizes
        # AFTER the residual add (post-norm); the else branch is conventional
        # pre-norm.
        if self.prenorm:
            x = self.norm1(x + self.drop_path(self.mixer(x)))
            x = self.norm2(x + self.drop_path(self.mlp(x)))
        else:
            x = x + self.drop_path(self.mixer(self.norm1(x)))
            x = x + self.drop_path(self.mlp(self.norm2(x)))
        return x


class PatchEmbed(nn.Module):
    """Image to Patch Embedding: `sub_num` stride-2 ConvBN stages flatten the
    image into a (B, num_patches, embed_dim) sequence."""

    def __init__(self, img_size=(32, 100), in_channels=3, embed_dim=768, sub_num=2):
        super().__init__()
        # each stride-2 stage halves both spatial dims
        num_patches = (img_size[1] // (2**sub_num)) * (img_size[0] // (2**sub_num))
        self.img_size = img_size
        self.num_patches = num_patches
        self.embed_dim = embed_dim
        self.norm = None
        if sub_num == 2:
            self.proj = nn.Sequential(
                ConvBNLayer(
                    in_channels=in_channels,
                    out_channels=embed_dim // 2,
                    kernel_size=3,
                    stride=2,
                    padding=1,
                    act=nn.GELU,
                    bias_attr=False,
                ),
                ConvBNLayer(
                    in_channels=embed_dim // 2,
                    out_channels=embed_dim,
                    kernel_size=3,
                    stride=2,
                    padding=1,
                    act=nn.GELU,
                    bias_attr=False,
                ),
            )
        if sub_num == 3:
            self.proj = nn.Sequential(
                ConvBNLayer(
                    in_channels=in_channels,
                    out_channels=embed_dim // 4,
                    kernel_size=3,
                    stride=2,
                    padding=1,
                    act=nn.GELU,
                    bias_attr=False,
                ),
                ConvBNLayer(
                    in_channels=embed_dim // 4,
                    out_channels=embed_dim // 2,
                    kernel_size=3,
                    stride=2,
                    padding=1,
                    act=nn.GELU,
                    bias_attr=False,
                ),
                ConvBNLayer(
                    in_channels=embed_dim // 2,
                    out_channels=embed_dim,
                    kernel_size=3,
                    stride=2,
                    padding=1,
                    act=nn.GELU,
                    bias_attr=False,
                ),
            )

    def forward(self, x):
        B, C, H, W = x.shape
        assert H == self.img_size[0] and W == self.img_size[1], (
            f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
        )
        x = self.proj(x).flatten(2).permute(0, 2, 1)
        return x


class SubSample(nn.Module):
    # Spatial downsampling between stages: either avg+max pooling followed by
    # a linear projection ("Pool") or a strided conv, then LayerNorm.
    def __init__(self, in_channels, out_channels, types="Pool", stride=(2, 1), sub_norm="nn.LayerNorm", act=None):
        super().__init__()
        self.types = types
        if types == "Pool":
            self.avgpool = nn.AvgPool2d(kernel_size=(3, 5), stride=stride, padding=(1, 2))
            self.maxpool = nn.MaxPool2d(kernel_size=(3, 5), stride=stride, padding=(1, 2))
            self.proj = nn.Linear(in_channels, out_channels)
        else:
            self.conv = nn.Conv2d(
                in_channels,
                out_channels,
                kernel_size=3,
                stride=stride,
                padding=1,
                # weight_attr=ParamAttr(initializer=KaimingNormal())
            )
        self.norm = eval(sub_norm)(out_channels)
        if act is not None:
            self.act = act()
        else:
            self.act = None

    def forward(self, x):
        if self.types == "Pool":
            x1 = self.avgpool(x)
            x2 = self.maxpool(x)
            x = (x1 + x2) * 0.5
            out = self.proj(x.flatten(2).permute((0, 2, 1)))
        else:
            x = self.conv(x)
            out = x.flatten(2).permute((0, 2, 1))
        out = self.norm(out)
        if self.act is not None:
            out = self.act(out)

        return out


class SVTRNet(nn.Module):
    """SVTR recognition backbone: patch embedding + three stages of mixing
    blocks with optional Conv/Pool sub-sampling between stages, plus an
    optional pooled projection head (`last_stage`)."""

    def __init__(
        self,
        img_size=[48, 100],
        in_channels=3,
        embed_dim=[64, 128, 256],
        depth=[3, 6, 3],
        num_heads=[2, 4, 8],
        mixer=["Local"] * 6 + ["Global"] * 6,  # Local atten, Global atten, Conv
        local_mixer=[[7, 11], [7, 11], [7, 11]],
        patch_merging="Conv",  # Conv, Pool, None
        mlp_ratio=4,
        qkv_bias=True,
        qk_scale=None,
        drop_rate=0.0,
        last_drop=0.1,
        attn_drop_rate=0.0,
        drop_path_rate=0.1,
        norm_layer="nn.LayerNorm",
        sub_norm="nn.LayerNorm",
        epsilon=1e-6,
        out_channels=192,
        out_char_num=25,
        block_unit="Block",
        act="nn.GELU",
        last_stage=True,
        sub_num=2,
        prenorm=True,
        use_lenhead=False,
        **kwargs,
    ):
        super().__init__()
        self.img_size = img_size
        self.embed_dim = embed_dim
        self.out_channels = out_channels
        self.prenorm = prenorm
        patch_merging = None if patch_merging != "Conv" and patch_merging != "Pool" else patch_merging
        self.patch_embed = PatchEmbed(
            img_size=img_size, in_channels=in_channels, embed_dim=embed_dim[0], sub_num=sub_num
        )
        num_patches = self.patch_embed.num_patches
        self.HW = [img_size[0] // (2**sub_num), img_size[1] // (2**sub_num)]
        self.pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim[0]))
        # self.pos_embed = self.create_parameter(
        #     shape=[1, num_patches, embed_dim[0]], default_initializer=zeros_)
        # self.add_parameter("pos_embed", self.pos_embed)
        self.pos_drop = nn.Dropout(p=drop_rate)
        # class name strings are resolved with eval() ("Block")
        Block_unit = eval(block_unit)

        # stochastic-depth rate grows linearly across all blocks
        dpr = np.linspace(0, drop_path_rate, sum(depth))
        self.blocks1 = nn.ModuleList(
            [
                Block_unit(
                    dim=embed_dim[0],
                    num_heads=num_heads[0],
                    mixer=mixer[0 : depth[0]][i],
                    HW=self.HW,
                    local_mixer=local_mixer[0],
                    mlp_ratio=mlp_ratio,
                    qkv_bias=qkv_bias,
                    qk_scale=qk_scale,
                    drop=drop_rate,
                    act_layer=eval(act),
                    attn_drop=attn_drop_rate,
                    drop_path=dpr[0 : depth[0]][i],
                    norm_layer=norm_layer,
                    epsilon=epsilon,
                    prenorm=prenorm,
                )
                for i in range(depth[0])
            ]
        )
        if patch_merging is not None:
            self.sub_sample1 = SubSample(
                embed_dim[0], embed_dim[1], sub_norm=sub_norm, stride=[2, 1], types=patch_merging
            )
            HW = [self.HW[0] // 2, self.HW[1]]
        else:
            HW = self.HW
        self.patch_merging = patch_merging
        self.blocks2 = nn.ModuleList(
            [
                Block_unit(
                    dim=embed_dim[1],
                    num_heads=num_heads[1],
                    mixer=mixer[depth[0] : depth[0] + depth[1]][i],
                    HW=HW,
                    local_mixer=local_mixer[1],
                    mlp_ratio=mlp_ratio,
                    qkv_bias=qkv_bias,
                    qk_scale=qk_scale,
                    drop=drop_rate,
                    act_layer=eval(act),
                    attn_drop=attn_drop_rate,
                    drop_path=dpr[depth[0] : depth[0] + depth[1]][i],
                    norm_layer=norm_layer,
                    epsilon=epsilon,
                    prenorm=prenorm,
                )
                for i in range(depth[1])
            ]
        )
        if patch_merging is not None:
            self.sub_sample2 = SubSample(
                embed_dim[1], embed_dim[2], sub_norm=sub_norm, stride=[2, 1], types=patch_merging
            )
            HW = [self.HW[0] // 4, self.HW[1]]
        else:
            HW = self.HW
        self.blocks3 = nn.ModuleList(
            [
                Block_unit(
                    dim=embed_dim[2],
                    num_heads=num_heads[2],
                    mixer=mixer[depth[0] + depth[1] :][i],
                    HW=HW,
                    local_mixer=local_mixer[2],
                    mlp_ratio=mlp_ratio,
                    qkv_bias=qkv_bias,
                    qk_scale=qk_scale,
                    drop=drop_rate,
                    act_layer=eval(act),
                    attn_drop=attn_drop_rate,
                    drop_path=dpr[depth[0] + depth[1] :][i],
                    norm_layer=norm_layer,
                    epsilon=epsilon,
                    prenorm=prenorm,
                )
                for i in range(depth[2])
            ]
        )
        self.last_stage = last_stage
        if last_stage:
            self.avg_pool = nn.AdaptiveAvgPool2d((1, out_char_num))
            self.last_conv = nn.Conv2d(
                in_channels=embed_dim[2],
                out_channels=self.out_channels,
                kernel_size=1,
                stride=1,
                padding=0,
                bias=False,
            )
            self.hardswish = nn.Hardswish()
            self.dropout = nn.Dropout(p=last_drop)
        if not prenorm:
            # NOTE(review): nn.LayerNorm takes `eps`, not `epsilon` — this line
            # looks like it would raise TypeError when prenorm=False; confirm.
            self.norm = eval(norm_layer)(embed_dim[-1], epsilon=epsilon)
        self.use_lenhead = use_lenhead
        if use_lenhead:
            self.len_conv = nn.Linear(embed_dim[2], self.out_channels)
            self.hardswish_len = nn.Hardswish()
            self.dropout_len = nn.Dropout(p=last_drop)

        trunc_normal_(self.pos_embed, std=0.02)
        self.apply(self._init_weights)

    def _init_weights(self, m):
        # truncated-normal linears, standard LayerNorm init
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=0.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                zeros_(m.bias)
        elif isinstance(m, nn.LayerNorm):
            zeros_(m.bias)
            ones_(m.weight)

    def forward_features(self, x):
        x = self.patch_embed(x)
        x = x + self.pos_embed
        x = self.pos_drop(x)
        for blk in self.blocks1:
            x = blk(x)
        if self.patch_merging is not None:
            # restore the (B, C, H, W) grid for the conv/pool sub-sampler
            x = self.sub_sample1(x.permute([0, 2, 1]).reshape([-1, self.embed_dim[0], self.HW[0], self.HW[1]]))
        for blk in self.blocks2:
            x = blk(x)
        if self.patch_merging is not None:
            x = self.sub_sample2(x.permute([0, 2, 1]).reshape([-1, self.embed_dim[1], self.HW[0] // 2, self.HW[1]]))
        for blk in self.blocks3:
            x = blk(x)
        if not self.prenorm:
            x = self.norm(x)
        return x

    def forward(self, x):
        x = self.forward_features(x)
        if self.use_lenhead:
            len_x = self.len_conv(x.mean(1))
            len_x = self.dropout_len(self.hardswish_len(len_x))
        if self.last_stage:
            if self.patch_merging is not None:
                h = self.HW[0] // 4
            else:
                h = self.HW[0]
            # pool height to 1 and width to out_char_num, then 1x1 project
            x = self.avg_pool(x.permute([0, 2, 1]).reshape([-1, self.embed_dim[2], h, self.HW[1]]))
            x = self.last_conv(x)
            x = self.hardswish(x)
            x = self.dropout(x)
        if self.use_lenhead:
            return x, len_x
        return x


if __name__ == "__main__":
    # smoke test: run a random image through the default network
    a = torch.rand(1, 3, 48, 100)
    svtr = SVTRNet()

    out = svtr(a)
    print(svtr)
    print(out.size())
{ "repo_id": "huggingface/diffusers", "file_path": "examples/research_projects/anytext/ocr_recog/RecSVTR.py", "license": "Apache License 2.0", "lines": 519, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
huggingface/diffusers:examples/research_projects/anytext/ocr_recog/common.py
import torch
import torch.nn as nn
import torch.nn.functional as F


class Hswish(nn.Module):
    """Hard-swish activation: ``x * relu6(x + 3) / 6``."""

    def __init__(self, inplace=True):
        super().__init__()
        self.inplace = inplace

    def forward(self, x):
        gate = F.relu6(x + 3.0, inplace=self.inplace)
        return x * gate / 6.0


# out = max(0, min(1, slop*x+offset))
# paddle.fluid.layers.hard_sigmoid(x, slope=0.2, offset=0.5, name=None)
class Hsigmoid(nn.Module):
    """Hard-sigmoid matching the PaddlePaddle variant: ``relu6(1.2 * x + 3) / 6``."""

    def __init__(self, inplace=True):
        super().__init__()
        self.inplace = inplace

    def forward(self, x):
        # torch: F.relu6(x + 3., inplace=self.inplace) / 6.
        # paddle: F.relu6(1.2 * x + 3., inplace=self.inplace) / 6.
        return F.relu6(1.2 * x + 3.0, inplace=self.inplace) / 6.0


class GELU(nn.Module):
    """Thin wrapper around ``torch.nn.functional.gelu``; ``inplace`` is accepted but unused."""

    def __init__(self, inplace=True):
        super().__init__()
        self.inplace = inplace

    def forward(self, x):
        return torch.nn.functional.gelu(x)


class Swish(nn.Module):
    """Swish / SiLU activation: ``x * sigmoid(x)``, optionally applied in place."""

    def __init__(self, inplace=True):
        super().__init__()
        self.inplace = inplace

    def forward(self, x):
        if not self.inplace:
            return x * torch.sigmoid(x)
        # In-place variant mutates and returns the input tensor itself.
        x.mul_(torch.sigmoid(x))
        return x


class Activation(nn.Module):
    """Factory module mapping an activation name to its implementation.

    Raises ``NotImplementedError`` for unknown names and, deliberately, for
    ``"sigmoid"`` (only the hard variant is provided).
    """

    def __init__(self, act_type, inplace=True):
        super().__init__()
        key = act_type.lower()
        builders = {
            "relu": lambda: nn.ReLU(inplace=inplace),
            "relu6": lambda: nn.ReLU6(inplace=inplace),
            "hard_sigmoid": lambda: Hsigmoid(inplace),
            "hard_swish": lambda: Hswish(inplace=inplace),
            "leakyrelu": lambda: nn.LeakyReLU(inplace=inplace),
            "gelu": lambda: GELU(inplace=inplace),
            "swish": lambda: Swish(inplace=inplace),
        }
        if key == "sigmoid" or key not in builders:
            raise NotImplementedError
        self.act = builders[key]()

    def forward(self, inputs):
        return self.act(inputs)
{ "repo_id": "huggingface/diffusers", "file_path": "examples/research_projects/anytext/ocr_recog/common.py", "license": "Apache License 2.0", "lines": 59, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
huggingface/diffusers:tests/quantization/utils.py
from diffusers.utils import is_torch_available

from ..testing_utils import (
    backend_empty_cache,
    backend_max_memory_allocated,
    backend_reset_peak_memory_stats,
    torch_device,
)


if is_torch_available():
    import torch
    import torch.nn as nn


class LoRALayer(nn.Module):
    """Wraps a linear layer with LoRA-like adapter - Used for testing purposes only

    Taken from
    https://github.com/huggingface/transformers/blob/566302686a71de14125717dea9a6a45b24d42b37/tests/quantization/bnb/test_4bit.py#L62C5-L78C77
    """

    def __init__(self, module: nn.Module, rank: int):
        super().__init__()
        self.module = module
        down_proj = nn.Linear(module.in_features, rank, bias=False)
        up_proj = nn.Linear(rank, module.out_features, bias=False)
        self.adapter = nn.Sequential(down_proj, up_proj)
        # Small std for the down projection; the up projection starts at zero so the
        # wrapped module's output is unchanged before any training step.
        small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
        nn.init.normal_(down_proj.weight, std=small_std)
        nn.init.zeros_(up_proj.weight)
        self.adapter.to(module.weight.device)

    def forward(self, input, *args, **kwargs):
        base_out = self.module(input, *args, **kwargs)
        return base_out + self.adapter(input)


@torch.no_grad()
@torch.inference_mode()
def get_memory_consumption_stat(model, inputs):
    """Return the peak accelerator memory (bytes) allocated by one forward pass of ``model``."""
    backend_reset_peak_memory_stats(torch_device)
    backend_empty_cache(torch_device)

    model(**inputs)
    return backend_max_memory_allocated(torch_device)
{ "repo_id": "huggingface/diffusers", "file_path": "tests/quantization/utils.py", "license": "Apache License 2.0", "lines": 36, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/diffusers:tests/lora/test_lora_layers_cogview4.py
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys
import tempfile
import unittest

import numpy as np
import torch
from parameterized import parameterized
from transformers import AutoTokenizer, GlmModel

from diffusers import AutoencoderKL, CogView4Pipeline, CogView4Transformer2DModel, FlowMatchEulerDiscreteScheduler

from ..testing_utils import (
    floats_tensor,
    require_peft_backend,
    require_torch_accelerator,
    skip_mps,
    torch_device,
)


sys.path.append(".")

from .utils import PeftLoraLoaderMixinTests  # noqa: E402


class TokenizerWrapper:
    # Indirection so the shared LoRA mixin can call `tokenizer_cls.from_pretrained(...)`
    # while we always load the fixed tiny test repo with `trust_remote_code=True`,
    # regardless of the arguments the mixin passes in.
    @staticmethod
    def from_pretrained(*args, **kwargs):
        return AutoTokenizer.from_pretrained(
            "hf-internal-testing/tiny-random-cogview4", subfolder="tokenizer", trust_remote_code=True
        )


@require_peft_backend
@skip_mps
class CogView4LoRATests(unittest.TestCase, PeftLoraLoaderMixinTests):
    """LoRA loading/inference tests for CogView4, driven by `PeftLoraLoaderMixinTests`.

    The class attributes below describe a deliberately tiny model configuration so the
    mixin can build a full pipeline cheaply; the mixin reads them to construct the
    components.
    """

    pipeline_class = CogView4Pipeline
    scheduler_cls = FlowMatchEulerDiscreteScheduler
    scheduler_kwargs = {}

    # Tiny transformer config — small dims keep test runtime and memory negligible.
    transformer_kwargs = {
        "patch_size": 2,
        "in_channels": 4,
        "num_layers": 2,
        "attention_head_dim": 4,
        "num_attention_heads": 4,
        "out_channels": 4,
        "text_embed_dim": 32,
        "time_embed_dim": 8,
        "condition_dim": 4,
    }
    transformer_cls = CogView4Transformer2DModel
    # Tiny VAE config matching the transformer's 4 latent channels.
    vae_kwargs = {
        "block_out_channels": [32, 64],
        "in_channels": 3,
        "out_channels": 3,
        "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
        "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
        "latent_channels": 4,
        "sample_size": 128,
    }
    vae_cls = AutoencoderKL
    tokenizer_cls, tokenizer_id, tokenizer_subfolder = (
        TokenizerWrapper,
        "hf-internal-testing/tiny-random-cogview4",
        "tokenizer",
    )
    text_encoder_cls, text_encoder_id, text_encoder_subfolder = (
        GlmModel,
        "hf-internal-testing/tiny-random-cogview4",
        "text_encoder",
    )

    # CogView4's loader does not attach LoRA layers to the text encoder.
    supports_text_encoder_loras = False

    @property
    def output_shape(self):
        # (batch, height, width, channels) of the numpy image output.
        return (1, 32, 32, 3)

    def get_dummy_inputs(self, with_generator=True):
        """Build (noise, input_ids, pipeline_inputs) fixtures for the mixin's tests."""
        batch_size = 1
        sequence_length = 16
        num_channels = 4
        sizes = (4, 4)

        generator = torch.manual_seed(0)
        noise = floats_tensor((batch_size, num_channels) + sizes)
        input_ids = torch.randint(1, sequence_length, size=(batch_size, sequence_length), generator=generator)

        pipeline_inputs = {
            "prompt": "",
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "height": 32,
            "width": 32,
            "max_sequence_length": sequence_length,
            "output_type": "np",
        }
        if with_generator:
            pipeline_inputs.update({"generator": generator})

        return noise, input_ids, pipeline_inputs

    def test_simple_inference_with_text_lora_denoiser_fused_multi(self):
        # Looser tolerance than the mixin default for this model's numerics.
        super().test_simple_inference_with_text_lora_denoiser_fused_multi(expected_atol=9e-3)

    def test_simple_inference_with_text_denoiser_lora_unfused(self):
        # Looser tolerance than the mixin default for this model's numerics.
        super().test_simple_inference_with_text_denoiser_lora_unfused(expected_atol=9e-3)

    def test_simple_inference_save_pretrained(self):
        """
        Tests a simple usecase where users could use saving utilities for LoRA through save_pretrained
        """
        components, _, _ = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        _, _, inputs = self.get_dummy_inputs(with_generator=False)

        images_lora = pipe(**inputs, generator=torch.manual_seed(0))[0]

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)

            pipe_from_pretrained = self.pipeline_class.from_pretrained(tmpdirname)
            pipe_from_pretrained.to(torch_device)

        images_lora_save_pretrained = pipe_from_pretrained(**inputs, generator=torch.manual_seed(0))[0]

        self.assertTrue(
            np.allclose(images_lora, images_lora_save_pretrained, atol=1e-3, rtol=1e-3),
            "Loading from saved checkpoints should give same results.",
        )

    @parameterized.expand([("block_level", True), ("leaf_level", False)])
    @require_torch_accelerator
    def test_group_offloading_inference_denoiser(self, offload_type, use_stream):
        # TODO: We don't run the (leaf_level, True) test here that is enabled for other models.
        # The reason for this can be found here: https://github.com/huggingface/diffusers/pull/11804#issuecomment-3013325338
        super()._test_group_offloading_inference_denoiser(offload_type, use_stream)

    @unittest.skip("Not supported in CogView4.")
    def test_simple_inference_with_text_denoiser_block_scale(self):
        pass

    @unittest.skip("Not supported in CogView4.")
    def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self):
        pass

    @unittest.skip("Not supported in CogView4.")
    def test_modify_padding_mode(self):
        pass
{ "repo_id": "huggingface/diffusers", "file_path": "tests/lora/test_lora_layers_cogview4.py", "license": "Apache License 2.0", "lines": 136, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/diffusers:tests/single_file/test_sana_transformer.py
from diffusers import (
    SanaTransformer2DModel,
)

from ..testing_utils import (
    enable_full_determinism,
)
from .single_file_testing_utils import SingleFileModelTesterMixin


enable_full_determinism()


class TestSanaTransformer2DModelSingleFile(SingleFileModelTesterMixin):
    """Single-file checkpoint loading checks for ``SanaTransformer2DModel``."""

    model_class = SanaTransformer2DModel
    # Diffusers-format reference weights on the Hub.
    repo_id = "Efficient-Large-Model/Sana_1600M_1024px_diffusers"
    subfolder = "transformer"
    # Original single-file checkpoint to load and compare against the reference.
    ckpt_path = (
        "https://huggingface.co/Efficient-Large-Model/Sana_1600M_1024px/blob/main/checkpoints/Sana_1600M_1024px.pth"
    )
    # The same checkpoint doubles as the alternate-keys variant for this model.
    alternate_keys_ckpt_paths = [ckpt_path]
{ "repo_id": "huggingface/diffusers", "file_path": "tests/single_file/test_sana_transformer.py", "license": "Apache License 2.0", "lines": 18, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/diffusers:src/diffusers/quantizers/quanto/quanto_quantizer.py
from typing import TYPE_CHECKING, Any

from diffusers.utils.import_utils import is_optimum_quanto_version

from ...utils import (
    get_module_from_name,
    is_accelerate_available,
    is_accelerate_version,
    is_optimum_quanto_available,
    is_torch_available,
    logging,
)
from ..base import DiffusersQuantizer


if TYPE_CHECKING:
    from ...models.modeling_utils import ModelMixin


if is_torch_available():
    import torch

if is_accelerate_available():
    from accelerate.utils import CustomDtype, set_module_tensor_to_device

if is_optimum_quanto_available():
    from .utils import _replace_with_quanto_layers

logger = logging.get_logger(__name__)


class QuantoQuantizer(DiffusersQuantizer):
    r"""
    Diffusers Quantizer for Optimum Quanto
    """

    use_keep_in_fp32_modules = True
    requires_calibration = False
    required_packages = ["quanto", "accelerate"]

    def __init__(self, quantization_config, **kwargs):
        super().__init__(quantization_config, **kwargs)

    def validate_environment(self, *args, **kwargs):
        """Verify optimum-quanto (>= 0.2.6) and accelerate are installed, and that no
        multi-device or CPU/disk-offload `device_map` was requested (unsupported backend).

        Raises:
            ImportError: if a required package is missing or too old.
            ValueError: if a multi-entry `device_map` is supplied.
        """
        if not is_optimum_quanto_available():
            raise ImportError(
                "Loading an optimum-quanto quantized model requires optimum-quanto library (`pip install optimum-quanto`)"
            )
        if not is_optimum_quanto_version(">=", "0.2.6"):
            raise ImportError(
                "Loading an optimum-quanto quantized model requires `optimum-quanto>=0.2.6`. "
                # Fixed: the backtick around the pip command was previously unterminated.
                "Please upgrade your installation with `pip install --upgrade optimum-quanto`."
            )

        if not is_accelerate_available():
            raise ImportError(
                "Loading an optimum-quanto quantized model requires accelerate library (`pip install accelerate`)"
            )

        device_map = kwargs.get("device_map", None)
        if isinstance(device_map, dict) and len(device_map.keys()) > 1:
            raise ValueError(
                "`device_map` for multi-GPU inference or CPU/disk offload is currently not supported with Diffusers and the Quanto backend"
            )

    def check_if_quantized_param(
        self,
        model: "ModelMixin",
        param_value: "torch.Tensor",
        param_name: str,
        state_dict: dict[str, Any],
        **kwargs,
    ):
        """Return True when `param_name` belongs to a Quanto-managed module and should be
        handled by `create_quantized_param` instead of the default loading path."""
        # Quanto imports diffusers internally. This is here to prevent circular imports.
        from optimum.quanto import QModuleMixin, QTensor
        from optimum.quanto.tensor.packed import PackedTensor

        module, tensor_name = get_module_from_name(model, param_name)
        if self.pre_quantized and any(isinstance(module, t) for t in [QTensor, PackedTensor]):
            return True
        elif isinstance(module, QModuleMixin) and "weight" in tensor_name:
            # A frozen module already holds quantized weights; nothing left to quantize.
            return not module.frozen

        return False

    def create_quantized_param(
        self,
        model: "ModelMixin",
        param_value: "torch.Tensor",
        param_name: str,
        target_device: "torch.device",
        *args,
        **kwargs,
    ):
        """
        Create the quantized parameter by calling .freeze() after setting it to the module.
        """
        dtype = kwargs.get("dtype", torch.float32)
        module, tensor_name = get_module_from_name(model, param_name)
        if self.pre_quantized:
            # Checkpoint already holds quantized tensors; assign them directly.
            setattr(module, tensor_name, param_value)
        else:
            # Load the fp tensor onto the target device, then quantize in place.
            set_module_tensor_to_device(model, param_name, target_device, param_value, dtype)
            module.freeze()
            module.weight.requires_grad = False

    def adjust_max_memory(self, max_memory: dict[str, int | str]) -> dict[str, int | str]:
        # Reserve 10% headroom for quantization overhead.
        # NOTE(review): this assumes numeric values; a str budget like "8GB" would fail
        # here — presumably callers always pass bytes. TODO confirm.
        max_memory = {key: val * 0.90 for key, val in max_memory.items()}
        return max_memory

    def adjust_target_dtype(self, target_dtype: "torch.dtype") -> "torch.dtype":
        """Map the configured weights dtype to the (possibly accelerate-custom) dtype used
        for memory planning."""
        if is_accelerate_version(">=", "0.27.0"):
            mapping = {
                "int8": torch.int8,
                "float8": CustomDtype.FP8,
                "int4": CustomDtype.INT4,
                "int2": CustomDtype.INT2,
            }
            target_dtype = mapping[self.quantization_config.weights_dtype]
        return target_dtype

    def update_torch_dtype(self, torch_dtype: "torch.dtype" = None) -> "torch.dtype":
        if torch_dtype is None:
            logger.info("You did not specify `torch_dtype` in `from_pretrained`. Setting it to `torch.float32`.")
            torch_dtype = torch.float32
        return torch_dtype

    def update_missing_keys(self, model, missing_keys: list[str], prefix: str) -> list[str]:
        """Drop state-dict keys that Quanto modules generate themselves (e.g. scales),
        so they are not reported as missing after loading."""
        # Quanto imports diffusers internally. This is here to prevent circular imports.
        from optimum.quanto import QModuleMixin

        not_missing_keys = []
        for name, module in model.named_modules():
            if isinstance(module, QModuleMixin):
                for missing in missing_keys:
                    if (
                        (name in missing or name in f"{prefix}.{missing}")
                        and not missing.endswith(".weight")
                        and not missing.endswith(".bias")
                    ):
                        not_missing_keys.append(missing)
        return [k for k in missing_keys if k not in not_missing_keys]

    def _process_model_before_weight_loading(
        self,
        model: "ModelMixin",
        device_map,
        keep_in_fp32_modules: list[str] = [],
        **kwargs,
    ):
        """Swap eligible linear layers for Quanto layers before the state dict is loaded."""
        self.modules_to_not_convert = self.quantization_config.modules_to_not_convert

        if not isinstance(self.modules_to_not_convert, list):
            self.modules_to_not_convert = [self.modules_to_not_convert]

        self.modules_to_not_convert.extend(keep_in_fp32_modules)

        model = _replace_with_quanto_layers(
            model,
            modules_to_not_convert=self.modules_to_not_convert,
            quantization_config=self.quantization_config,
            pre_quantized=self.pre_quantized,
        )
        model.config.quantization_config = self.quantization_config

    def _process_model_after_weight_loading(self, model, **kwargs):
        return model

    @property
    def is_trainable(self):
        return True

    @property
    def is_serializable(self):
        return True

    @property
    def is_compileable(self) -> bool:
        return True
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/quantizers/quanto/quanto_quantizer.py", "license": "Apache License 2.0", "lines": 146, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
huggingface/diffusers:src/diffusers/quantizers/quanto/utils.py
import torch.nn as nn

from ...utils import is_accelerate_available, logging


logger = logging.get_logger(__name__)


if is_accelerate_available():
    from accelerate import init_empty_weights


def _replace_with_quanto_layers(model, quantization_config, modules_to_not_convert: list, pre_quantized=False):
    """Recursively replace every eligible ``nn.Linear`` in ``model`` with a Quanto ``QLinear``.

    Args:
        model: the module tree to convert (mutated in place and returned).
        quantization_config: provides ``weights_dtype`` ("float8"/"int8"/"int4"/"int2").
        modules_to_not_convert: child names to leave unconverted.
        pre_quantized: if True, freeze the converted model so its state dict matches a
            checkpoint that already contains quantized tensors.
    """
    # Quanto imports diffusers internally. These are placed here to avoid circular imports
    from optimum.quanto import QLinear, freeze, qfloat8, qint2, qint4, qint8

    def _get_weight_type(dtype: str):
        # Map the config's string dtype to the corresponding Quanto qtype.
        return {"float8": qfloat8, "int8": qint8, "int4": qint4, "int2": qint2}[dtype]

    def _replace_layers(model, quantization_config, modules_to_not_convert):
        # Leaf modules have nothing to recurse into; conversion happens at the parent.
        has_children = list(model.children())
        if not has_children:
            return model

        for name, module in model.named_children():
            _replace_layers(module, quantization_config, modules_to_not_convert)

            # NOTE(review): matching uses the *local* child name, not the fully
            # qualified path, and the recursion above has already processed this
            # child's own submodules — so excluding a container does not exclude the
            # linears nested inside it. Presumably intentional; confirm upstream.
            if name in modules_to_not_convert:
                continue

            if isinstance(module, nn.Linear):
                # Allocate on the meta device; real weights are loaded afterwards.
                with init_empty_weights():
                    qlinear = QLinear(
                        in_features=module.in_features,
                        out_features=module.out_features,
                        bias=module.bias is not None,
                        dtype=module.weight.dtype,
                        weights=_get_weight_type(quantization_config.weights_dtype),
                    )
                model._modules[name] = qlinear
                # Remember the original class so the layer can be restored/inspected later.
                model._modules[name].source_cls = type(module)
                model._modules[name].requires_grad_(False)

        return model

    model = _replace_layers(model, quantization_config, modules_to_not_convert)
    has_been_replaced = any(isinstance(replaced_module, QLinear) for _, replaced_module in model.named_modules())

    if not has_been_replaced:
        logger.warning(
            f"{model.__class__.__name__} does not appear to have any `nn.Linear` modules. Quantization will not be applied."
            " Please check your model architecture, or submit an issue on Github if you think this is a bug."
            " https://github.com/huggingface/diffusers/issues/new"
        )

    # We need to freeze the pre_quantized model in order for the loaded state_dict and model state dict
    # to match when trying to load weights with load_model_dict_into_meta
    if pre_quantized:
        freeze(model)

    return model
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/quantizers/quanto/utils.py", "license": "Apache License 2.0", "lines": 44, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
huggingface/diffusers:tests/quantization/quanto/test_quanto.py
import gc
import tempfile
import unittest

from diffusers import FluxPipeline, FluxTransformer2DModel, QuantoConfig
from diffusers.models.attention_processor import Attention
from diffusers.utils import is_optimum_quanto_available, is_torch_available

from ...testing_utils import (
    backend_empty_cache,
    backend_reset_peak_memory_stats,
    enable_full_determinism,
    nightly,
    numpy_cosine_similarity_distance,
    require_accelerate,
    require_accelerator,
    require_torch_cuda_compatibility,
    torch_device,
)


if is_optimum_quanto_available():
    from optimum.quanto import QLinear

if is_torch_available():
    import torch

from ..utils import LoRALayer, get_memory_consumption_stat


enable_full_determinism()


@nightly
@require_accelerator
@require_accelerate
class QuantoBaseTesterMixin:
    """Shared Quanto quantization checks; subclasses fill in the model/config attributes."""

    model_id = None
    pipeline_model_id = None
    model_cls = None
    torch_dtype = torch.bfloat16
    # the expected reduction in peak memory used compared to an unquantized model expressed as a percentage
    expected_memory_reduction = 0.0
    keep_in_fp32_module = ""
    modules_to_not_convert = ""
    _test_torch_compile = False

    def setUp(self):
        backend_reset_peak_memory_stats(torch_device)
        backend_empty_cache(torch_device)
        gc.collect()

    def tearDown(self):
        backend_reset_peak_memory_stats(torch_device)
        backend_empty_cache(torch_device)
        gc.collect()

    def get_dummy_init_kwargs(self):
        # Overridden by subclasses to select the quantized weights dtype.
        return {"weights_dtype": "float8"}

    def get_dummy_model_init_kwargs(self):
        return {
            "pretrained_model_name_or_path": self.model_id,
            "torch_dtype": self.torch_dtype,
            "quantization_config": QuantoConfig(**self.get_dummy_init_kwargs()),
        }

    def test_quanto_layers(self):
        # Every linear layer in the quantized model must have been swapped for QLinear.
        model = self.model_cls.from_pretrained(**self.get_dummy_model_init_kwargs())
        for name, module in model.named_modules():
            if isinstance(module, torch.nn.Linear):
                assert isinstance(module, QLinear)

    def test_quanto_memory_usage(self):
        inputs = self.get_dummy_inputs()
        inputs = {
            k: v.to(device=torch_device, dtype=torch.bfloat16) for k, v in inputs.items() if not isinstance(v, bool)
        }

        unquantized_model = self.model_cls.from_pretrained(self.model_id, torch_dtype=self.torch_dtype)
        unquantized_model.to(torch_device)
        unquantized_model_memory = get_memory_consumption_stat(unquantized_model, inputs)

        quantized_model = self.model_cls.from_pretrained(**self.get_dummy_model_init_kwargs())
        quantized_model.to(torch_device)
        quantized_model_memory = get_memory_consumption_stat(quantized_model, inputs)

        assert unquantized_model_memory / quantized_model_memory >= self.expected_memory_reduction

    def test_keep_modules_in_fp32(self):
        r"""
        A simple tests to check if the modules under `_keep_in_fp32_modules` are kept in fp32. Also ensures if
        inference works.
        """
        # Patch the class attribute, then restore it afterwards so other tests are unaffected.
        _keep_in_fp32_modules = self.model_cls._keep_in_fp32_modules
        self.model_cls._keep_in_fp32_modules = self.keep_in_fp32_module

        model = self.model_cls.from_pretrained(**self.get_dummy_model_init_kwargs())
        model.to(torch_device)

        for name, module in model.named_modules():
            if isinstance(module, torch.nn.Linear):
                if name in model._keep_in_fp32_modules:
                    assert module.weight.dtype == torch.float32
        self.model_cls._keep_in_fp32_modules = _keep_in_fp32_modules

    def test_modules_to_not_convert(self):
        # Modules named in `modules_to_not_convert` must keep their original class.
        init_kwargs = self.get_dummy_model_init_kwargs()

        quantization_config_kwargs = self.get_dummy_init_kwargs()
        quantization_config_kwargs.update({"modules_to_not_convert": self.modules_to_not_convert})
        quantization_config = QuantoConfig(**quantization_config_kwargs)

        init_kwargs.update({"quantization_config": quantization_config})

        model = self.model_cls.from_pretrained(**init_kwargs)
        model.to(torch_device)

        for name, module in model.named_modules():
            if name in self.modules_to_not_convert:
                assert not isinstance(module, QLinear)

    def test_dtype_assignment(self):
        # Quantized models must refuse dtype changes; only plain device moves are allowed.
        model = self.model_cls.from_pretrained(**self.get_dummy_model_init_kwargs())

        with self.assertRaises(ValueError):
            # Tries with a `dtype`
            model.to(torch.float16)

        with self.assertRaises(ValueError):
            # Tries with a `device` and `dtype`
            device_0 = f"{torch_device}:0"
            model.to(device=device_0, dtype=torch.float16)

        with self.assertRaises(ValueError):
            # Tries with a cast
            model.float()

        with self.assertRaises(ValueError):
            # Tries with a cast
            model.half()

        # This should work
        model.to(torch_device)

    def test_serialization(self):
        # Round-trip save/load must reproduce the forward output exactly.
        model = self.model_cls.from_pretrained(**self.get_dummy_model_init_kwargs())
        inputs = self.get_dummy_inputs()

        model.to(torch_device)
        with torch.no_grad():
            model_output = model(**inputs)

        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir)
            saved_model = self.model_cls.from_pretrained(
                tmp_dir,
                torch_dtype=torch.bfloat16,
            )

        saved_model.to(torch_device)
        with torch.no_grad():
            saved_model_output = saved_model(**inputs)

        assert torch.allclose(model_output.sample, saved_model_output.sample, rtol=1e-5, atol=1e-5)

    def test_torch_compile(self):
        if not self._test_torch_compile:
            return

        model = self.model_cls.from_pretrained(**self.get_dummy_model_init_kwargs())
        compiled_model = torch.compile(model, mode="max-autotune", fullgraph=True, dynamic=False)

        model.to(torch_device)
        with torch.no_grad():
            model_output = model(**self.get_dummy_inputs()).sample

        compiled_model.to(torch_device)
        with torch.no_grad():
            compiled_model_output = compiled_model(**self.get_dummy_inputs()).sample

        model_output = model_output.detach().float().cpu().numpy()
        compiled_model_output = compiled_model_output.detach().float().cpu().numpy()

        # Cosine distance tolerates the small numeric drift torch.compile can introduce.
        max_diff = numpy_cosine_similarity_distance(model_output.flatten(), compiled_model_output.flatten())
        assert max_diff < 1e-3

    def test_device_map_error(self):
        # Multi-entry device maps are unsupported by the Quanto backend and must raise.
        with self.assertRaises(ValueError):
            _ = self.model_cls.from_pretrained(
                **self.get_dummy_model_init_kwargs(), device_map={0: "8GB", "cpu": "16GB"}
            )


class FluxTransformerQuantoMixin(QuantoBaseTesterMixin):
    """Binds the base mixin to a tiny Flux transformer and supplies its input fixtures."""

    model_id = "hf-internal-testing/tiny-flux-transformer"
    model_cls = FluxTransformer2DModel
    pipeline_cls = FluxPipeline
    torch_dtype = torch.bfloat16
    keep_in_fp32_module = "proj_out"
    modules_to_not_convert = ["proj_out"]
    _test_torch_compile = False

    def get_dummy_inputs(self):
        # Fixed-seed inputs shaped for the tiny Flux transformer.
        return {
            "hidden_states": torch.randn((1, 4096, 64), generator=torch.Generator("cpu").manual_seed(0)).to(
                torch_device, self.torch_dtype
            ),
            "encoder_hidden_states": torch.randn(
                (1, 512, 4096),
                generator=torch.Generator("cpu").manual_seed(0),
            ).to(torch_device, self.torch_dtype),
            "pooled_projections": torch.randn(
                (1, 768),
                generator=torch.Generator("cpu").manual_seed(0),
            ).to(torch_device, self.torch_dtype),
            "timestep": torch.tensor([1]).to(torch_device, self.torch_dtype),
            "img_ids": torch.randn((4096, 3), generator=torch.Generator("cpu").manual_seed(0)).to(
                torch_device, self.torch_dtype
            ),
            "txt_ids": torch.randn((512, 3), generator=torch.Generator("cpu").manual_seed(0)).to(
                torch_device, self.torch_dtype
            ),
            "guidance": torch.tensor([3.5]).to(torch_device, self.torch_dtype),
        }

    def get_dummy_training_inputs(self, device=None, seed: int = 0):
        # Smaller shapes than inference inputs, sized for the tiny-flux-pipe checkpoint.
        batch_size = 1
        num_latent_channels = 4
        num_image_channels = 3
        height = width = 4
        sequence_length = 48
        embedding_dim = 32

        torch.manual_seed(seed)
        hidden_states = torch.randn((batch_size, height * width, num_latent_channels)).to(device, dtype=torch.bfloat16)

        torch.manual_seed(seed)
        encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(
            device, dtype=torch.bfloat16
        )

        torch.manual_seed(seed)
        pooled_prompt_embeds = torch.randn((batch_size, embedding_dim)).to(device, dtype=torch.bfloat16)

        torch.manual_seed(seed)
        text_ids = torch.randn((sequence_length, num_image_channels)).to(device, dtype=torch.bfloat16)

        torch.manual_seed(seed)
        image_ids = torch.randn((height * width, num_image_channels)).to(device, dtype=torch.bfloat16)

        timestep = torch.tensor([1.0]).to(device, dtype=torch.bfloat16).expand(batch_size)

        return {
            "hidden_states": hidden_states,
            "encoder_hidden_states": encoder_hidden_states,
            "pooled_projections": pooled_prompt_embeds,
            "txt_ids": text_ids,
            "img_ids": image_ids,
            "timestep": timestep,
        }

    def test_model_cpu_offload(self):
        # A quantized transformer must work inside a pipeline with CPU offload enabled.
        init_kwargs = self.get_dummy_init_kwargs()
        transformer = self.model_cls.from_pretrained(
            "hf-internal-testing/tiny-flux-pipe",
            quantization_config=QuantoConfig(**init_kwargs),
            subfolder="transformer",
            torch_dtype=torch.bfloat16,
        )
        pipe = self.pipeline_cls.from_pretrained(
            "hf-internal-testing/tiny-flux-pipe", transformer=transformer, torch_dtype=torch.bfloat16
        )
        pipe.enable_model_cpu_offload(device=torch_device)
        _ = pipe("a cat holding a sign that says hello", num_inference_steps=2)

    def test_training(self):
        # Gradients must flow through LoRA adapters attached to a frozen quantized model.
        quantization_config = QuantoConfig(**self.get_dummy_init_kwargs())
        quantized_model = self.model_cls.from_pretrained(
            "hf-internal-testing/tiny-flux-pipe",
            subfolder="transformer",
            quantization_config=quantization_config,
            torch_dtype=torch.bfloat16,
        ).to(torch_device)

        for param in quantized_model.parameters():
            # freeze the model as only adapter layers will be trained
            param.requires_grad = False
            if param.ndim == 1:
                param.data = param.data.to(torch.float32)

        for _, module in quantized_model.named_modules():
            if isinstance(module, Attention):
                module.to_q = LoRALayer(module.to_q, rank=4)
                module.to_k = LoRALayer(module.to_k, rank=4)
                module.to_v = LoRALayer(module.to_v, rank=4)

        with torch.amp.autocast(str(torch_device), dtype=torch.bfloat16):
            inputs = self.get_dummy_training_inputs(torch_device)
            output = quantized_model(**inputs)[0]
            output.norm().backward()

        for module in quantized_model.modules():
            if isinstance(module, LoRALayer):
                self.assertTrue(module.adapter[1].weight.grad is not None)


class FluxTransformerFloat8WeightsTest(FluxTransformerQuantoMixin, unittest.TestCase):
    expected_memory_reduction = 0.6

    def get_dummy_init_kwargs(self):
        return {"weights_dtype": "float8"}


class FluxTransformerInt8WeightsTest(FluxTransformerQuantoMixin, unittest.TestCase):
    expected_memory_reduction = 0.6
    _test_torch_compile = True

    def get_dummy_init_kwargs(self):
        return {"weights_dtype": "int8"}


@require_torch_cuda_compatibility(8.0)
class FluxTransformerInt4WeightsTest(FluxTransformerQuantoMixin, unittest.TestCase):
    expected_memory_reduction = 0.55

    def get_dummy_init_kwargs(self):
        return {"weights_dtype": "int4"}


@require_torch_cuda_compatibility(8.0)
class FluxTransformerInt2WeightsTest(FluxTransformerQuantoMixin, unittest.TestCase):
    expected_memory_reduction = 0.65

    def get_dummy_init_kwargs(self):
        return {"weights_dtype": "int2"}
{ "repo_id": "huggingface/diffusers", "file_path": "tests/quantization/quanto/test_quanto.py", "license": "Apache License 2.0", "lines": 262, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/diffusers:examples/community/pipeline_stg_cogvideox.py
# Copyright 2025 The CogVideoX team, Tsinghua University & ZhipuAI and The HuggingFace Team.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import inspect
import math
import types
from typing import Any, Callable, Dict, List, Optional, Tuple, Union

import torch
from transformers import T5EncoderModel, T5Tokenizer

from diffusers.callbacks import MultiPipelineCallbacks, PipelineCallback
from diffusers.loaders import CogVideoXLoraLoaderMixin
from diffusers.models import AutoencoderKLCogVideoX, CogVideoXTransformer3DModel
from diffusers.models.embeddings import get_3d_rotary_pos_embed
from diffusers.pipelines.cogvideo.pipeline_output import CogVideoXPipelineOutput
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.schedulers import CogVideoXDDIMScheduler, CogVideoXDPMScheduler
from diffusers.utils import is_torch_xla_available, logging, replace_example_docstring
from diffusers.utils.torch_utils import randn_tensor
from diffusers.video_processor import VideoProcessor


if is_torch_xla_available():
    import torch_xla.core.xla_model as xm

    XLA_AVAILABLE = True
else:
    XLA_AVAILABLE = False

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
    Examples:
        ```python
        >>> import torch
        >>> from diffusers.utils import export_to_video
        >>> from examples.community.pipeline_stg_cogvideox import CogVideoXSTGPipeline

        >>> # Models: "THUDM/CogVideoX-2b" or "THUDM/CogVideoX-5b"
        >>> pipe = CogVideoXSTGPipeline.from_pretrained("THUDM/CogVideoX-5b", torch_dtype=torch.float16).to("cuda")
        >>> prompt = (
        ...     "A father and son building a treehouse together, their hands covered in sawdust and smiles on their faces, realistic style."
        ... )
        >>> pipe.transformer.to(memory_format=torch.channels_last)

        >>> # Configure STG mode options
        >>> stg_applied_layers_idx = [11]  # Layer indices from 0 to 41
        >>> stg_scale = 1.0  # Set to 0.0 for CFG
        >>> do_rescaling = False

        >>> # Generate video frames with STG parameters
        >>> frames = pipe(
        ...     prompt=prompt,
        ...     stg_applied_layers_idx=stg_applied_layers_idx,
        ...     stg_scale=stg_scale,
        ...     do_rescaling=do_rescaling,
        ... ).frames[0]
        >>> export_to_video(frames, "output.mp4", fps=8)
        ```
"""


def forward_with_stg(
    self,
    hidden_states: torch.Tensor,
    encoder_hidden_states: torch.Tensor,
    temb: torch.Tensor,
    image_rotary_emb: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
) -> Tuple[torch.Tensor, torch.Tensor]:
    """Transformer-block forward used while spatio-temporal guidance (STG) is active.

    Bound onto selected ``transformer_blocks`` via ``types.MethodType``. The batch is laid
    out as ``[uncond, cond, perturbed]``; the block runs normally, then the perturbed rows
    (index 2 onward) are overwritten with their *input* values, i.e. the perturbed branch
    skips this block entirely (residual passthrough).
    """
    # Views into the incoming tensors; the `hidden_states + ...` additions below create
    # new tensors, so these views keep referencing the block's *input* values.
    hidden_states_ptb = hidden_states[2:]
    encoder_hidden_states_ptb = encoder_hidden_states[2:]

    text_seq_length = encoder_hidden_states.size(1)

    # norm & modulate
    norm_hidden_states, norm_encoder_hidden_states, gate_msa, enc_gate_msa = self.norm1(
        hidden_states, encoder_hidden_states, temb
    )

    # attention
    attn_hidden_states, attn_encoder_hidden_states = self.attn1(
        hidden_states=norm_hidden_states,
        encoder_hidden_states=norm_encoder_hidden_states,
        image_rotary_emb=image_rotary_emb,
    )

    hidden_states = hidden_states + gate_msa * attn_hidden_states
    encoder_hidden_states = encoder_hidden_states + enc_gate_msa * attn_encoder_hidden_states

    # norm & modulate
    norm_hidden_states, norm_encoder_hidden_states, gate_ff, enc_gate_ff = self.norm2(
        hidden_states, encoder_hidden_states, temb
    )

    # feed-forward
    norm_hidden_states = torch.cat([norm_encoder_hidden_states, norm_hidden_states], dim=1)
    ff_output = self.ff(norm_hidden_states)

    hidden_states = hidden_states + gate_ff * ff_output[:, text_seq_length:]
    encoder_hidden_states = encoder_hidden_states + enc_gate_ff * ff_output[:, :text_seq_length]

    # Restore the perturbed rows to the block's input: the perturbed branch is a skip.
    hidden_states[2:] = hidden_states_ptb
    encoder_hidden_states[2:] = encoder_hidden_states_ptb

    return hidden_states, encoder_hidden_states


# Similar to diffusers.pipelines.hunyuandit.pipeline_hunyuandit.get_resize_crop_region_for_grid
def get_resize_crop_region_for_grid(src, tgt_width, tgt_height):
    """Return the centered crop region of `src` (h, w) resized to fit a `tgt_height` x `tgt_width` grid."""
    tw = tgt_width
    th = tgt_height
    h, w = src
    r = h / w
    if r > (th / tw):
        resize_height = th
        resize_width = int(round(th / h * w))
    else:
        resize_width = tw
        resize_height = int(round(tw / w * h))

    crop_top = int(round((th - resize_height) / 2.0))
    crop_left = int(round((tw - resize_width) / 2.0))

    return (crop_top, crop_left), (crop_top + resize_height, crop_left + resize_width)


# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps
def retrieve_timesteps(
    scheduler,
    num_inference_steps: Optional[int] = None,
    device: Optional[Union[str, torch.device]] = None,
    timesteps: Optional[List[int]] = None,
    sigmas: Optional[List[float]] = None,
    **kwargs,
):
    r"""
    Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
    custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.

    Args:
        scheduler (`SchedulerMixin`):
            The scheduler to get timesteps from.
        num_inference_steps (`int`):
            The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps`
            must be `None`.
        device (`str` or `torch.device`, *optional*):
            The device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
        timesteps (`List[int]`, *optional*):
            Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is
            passed, `num_inference_steps` and `sigmas` must be `None`.
        sigmas (`List[float]`, *optional*):
            Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed,
            `num_inference_steps` and `timesteps` must be `None`.

    Returns:
        `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and
        the second element is the number of inference steps.
    """
    if timesteps is not None and sigmas is not None:
        raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values")
    if timesteps is not None:
        accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
        if not accepts_timesteps:
            raise ValueError(
                f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
                f" timestep schedules. Please check whether you are using the correct scheduler."
            )
        scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
        timesteps = scheduler.timesteps
        num_inference_steps = len(timesteps)
    elif sigmas is not None:
        accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
        if not accept_sigmas:
            raise ValueError(
                f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
                f" sigmas schedules. Please check whether you are using the correct scheduler."
            )
        scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs)
        timesteps = scheduler.timesteps
        num_inference_steps = len(timesteps)
    else:
        scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
        timesteps = scheduler.timesteps
    return timesteps, num_inference_steps


class CogVideoXSTGPipeline(DiffusionPipeline, CogVideoXLoraLoaderMixin):
    r"""
    Pipeline for text-to-video generation using CogVideoX.

    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
    library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)

    Args:
        vae ([`AutoencoderKL`]):
            Variational Auto-Encoder (VAE) Model to encode and decode videos to and from latent representations.
        text_encoder ([`T5EncoderModel`]):
            Frozen text-encoder. CogVideoX uses
            [T5](https://huggingface.co/docs/transformers/model_doc/t5#transformers.T5EncoderModel); specifically the
            [t5-v1_1-xxl](https://huggingface.co/PixArt-alpha/PixArt-alpha/tree/main/t5-v1_1-xxl) variant.
        tokenizer (`T5Tokenizer`):
            Tokenizer of class
            [T5Tokenizer](https://huggingface.co/docs/transformers/model_doc/t5#transformers.T5Tokenizer).
        transformer ([`CogVideoXTransformer3DModel`]):
            A text conditioned `CogVideoXTransformer3DModel` to denoise the encoded video latents.
        scheduler ([`SchedulerMixin`]):
            A scheduler to be used in combination with `transformer` to denoise the encoded video latents.
    """

    _optional_components = []
    model_cpu_offload_seq = "text_encoder->transformer->vae"

    _callback_tensor_inputs = [
        "latents",
        "prompt_embeds",
        "negative_prompt_embeds",
    ]

    def __init__(
        self,
        tokenizer: T5Tokenizer,
        text_encoder: T5EncoderModel,
        vae: AutoencoderKLCogVideoX,
        transformer: CogVideoXTransformer3DModel,
        scheduler: Union[CogVideoXDDIMScheduler, CogVideoXDPMScheduler],
    ):
        super().__init__()

        self.register_modules(
            tokenizer=tokenizer, text_encoder=text_encoder, vae=vae, transformer=transformer, scheduler=scheduler
        )
        self.vae_scale_factor_spatial = (
            2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8
        )
        self.vae_scale_factor_temporal = (
            self.vae.config.temporal_compression_ratio if getattr(self, "vae", None) else 4
        )
        self.vae_scaling_factor_image = self.vae.config.scaling_factor if getattr(self, "vae", None) else 0.7

        self.video_processor = VideoProcessor(vae_scale_factor=self.vae_scale_factor_spatial)

    def _get_t5_prompt_embeds(
        self,
        prompt: Union[str, List[str]] = None,
        num_videos_per_prompt: int = 1,
        max_sequence_length: int = 226,
        device: Optional[torch.device] = None,
        dtype: Optional[torch.dtype] = None,
    ):
        """Tokenize `prompt` and return T5 embeddings repeated `num_videos_per_prompt` times per prompt."""
        device = device or self._execution_device
        dtype = dtype or self.text_encoder.dtype

        prompt = [prompt] if isinstance(prompt, str) else prompt
        batch_size = len(prompt)

        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=max_sequence_length,
            truncation=True,
            add_special_tokens=True,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids
        untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids

        if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
            removed_text = self.tokenizer.batch_decode(untruncated_ids[:, max_sequence_length - 1 : -1])
            logger.warning(
                "The following part of your input was truncated because `max_sequence_length` is set to "
                f" {max_sequence_length} tokens: {removed_text}"
            )

        prompt_embeds = self.text_encoder(text_input_ids.to(device))[0]
        prompt_embeds = prompt_embeds.to(dtype=dtype, device=device)

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        _, seq_len, _ = prompt_embeds.shape
        prompt_embeds = prompt_embeds.repeat(1, num_videos_per_prompt, 1)
        prompt_embeds = prompt_embeds.view(batch_size * num_videos_per_prompt, seq_len, -1)

        return prompt_embeds

    def encode_prompt(
        self,
        prompt: Union[str, List[str]],
        negative_prompt: Optional[Union[str, List[str]]] = None,
        do_classifier_free_guidance: bool = True,
        num_videos_per_prompt: int = 1,
        prompt_embeds: Optional[torch.Tensor] = None,
        negative_prompt_embeds: Optional[torch.Tensor] = None,
        max_sequence_length: int = 226,
        device: Optional[torch.device] = None,
        dtype: Optional[torch.dtype] = None,
    ):
        r"""
        Encodes the prompt into text encoder hidden states.

        Args:
            prompt (`str` or `List[str]`, *optional*):
                prompt to be encoded
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. If not defined, one has to pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale`
                is less than `1`).
            do_classifier_free_guidance (`bool`, *optional*, defaults to `True`):
                Whether to use classifier free guidance or not.
            num_videos_per_prompt (`int`, *optional*, defaults to 1):
                Number of videos that should be generated per prompt. torch device to place the resulting embeddings on
            prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If
                not provided, text embeddings will be generated from `prompt` input argument.
            negative_prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
                argument.
            device: (`torch.device`, *optional*):
                torch device
            dtype: (`torch.dtype`, *optional*):
                torch dtype
        """
        device = device or self._execution_device

        prompt = [prompt] if isinstance(prompt, str) else prompt
        if prompt is not None:
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        if prompt_embeds is None:
            prompt_embeds = self._get_t5_prompt_embeds(
                prompt=prompt,
                num_videos_per_prompt=num_videos_per_prompt,
                max_sequence_length=max_sequence_length,
                device=device,
                dtype=dtype,
            )

        if do_classifier_free_guidance and negative_prompt_embeds is None:
            negative_prompt = negative_prompt or ""
            negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt

            if prompt is not None and type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )

            negative_prompt_embeds = self._get_t5_prompt_embeds(
                prompt=negative_prompt,
                num_videos_per_prompt=num_videos_per_prompt,
                max_sequence_length=max_sequence_length,
                device=device,
                dtype=dtype,
            )

        return prompt_embeds, negative_prompt_embeds

    def prepare_latents(
        self, batch_size, num_channels_latents, num_frames, height, width, dtype, device, generator, latents=None
    ):
        """Sample (or reuse) initial noise latents of shape (B, F', C, H', W') and scale by the scheduler's sigma."""
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        shape = (
            batch_size,
            (num_frames - 1) // self.vae_scale_factor_temporal + 1,
            num_channels_latents,
            height // self.vae_scale_factor_spatial,
            width // self.vae_scale_factor_spatial,
        )

        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            latents = latents.to(device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        return latents

    def decode_latents(self, latents: torch.Tensor) -> torch.Tensor:
        """Unscale the latents and run the VAE decoder; returns pixel-space frames."""
        latents = latents.permute(0, 2, 1, 3, 4)  # [batch_size, num_channels, num_frames, height, width]
        latents = 1 / self.vae_scaling_factor_image * latents

        frames = self.vae.decode(latents).sample
        return frames

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
    def prepare_extra_step_kwargs(self, generator, eta):
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://huggingface.co/papers/2010.02502
        # and should be between [0, 1]

        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        # check if the scheduler accepts generator
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs["generator"] = generator
        return extra_step_kwargs

    # Copied from diffusers.pipelines.latte.pipeline_latte.LattePipeline.check_inputs
    def check_inputs(
        self,
        prompt,
        height,
        width,
        negative_prompt,
        callback_on_step_end_tensor_inputs,
        prompt_embeds=None,
        negative_prompt_embeds=None,
    ):
        """Validate user-facing call arguments; raises `ValueError` on inconsistent combinations."""
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if callback_on_step_end_tensor_inputs is not None and not all(
            k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
        ):
            raise ValueError(
                f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
            )
        if prompt is not None and prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
                " only forward one of the two."
            )
        elif prompt is None and prompt_embeds is None:
            raise ValueError(
                "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
            )
        elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if prompt is not None and negative_prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `prompt`: {prompt} and `negative_prompt_embeds`:"
                f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
            )

        if negative_prompt is not None and negative_prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
                f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
            )

        if prompt_embeds is not None and negative_prompt_embeds is not None:
            if prompt_embeds.shape != negative_prompt_embeds.shape:
                raise ValueError(
                    "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
                    f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
                    f" {negative_prompt_embeds.shape}."
                )

    def fuse_qkv_projections(self) -> None:
        r"""Enables fused QKV projections."""
        self.fusing_transformer = True
        self.transformer.fuse_qkv_projections()

    def unfuse_qkv_projections(self) -> None:
        r"""Disable QKV projection fusion if enabled."""
        if not self.fusing_transformer:
            logger.warning("The Transformer was not initially fused for QKV projections. Doing nothing.")
        else:
            self.transformer.unfuse_qkv_projections()
            self.fusing_transformer = False

    def _prepare_rotary_positional_embeddings(
        self,
        height: int,
        width: int,
        num_frames: int,
        device: torch.device,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Build 3D RoPE cos/sin tables for the transformer; branch on `patch_size_t` (CogVideoX 1.0 vs 1.5)."""
        grid_height = height // (self.vae_scale_factor_spatial * self.transformer.config.patch_size)
        grid_width = width // (self.vae_scale_factor_spatial * self.transformer.config.patch_size)

        p = self.transformer.config.patch_size
        p_t = self.transformer.config.patch_size_t

        base_size_width = self.transformer.config.sample_width // p
        base_size_height = self.transformer.config.sample_height // p

        if p_t is None:
            # CogVideoX 1.0
            grid_crops_coords = get_resize_crop_region_for_grid(
                (grid_height, grid_width), base_size_width, base_size_height
            )
            freqs_cos, freqs_sin = get_3d_rotary_pos_embed(
                embed_dim=self.transformer.config.attention_head_dim,
                crops_coords=grid_crops_coords,
                grid_size=(grid_height, grid_width),
                temporal_size=num_frames,
                device=device,
            )
        else:
            # CogVideoX 1.5
            base_num_frames = (num_frames + p_t - 1) // p_t
            freqs_cos, freqs_sin = get_3d_rotary_pos_embed(
                embed_dim=self.transformer.config.attention_head_dim,
                crops_coords=None,
                grid_size=(grid_height, grid_width),
                temporal_size=base_num_frames,
                grid_type="slice",
                max_size=(base_size_height, base_size_width),
                device=device,
            )

        return freqs_cos, freqs_sin

    @property
    def guidance_scale(self):
        return self._guidance_scale

    @property
    def do_spatio_temporal_guidance(self):
        # STG is active whenever a positive perturbation scale was requested.
        return self._stg_scale > 0.0

    @property
    def num_timesteps(self):
        return self._num_timesteps

    @property
    def attention_kwargs(self):
        return self._attention_kwargs

    @property
    def current_timestep(self):
        return self._current_timestep

    @property
    def interrupt(self):
        return self._interrupt

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        prompt: Optional[Union[str, List[str]]] = None,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        height: Optional[int] = None,
        width: Optional[int] = None,
        num_frames: Optional[int] = None,
        num_inference_steps: int = 50,
        timesteps: Optional[List[int]] = None,
        guidance_scale: float = 6,
        use_dynamic_cfg: bool = False,
        num_videos_per_prompt: int = 1,
        eta: float = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        prompt_embeds: Optional[torch.FloatTensor] = None,
        negative_prompt_embeds: Optional[torch.FloatTensor] = None,
        output_type: str = "pil",
        return_dict: bool = True,
        attention_kwargs: Optional[Dict[str, Any]] = None,
        callback_on_step_end: Optional[
            Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks]
        ] = None,
        callback_on_step_end_tensor_inputs: List[str] = ["latents"],
        max_sequence_length: int = 226,
        stg_applied_layers_idx: Optional[List[int]] = None,
        stg_scale: float = 0.0,
        do_rescaling: bool = False,
    ) -> Union[CogVideoXPipelineOutput, Tuple]:
        """
        Function invoked when calling the pipeline for generation.

        Args:
            prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`.
                instead.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. If not defined, one has to pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale`
                is less than `1`).
            height (`int`, *optional*, defaults to self.transformer.config.sample_height * self.vae_scale_factor_spatial):
                The height in pixels of the generated image. This is set to 480 by default for the best results.
            width (`int`, *optional*, defaults to self.transformer.config.sample_height * self.vae_scale_factor_spatial):
                The width in pixels of the generated image. This is set to 720 by default for the best results.
            num_frames (`int`, defaults to `48`):
                Number of frames to generate. Must be divisible by self.vae_scale_factor_temporal. Generated video will
                contain 1 extra frame because CogVideoX is conditioned with (num_seconds * fps + 1) frames where
                num_seconds is 6 and fps is 8. However, since videos can be saved at any fps, the only condition that
                needs to be satisfied is that of divisibility mentioned above.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            timesteps (`List[int]`, *optional*):
                Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument
                in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
                passed will be used. Must be in descending order.
            guidance_scale (`float`, *optional*, defaults to 6):
                Guidance scale as defined in [Classifier-Free Diffusion
                Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2.
                of [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting
                `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to
                the text `prompt`, usually at the expense of lower image quality.
            use_dynamic_cfg (`bool`, *optional*, defaults to `False`):
                If `True`, the effective guidance scale is annealed over the denoising schedule with a cosine ramp
                instead of staying constant.
            num_videos_per_prompt (`int`, *optional*, defaults to 1):
                The number of videos to generate per prompt. Currently forced to 1 internally.
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) from the [DDIM](https://huggingface.co/papers/2010.02502) paper. Only
                applies to schedulers whose `step` accepts `eta`; ignored by others.
            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
                to make generation deterministic.
            latents (`torch.FloatTensor`, *optional*):
                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
                tensor will be generated by sampling using the supplied random `generator`.
            prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If
                not provided, text embeddings will be generated from `prompt` input argument.
            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
                argument.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generate image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] instead
                of a plain tuple.
            attention_kwargs (`dict`, *optional*):
                A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
                `self.processor` in
                [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
            callback_on_step_end (`Callable`, *optional*):
                A function that calls at the end of each denoising steps during the inference. The function is called
                with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
                callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
                `callback_on_step_end_tensor_inputs`.
            callback_on_step_end_tensor_inputs (`List`, *optional*):
                The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
                will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
                `._callback_tensor_inputs` attribute of your pipeline class.
            max_sequence_length (`int`, defaults to `226`):
                Maximum sequence length in encoded prompt. Must be consistent with
                `self.transformer.config.max_text_seq_length` otherwise may lead to poor results.
            stg_applied_layers_idx (`List[int]`, *optional*, defaults to `[11]`):
                Indices of the transformer blocks whose residual computation is skipped for the perturbed batch
                entries when spatio-temporal guidance is active.
            stg_scale (`float`, *optional*, defaults to `0.0`):
                Strength of spatio-temporal guidance. `0.0` disables STG (plain classifier-free guidance).
            do_rescaling (`bool`, *optional*, defaults to `False`):
                If `True` (STG branch only), rescale the guided noise prediction towards the std of the
                text-conditional prediction to reduce over-saturation.

        Examples:

        Returns:
            [`~pipelines.cogvideo.pipeline_cogvideox.CogVideoXPipelineOutput`] or `tuple`:
            [`~pipelines.cogvideo.pipeline_cogvideox.CogVideoXPipelineOutput`] if `return_dict` is True, otherwise a
            `tuple`. When returning a tuple, the first element is a list with the generated images.
        """
        # Avoid a shared mutable default argument: resolve the documented default here.
        if stg_applied_layers_idx is None:
            stg_applied_layers_idx = [11]

        if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)):
            callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs

        height = height or self.transformer.config.sample_height * self.vae_scale_factor_spatial
        width = width or self.transformer.config.sample_width * self.vae_scale_factor_spatial
        num_frames = num_frames or self.transformer.config.sample_frames

        # Only a single video per prompt is currently supported; any other value is overridden.
        num_videos_per_prompt = 1

        # 1. Check inputs. Raise error if not correct
        self.check_inputs(
            prompt,
            height,
            width,
            negative_prompt,
            callback_on_step_end_tensor_inputs,
            prompt_embeds,
            negative_prompt_embeds,
        )
        self._stg_scale = stg_scale
        self._guidance_scale = guidance_scale
        self._attention_kwargs = attention_kwargs
        self._current_timestep = None
        self._interrupt = False

        if self.do_spatio_temporal_guidance:
            # NOTE(review): the patched forward is not restored after this call; this is benign because
            # forward_with_stg degenerates to the stock block when the batch has no perturbed rows.
            for i in stg_applied_layers_idx:
                self.transformer.transformer_blocks[i].forward = types.MethodType(
                    forward_with_stg, self.transformer.transformer_blocks[i]
                )

        # 2. Default call parameters
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        device = self._execution_device

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0

        # 3. Encode input prompt
        prompt_embeds, negative_prompt_embeds = self.encode_prompt(
            prompt,
            negative_prompt,
            do_classifier_free_guidance,
            num_videos_per_prompt=num_videos_per_prompt,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            max_sequence_length=max_sequence_length,
            device=device,
        )
        if do_classifier_free_guidance and not self.do_spatio_temporal_guidance:
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
        elif do_classifier_free_guidance and self.do_spatio_temporal_guidance:
            # STG batch layout: [uncond, cond, perturbed] — perturbed rows reuse the conditional embeddings.
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds, prompt_embeds], dim=0)

        # 4. Prepare timesteps
        timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps)
        self._num_timesteps = len(timesteps)

        # 5. Prepare latents
        latent_frames = (num_frames - 1) // self.vae_scale_factor_temporal + 1

        # For CogVideoX 1.5, the latent frames should be padded to make it divisible by patch_size_t
        patch_size_t = self.transformer.config.patch_size_t
        additional_frames = 0
        if patch_size_t is not None and latent_frames % patch_size_t != 0:
            additional_frames = patch_size_t - latent_frames % patch_size_t
            num_frames += additional_frames * self.vae_scale_factor_temporal

        latent_channels = self.transformer.config.in_channels
        latents = self.prepare_latents(
            batch_size * num_videos_per_prompt,
            latent_channels,
            num_frames,
            height,
            width,
            prompt_embeds.dtype,
            device,
            generator,
            latents,
        )

        # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
        extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)

        # 7. Create rotary embeds if required
        image_rotary_emb = (
            self._prepare_rotary_positional_embeddings(height, width, latents.size(1), device)
            if self.transformer.config.use_rotary_positional_embeddings
            else None
        )

        # 8. Denoising loop
        num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)

        with self.progress_bar(total=num_inference_steps) as progress_bar:
            # for DPM-solver++
            old_pred_original_sample = None
            for i, t in enumerate(timesteps):
                if self.interrupt:
                    continue

                self._current_timestep = t
                if do_classifier_free_guidance and not self.do_spatio_temporal_guidance:
                    latent_model_input = torch.cat([latents] * 2)
                elif do_classifier_free_guidance and self.do_spatio_temporal_guidance:
                    latent_model_input = torch.cat([latents] * 3)
                else:
                    latent_model_input = latents
                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

                # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
                timestep = t.expand(latent_model_input.shape[0])

                # predict noise model_output
                noise_pred = self.transformer(
                    hidden_states=latent_model_input,
                    encoder_hidden_states=prompt_embeds,
                    timestep=timestep,
                    image_rotary_emb=image_rotary_emb,
                    attention_kwargs=attention_kwargs,
                    return_dict=False,
                )[0]
                noise_pred = noise_pred.float()

                # perform guidance
                if use_dynamic_cfg:
                    self._guidance_scale = 1 + guidance_scale * (
                        (1 - math.cos(math.pi * ((num_inference_steps - t.item()) / num_inference_steps) ** 5.0)) / 2
                    )
                if do_classifier_free_guidance and not self.do_spatio_temporal_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond)
                elif do_classifier_free_guidance and self.do_spatio_temporal_guidance:
                    noise_pred_uncond, noise_pred_text, noise_pred_perturb = noise_pred.chunk(3)
                    noise_pred = (
                        noise_pred_uncond
                        + self.guidance_scale * (noise_pred_text - noise_pred_uncond)
                        + self._stg_scale * (noise_pred_text - noise_pred_perturb)
                    )
                    if do_rescaling:
                        rescaling_scale = 0.7
                        factor = noise_pred_text.std() / noise_pred.std()
                        factor = rescaling_scale * factor + (1 - rescaling_scale)
                        noise_pred = noise_pred * factor

                # compute the previous noisy sample x_t -> x_t-1
                if not isinstance(self.scheduler, CogVideoXDPMScheduler):
                    latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
                else:
                    latents, old_pred_original_sample = self.scheduler.step(
                        noise_pred,
                        old_pred_original_sample,
                        t,
                        timesteps[i - 1] if i > 0 else None,
                        latents,
                        **extra_step_kwargs,
                        return_dict=False,
                    )
                latents = latents.to(prompt_embeds.dtype)

                # call the callback, if provided
                if callback_on_step_end is not None:
                    callback_kwargs = {}
                    for k in callback_on_step_end_tensor_inputs:
                        callback_kwargs[k] = locals()[k]
                    callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)

                    latents = callback_outputs.pop("latents", latents)
                    prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
                    negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)

                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                    progress_bar.update()

                if XLA_AVAILABLE:
                    xm.mark_step()

        self._current_timestep = None

        if not output_type == "latent":
            # Discard any padding frames that were added for CogVideoX 1.5
            latents = latents[:, additional_frames:]
            video = self.decode_latents(latents)
            video = self.video_processor.postprocess_video(video=video, output_type=output_type)
        else:
            video = latents

        # Offload all models
        self.maybe_free_model_hooks()

        if not return_dict:
            return (video,)

        return CogVideoXPipelineOutput(frames=video)
{ "repo_id": "huggingface/diffusers", "file_path": "examples/community/pipeline_stg_cogvideox.py", "license": "Apache License 2.0", "lines": 757, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:examples/community/pipeline_stg_hunyuan_video.py
# Copyright 2025 The HunyuanVideo Team and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import types from typing import Any, Callable, Dict, List, Optional, Tuple, Union import numpy as np import torch from transformers import CLIPTextModel, CLIPTokenizer, LlamaModel, LlamaTokenizerFast from diffusers.callbacks import MultiPipelineCallbacks, PipelineCallback from diffusers.loaders import HunyuanVideoLoraLoaderMixin from diffusers.models import AutoencoderKLHunyuanVideo, HunyuanVideoTransformer3DModel from diffusers.pipelines.hunyuan_video.pipeline_output import HunyuanVideoPipelineOutput from diffusers.pipelines.pipeline_utils import DiffusionPipeline from diffusers.schedulers import FlowMatchEulerDiscreteScheduler from diffusers.utils import deprecate, is_torch_xla_available, logging, replace_example_docstring from diffusers.utils.torch_utils import randn_tensor from diffusers.video_processor import VideoProcessor if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```python >>> import torch >>> from diffusers.utils import export_to_video >>> from diffusers import HunyuanVideoTransformer3DModel >>> from examples.community.pipeline_stg_hunyuan_video import HunyuanVideoSTGPipeline >>> model_id = "hunyuanvideo-community/HunyuanVideo" >>> transformer = 
HunyuanVideoTransformer3DModel.from_pretrained( ... model_id, subfolder="transformer", torch_dtype=torch.bfloat16 ... ) >>> pipe = HunyuanVideoSTGPipeline.from_pretrained(model_id, transformer=transformer, torch_dtype=torch.float16) >>> pipe.vae.enable_tiling() >>> pipe.to("cuda") >>> # Configure STG mode options >>> stg_applied_layers_idx = [2] # Layer indices from 0 to 41 >>> stg_scale = 1.0 # Set 0.0 for CFG >>> output = pipe( ... prompt="A wolf howling at the moon, with the moon subtly resembling a giant clock face, realistic style.", ... height=320, ... width=512, ... num_frames=61, ... num_inference_steps=30, ... stg_applied_layers_idx=stg_applied_layers_idx, ... stg_scale=stg_scale, >>> ).frames[0] >>> export_to_video(output, "output.mp4", fps=15) ``` """ DEFAULT_PROMPT_TEMPLATE = { "template": ( "<|start_header_id|>system<|end_header_id|>\n\nDescribe the video by detailing the following aspects: " "1. The main content and theme of the video." "2. The color, shape, size, texture, quantity, text, and spatial relationships of the objects." "3. Actions, events, behaviors temporal relationships, physical movement changes of the objects." "4. background environment, light, style and atmosphere." "5. camera angles, movements, and transitions used in the video:<|eot_id|>" "<|start_header_id|>user<|end_header_id|>\n\n{}<|eot_id|>" ), "crop_start": 95, } def forward_with_stg( self, hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor, temb: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, freqs_cis: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, ) -> Tuple[torch.Tensor, torch.Tensor]: return hidden_states, encoder_hidden_states def forward_without_stg( self, hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor, temb: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, freqs_cis: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, ) -> Tuple[torch.Tensor, torch.Tensor]: # 1. 
def forward_without_stg(
    self,
    hidden_states: torch.Tensor,
    encoder_hidden_states: torch.Tensor,
    temb: torch.Tensor,
    attention_mask: Optional[torch.Tensor] = None,
    freqs_cis: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
) -> Tuple[torch.Tensor, torch.Tensor]:
    """Standard (unperturbed) forward pass for a HunyuanVideo dual-stream block.

    Bound onto selected `transformer_blocks` via `types.MethodType` to restore
    normal behavior after an STG-perturbed pass. The sequence is: AdaLN-style
    modulation from `temb`, joint attention over both streams, gated residual
    connections, then a modulated feed-forward on each stream.

    Returns the updated `(hidden_states, encoder_hidden_states)` pair.
    """
    # 1. Input normalization
    # norm1/norm1_context return the normalized states plus per-sample
    # gate/shift/scale modulation tensors derived from `temb`.
    norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb)
    norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context(
        encoder_hidden_states, emb=temb
    )

    # 2. Joint attention
    # One attention call consumes both the video and the text stream and
    # produces an output for each.
    attn_output, context_attn_output = self.attn(
        hidden_states=norm_hidden_states,
        encoder_hidden_states=norm_encoder_hidden_states,
        attention_mask=attention_mask,
        image_rotary_emb=freqs_cis,
    )

    # 3. Modulation and residual connection
    # `unsqueeze(1)` / `[:, None]` broadcast the per-sample modulation over the
    # sequence dimension — assumes gates are (batch, dim); TODO confirm.
    hidden_states = hidden_states + attn_output * gate_msa.unsqueeze(1)
    encoder_hidden_states = encoder_hidden_states + context_attn_output * c_gate_msa.unsqueeze(1)

    norm_hidden_states = self.norm2(hidden_states)
    norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states)

    norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
    norm_encoder_hidden_states = norm_encoder_hidden_states * (1 + c_scale_mlp[:, None]) + c_shift_mlp[:, None]

    # 4. Feed-forward
    ff_output = self.ff(norm_hidden_states)
    context_ff_output = self.ff_context(norm_encoder_hidden_states)

    hidden_states = hidden_states + gate_mlp.unsqueeze(1) * ff_output
    encoder_hidden_states = encoder_hidden_states + c_gate_mlp.unsqueeze(1) * context_ff_output

    return hidden_states, encoder_hidden_states
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps
def retrieve_timesteps(
    scheduler,
    num_inference_steps: Optional[int] = None,
    device: Optional[Union[str, torch.device]] = None,
    timesteps: Optional[List[int]] = None,
    sigmas: Optional[List[float]] = None,
    **kwargs,
):
    r"""
    Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
    custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.

    Args:
        scheduler (`SchedulerMixin`):
            The scheduler to get timesteps from.
        num_inference_steps (`int`):
            The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps`
            must be `None`.
        device (`str` or `torch.device`, *optional*):
            The device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
        timesteps (`List[int]`, *optional*):
            Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed,
            `num_inference_steps` and `sigmas` must be `None`.
        sigmas (`List[float]`, *optional*):
            Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed,
            `num_inference_steps` and `timesteps` must be `None`.

    Returns:
        `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and
        the second element is the number of inference steps.
    """
    if timesteps is not None and sigmas is not None:
        raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values")

    def _accepts(kwarg_name: str) -> bool:
        # A custom schedule can only be forwarded when the scheduler's
        # `set_timesteps` signature accepts the corresponding keyword.
        return kwarg_name in set(inspect.signature(scheduler.set_timesteps).parameters.keys())

    if timesteps is not None:
        if not _accepts("timesteps"):
            raise ValueError(
                f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
                f" timestep schedules. Please check whether you are using the correct scheduler."
            )
        scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
        timesteps = scheduler.timesteps
        num_inference_steps = len(timesteps)
    elif sigmas is not None:
        if not _accepts("sigmas"):
            raise ValueError(
                f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
                f" sigmas schedules. Please check whether you are using the correct scheduler."
            )
        scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs)
        timesteps = scheduler.timesteps
        num_inference_steps = len(timesteps)
    else:
        # Default path: let the scheduler build its own schedule; the caller's
        # step count is returned unchanged.
        scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
        timesteps = scheduler.timesteps
    return timesteps, num_inference_steps
) scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return timesteps, num_inference_steps class HunyuanVideoSTGPipeline(DiffusionPipeline, HunyuanVideoLoraLoaderMixin): r""" Pipeline for text-to-video generation using HunyuanVideo. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). Args: text_encoder ([`LlamaModel`]): [Llava Llama3-8B](https://huggingface.co/xtuner/llava-llama-3-8b-v1_1-transformers). tokenizer (`LlamaTokenizer`): Tokenizer from [Llava Llama3-8B](https://huggingface.co/xtuner/llava-llama-3-8b-v1_1-transformers). transformer ([`HunyuanVideoTransformer3DModel`]): Conditional Transformer to denoise the encoded image latents. scheduler ([`FlowMatchEulerDiscreteScheduler`]): A scheduler to be used in combination with `transformer` to denoise the encoded image latents. vae ([`AutoencoderKLHunyuanVideo`]): Variational Auto-Encoder (VAE) Model to encode and decode videos to and from latent representations. text_encoder_2 ([`CLIPTextModel`]): [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. tokenizer_2 (`CLIPTokenizer`): Tokenizer of class [CLIPTokenizer](https://huggingface.co/docs/transformers/en/model_doc/clip#transformers.CLIPTokenizer). 
""" model_cpu_offload_seq = "text_encoder->text_encoder_2->transformer->vae" _callback_tensor_inputs = ["latents", "prompt_embeds"] def __init__( self, text_encoder: LlamaModel, tokenizer: LlamaTokenizerFast, transformer: HunyuanVideoTransformer3DModel, vae: AutoencoderKLHunyuanVideo, scheduler: FlowMatchEulerDiscreteScheduler, text_encoder_2: CLIPTextModel, tokenizer_2: CLIPTokenizer, ): super().__init__() self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, transformer=transformer, scheduler=scheduler, text_encoder_2=text_encoder_2, tokenizer_2=tokenizer_2, ) self.vae_scale_factor_temporal = self.vae.temporal_compression_ratio if getattr(self, "vae", None) else 4 self.vae_scale_factor_spatial = self.vae.spatial_compression_ratio if getattr(self, "vae", None) else 8 self.video_processor = VideoProcessor(vae_scale_factor=self.vae_scale_factor_spatial) def _get_llama_prompt_embeds( self, prompt: Union[str, List[str]], prompt_template: Dict[str, Any], num_videos_per_prompt: int = 1, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None, max_sequence_length: int = 256, num_hidden_layers_to_skip: int = 2, ) -> Tuple[torch.Tensor, torch.Tensor]: device = device or self._execution_device dtype = dtype or self.text_encoder.dtype prompt = [prompt] if isinstance(prompt, str) else prompt batch_size = len(prompt) prompt = [prompt_template["template"].format(p) for p in prompt] crop_start = prompt_template.get("crop_start", None) if crop_start is None: prompt_template_input = self.tokenizer( prompt_template["template"], padding="max_length", return_tensors="pt", return_length=False, return_overflowing_tokens=False, return_attention_mask=False, ) crop_start = prompt_template_input["input_ids"].shape[-1] # Remove <|eot_id|> token and placeholder {} crop_start -= 2 max_sequence_length += crop_start text_inputs = self.tokenizer( prompt, max_length=max_sequence_length, padding="max_length", truncation=True, return_tensors="pt", 
    def _get_llama_prompt_embeds(
        self,
        prompt: Union[str, List[str]],
        prompt_template: Dict[str, Any],
        num_videos_per_prompt: int = 1,
        device: Optional[torch.device] = None,
        dtype: Optional[torch.dtype] = None,
        max_sequence_length: int = 256,
        num_hidden_layers_to_skip: int = 2,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Encode `prompt` with the Llama text encoder after wrapping it in `prompt_template`.

        Returns `(prompt_embeds, prompt_attention_mask)` with batch dimension
        `len(prompt) * num_videos_per_prompt`. The template prefix tokens are
        cropped off the front of both tensors, and the embedding is taken from
        an intermediate hidden state (`num_hidden_layers_to_skip` layers before
        the last) rather than the final layer output.
        """
        device = device or self._execution_device
        dtype = dtype or self.text_encoder.dtype

        prompt = [prompt] if isinstance(prompt, str) else prompt
        batch_size = len(prompt)

        # Insert each user prompt into the system/user chat template.
        prompt = [prompt_template["template"].format(p) for p in prompt]

        # When the template does not declare how many prefix tokens to crop,
        # measure it by tokenizing the raw template once.
        crop_start = prompt_template.get("crop_start", None)
        if crop_start is None:
            prompt_template_input = self.tokenizer(
                prompt_template["template"],
                padding="max_length",
                return_tensors="pt",
                return_length=False,
                return_overflowing_tokens=False,
                return_attention_mask=False,
            )
            crop_start = prompt_template_input["input_ids"].shape[-1]
            # Remove <|eot_id|> token and placeholder {}
            crop_start -= 2

        # Budget extra room so cropping the template still leaves
        # `max_sequence_length` tokens for the actual prompt.
        max_sequence_length += crop_start
        text_inputs = self.tokenizer(
            prompt,
            max_length=max_sequence_length,
            padding="max_length",
            truncation=True,
            return_tensors="pt",
            return_length=False,
            return_overflowing_tokens=False,
            return_attention_mask=True,
        )
        text_input_ids = text_inputs.input_ids.to(device=device)
        prompt_attention_mask = text_inputs.attention_mask.to(device=device)

        # Take an intermediate hidden state: index -(skip + 1) from the end.
        prompt_embeds = self.text_encoder(
            input_ids=text_input_ids,
            attention_mask=prompt_attention_mask,
            output_hidden_states=True,
        ).hidden_states[-(num_hidden_layers_to_skip + 1)]
        prompt_embeds = prompt_embeds.to(dtype=dtype)

        # Drop the template prefix from both the embeddings and the mask.
        if crop_start is not None and crop_start > 0:
            prompt_embeds = prompt_embeds[:, crop_start:]
            prompt_attention_mask = prompt_attention_mask[:, crop_start:]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        _, seq_len, _ = prompt_embeds.shape
        prompt_embeds = prompt_embeds.repeat(1, num_videos_per_prompt, 1)
        prompt_embeds = prompt_embeds.view(batch_size * num_videos_per_prompt, seq_len, -1)
        prompt_attention_mask = prompt_attention_mask.repeat(1, num_videos_per_prompt)
        prompt_attention_mask = prompt_attention_mask.view(batch_size * num_videos_per_prompt, seq_len)

        return prompt_embeds, prompt_attention_mask
truncated because CLIP can only handle sequences up to" f" {max_sequence_length} tokens: {removed_text}" ) prompt_embeds = self.text_encoder_2(text_input_ids.to(device), output_hidden_states=False).pooler_output # duplicate text embeddings for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_videos_per_prompt) prompt_embeds = prompt_embeds.view(batch_size * num_videos_per_prompt, -1) return prompt_embeds def encode_prompt( self, prompt: Union[str, List[str]], prompt_2: Union[str, List[str]] = None, prompt_template: Dict[str, Any] = DEFAULT_PROMPT_TEMPLATE, num_videos_per_prompt: int = 1, prompt_embeds: Optional[torch.Tensor] = None, pooled_prompt_embeds: Optional[torch.Tensor] = None, prompt_attention_mask: Optional[torch.Tensor] = None, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None, max_sequence_length: int = 256, ): if prompt_embeds is None: prompt_embeds, prompt_attention_mask = self._get_llama_prompt_embeds( prompt, prompt_template, num_videos_per_prompt, device=device, dtype=dtype, max_sequence_length=max_sequence_length, ) if pooled_prompt_embeds is None: if prompt_2 is None and pooled_prompt_embeds is None: prompt_2 = prompt pooled_prompt_embeds = self._get_clip_prompt_embeds( prompt, num_videos_per_prompt, device=device, dtype=dtype, max_sequence_length=77, ) return prompt_embeds, pooled_prompt_embeds, prompt_attention_mask def check_inputs( self, prompt, prompt_2, height, width, prompt_embeds=None, callback_on_step_end_tensor_inputs=None, prompt_template=None, ): if height % 16 != 0 or width % 16 != 0: raise ValueError(f"`height` and `width` have to be divisible by 16 but are {height} and {width}.") if callback_on_step_end_tensor_inputs is not None and not all( k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs ): raise ValueError( f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in 
callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" ) if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt_2 is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." ) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)): raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}") if prompt_template is not None: if not isinstance(prompt_template, dict): raise ValueError(f"`prompt_template` has to be of type `dict` but is {type(prompt_template)}") if "template" not in prompt_template: raise ValueError( f"`prompt_template` has to contain a key `template` but only found {prompt_template.keys()}" ) def prepare_latents( self, batch_size: int, num_channels_latents: 32, height: int = 720, width: int = 1280, num_frames: int = 129, dtype: Optional[torch.dtype] = None, device: Optional[torch.device] = None, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, ) -> torch.Tensor: if latents is not None: return latents.to(device=device, dtype=dtype) shape = ( batch_size, num_channels_latents, num_frames, int(height) // self.vae_scale_factor_spatial, int(width) // self.vae_scale_factor_spatial, ) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( 
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) return latents def enable_vae_slicing(self): r""" Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. """ depr_message = f"Calling `enable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_slicing()`." deprecate( "enable_vae_slicing", "0.40.0", depr_message, ) self.vae.enable_slicing() def disable_vae_slicing(self): r""" Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to computing decoding in one step. """ depr_message = f"Calling `disable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_slicing()`." deprecate( "disable_vae_slicing", "0.40.0", depr_message, ) self.vae.disable_slicing() def enable_vae_tiling(self): r""" Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow processing larger images. """ depr_message = f"Calling `enable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_tiling()`." deprecate( "enable_vae_tiling", "0.40.0", depr_message, ) self.vae.enable_tiling() def disable_vae_tiling(self): r""" Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to computing decoding in one step. 
""" depr_message = f"Calling `disable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_tiling()`." deprecate( "disable_vae_tiling", "0.40.0", depr_message, ) self.vae.disable_tiling() @property def guidance_scale(self): return self._guidance_scale @property def do_spatio_temporal_guidance(self): return self._stg_scale > 0.0 @property def num_timesteps(self): return self._num_timesteps @property def attention_kwargs(self): return self._attention_kwargs @property def current_timestep(self): return self._current_timestep @property def interrupt(self): return self._interrupt @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, prompt: Union[str, List[str]] = None, prompt_2: Union[str, List[str]] = None, height: int = 720, width: int = 1280, num_frames: int = 129, num_inference_steps: int = 50, sigmas: List[float] = None, guidance_scale: float = 6.0, num_videos_per_prompt: Optional[int] = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, prompt_embeds: Optional[torch.Tensor] = None, pooled_prompt_embeds: Optional[torch.Tensor] = None, prompt_attention_mask: Optional[torch.Tensor] = None, output_type: str | None = "pil", return_dict: bool = True, attention_kwargs: Optional[Dict[str, Any]] = None, callback_on_step_end: Optional[ Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks] ] = None, callback_on_step_end_tensor_inputs: List[str] = ["latents"], prompt_template: Dict[str, Any] = DEFAULT_PROMPT_TEMPLATE, max_sequence_length: int = 256, stg_applied_layers_idx: Optional[List[int]] = [2], stg_scale: Optional[float] = 0.0, ): r""" The call function to the pipeline for generation. Args: prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. instead. 
prompt_2 (`str` or `List[str]`, *optional*): The prompt or prompts to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is will be used instead. height (`int`, defaults to `720`): The height in pixels of the generated image. width (`int`, defaults to `1280`): The width in pixels of the generated image. num_frames (`int`, defaults to `129`): The number of frames in the generated video. num_inference_steps (`int`, defaults to `50`): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. sigmas (`List[float]`, *optional*): Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed will be used. guidance_scale (`float`, defaults to `6.0`): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. Note that the only available HunyuanVideo model is CFG-distilled, which means that traditional guidance between unconditional and conditional latent is not applied. num_videos_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.Tensor`, *optional*): Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. 
If not provided, a latents tensor is generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, text embeddings are generated from the `prompt` input argument. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generated image. Choose between `PIL.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`HunyuanVideoPipelineOutput`] instead of a plain tuple. attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*): A function or a subclass of `PipelineCallback` or `MultiPipelineCallbacks` that is called at the end of each denoising step during the inference. with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by `callback_on_step_end_tensor_inputs`. callback_on_step_end_tensor_inputs (`List`, *optional*): The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the `._callback_tensor_inputs` attribute of your pipeline class. 
Examples: Returns: [`~HunyuanVideoPipelineOutput`] or `tuple`: If `return_dict` is `True`, [`HunyuanVideoPipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated images and the second element is a list of `bool`s indicating whether the corresponding generated image contains "not-safe-for-work" (nsfw) content. """ if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs # 1. Check inputs. Raise error if not correct self.check_inputs( prompt, prompt_2, height, width, prompt_embeds, callback_on_step_end_tensor_inputs, prompt_template, ) self._stg_scale = stg_scale self._guidance_scale = guidance_scale self._attention_kwargs = attention_kwargs self._current_timestep = None self._interrupt = False device = self._execution_device # 2. Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] # 3. Encode input prompt prompt_embeds, pooled_prompt_embeds, prompt_attention_mask = self.encode_prompt( prompt=prompt, prompt_2=prompt_2, prompt_template=prompt_template, num_videos_per_prompt=num_videos_per_prompt, prompt_embeds=prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, prompt_attention_mask=prompt_attention_mask, device=device, max_sequence_length=max_sequence_length, ) transformer_dtype = self.transformer.dtype prompt_embeds = prompt_embeds.to(transformer_dtype) prompt_attention_mask = prompt_attention_mask.to(transformer_dtype) if pooled_prompt_embeds is not None: pooled_prompt_embeds = pooled_prompt_embeds.to(transformer_dtype) # 4. Prepare timesteps sigmas = np.linspace(1.0, 0.0, num_inference_steps + 1)[:-1] if sigmas is None else sigmas timesteps, num_inference_steps = retrieve_timesteps( self.scheduler, num_inference_steps, device, sigmas=sigmas, ) # 5. 
Prepare latent variables num_channels_latents = self.transformer.config.in_channels num_latent_frames = (num_frames - 1) // self.vae_scale_factor_temporal + 1 latents = self.prepare_latents( batch_size * num_videos_per_prompt, num_channels_latents, height, width, num_latent_frames, torch.float32, device, generator, latents, ) # 6. Prepare guidance condition guidance = torch.tensor([guidance_scale] * latents.shape[0], dtype=transformer_dtype, device=device) * 1000.0 # 7. Denoising loop num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order self._num_timesteps = len(timesteps) with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): if self.interrupt: continue self._current_timestep = t latent_model_input = latents.to(transformer_dtype) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML timestep = t.expand(latents.shape[0]).to(latents.dtype) if self.do_spatio_temporal_guidance: for i in stg_applied_layers_idx: self.transformer.transformer_blocks[i].forward = types.MethodType( forward_without_stg, self.transformer.transformer_blocks[i] ) noise_pred = self.transformer( hidden_states=latent_model_input, timestep=timestep, encoder_hidden_states=prompt_embeds, encoder_attention_mask=prompt_attention_mask, pooled_projections=pooled_prompt_embeds, guidance=guidance, attention_kwargs=attention_kwargs, return_dict=False, )[0] if self.do_spatio_temporal_guidance: for i in stg_applied_layers_idx: self.transformer.transformer_blocks[i].forward = types.MethodType( forward_with_stg, self.transformer.transformer_blocks[i] ) noise_pred_perturb = self.transformer( hidden_states=latent_model_input, timestep=timestep, encoder_hidden_states=prompt_embeds, encoder_attention_mask=prompt_attention_mask, pooled_projections=pooled_prompt_embeds, guidance=guidance, attention_kwargs=attention_kwargs, return_dict=False, )[0] noise_pred = noise_pred + self._stg_scale * (noise_pred - noise_pred_perturb) # 
compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0] if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop("latents", latents) prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if XLA_AVAILABLE: xm.mark_step() self._current_timestep = None if not output_type == "latent": latents = latents.to(self.vae.dtype) / self.vae.config.scaling_factor video = self.vae.decode(latents, return_dict=False)[0] video = self.video_processor.postprocess_video(video, output_type=output_type) else: video = latents # Offload all models self.maybe_free_model_hooks() if not return_dict: return (video,) return HunyuanVideoPipelineOutput(frames=video)
{ "repo_id": "huggingface/diffusers", "file_path": "examples/community/pipeline_stg_hunyuan_video.py", "license": "Apache License 2.0", "lines": 714, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:examples/community/pipeline_stg_ltx.py
# Copyright 2025 Lightricks and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import types from typing import Any, Callable, Dict, List, Optional, Tuple, Union import numpy as np import torch from transformers import T5EncoderModel, T5TokenizerFast from diffusers.callbacks import MultiPipelineCallbacks, PipelineCallback from diffusers.loaders import FromSingleFileMixin, LTXVideoLoraLoaderMixin from diffusers.models.autoencoders import AutoencoderKLLTXVideo from diffusers.models.transformers import LTXVideoTransformer3DModel from diffusers.pipelines.ltx.pipeline_output import LTXPipelineOutput from diffusers.pipelines.pipeline_utils import DiffusionPipeline from diffusers.schedulers import FlowMatchEulerDiscreteScheduler from diffusers.utils import is_torch_xla_available, logging, replace_example_docstring from diffusers.utils.torch_utils import randn_tensor from diffusers.video_processor import VideoProcessor if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py >>> import torch >>> from diffusers.utils import export_to_video >>> from examples.community.pipeline_stg_ltx import LTXSTGPipeline >>> pipe = LTXSTGPipeline.from_pretrained("Lightricks/LTX-Video", torch_dtype=torch.bfloat16) >>> pipe.to("cuda") >>> prompt = "A woman with light skin, wearing a 
def forward_with_stg(
    self,
    hidden_states: torch.Tensor,
    encoder_hidden_states: torch.Tensor,
    temb: torch.Tensor,
    image_rotary_emb: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
    encoder_attention_mask: Optional[torch.Tensor] = None,
) -> torch.Tensor:
    """STG-perturbed forward for an LTX transformer block.

    Runs the block's normal computation for the whole batch, then overwrites
    the outputs of batch entries `[2:]` with their original inputs so those
    entries effectively skip this layer. Assumes the batch is laid out with
    the perturbed samples at index 2 and above — TODO confirm against the
    caller's batching convention.
    """
    # Save views of the perturbed entries' ORIGINAL inputs; they are written
    # back into the output below, making this layer an identity for them.
    hidden_states_ptb = hidden_states[2:]
    encoder_hidden_states_ptb = encoder_hidden_states[2:]

    batch_size = hidden_states.size(0)
    norm_hidden_states = self.norm1(hidden_states)

    # AdaLN-style modulation: six (shift/scale/gate) x (attention/MLP) tensors
    # derived from `temb` via the learned scale-shift table.
    num_ada_params = self.scale_shift_table.shape[0]
    ada_values = self.scale_shift_table[None, None] + temb.reshape(batch_size, temb.size(1), num_ada_params, -1)
    shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = ada_values.unbind(dim=2)
    norm_hidden_states = norm_hidden_states * (1 + scale_msa) + shift_msa

    # Self-attention on the video stream.
    attn_hidden_states = self.attn1(
        hidden_states=norm_hidden_states,
        encoder_hidden_states=None,
        image_rotary_emb=image_rotary_emb,
    )
    hidden_states = hidden_states + attn_hidden_states * gate_msa

    # Cross-attention to the text stream.
    attn_hidden_states = self.attn2(
        hidden_states,
        encoder_hidden_states=encoder_hidden_states,
        image_rotary_emb=None,
        attention_mask=encoder_attention_mask,
    )
    hidden_states = hidden_states + attn_hidden_states
    norm_hidden_states = self.norm2(hidden_states) * (1 + scale_mlp) + shift_mlp

    # Gated feed-forward.
    ff_output = self.ff(norm_hidden_states)
    hidden_states = hidden_states + ff_output * gate_mlp

    # Restore the saved inputs for the perturbed entries (in-place slice
    # assignment on the freshly computed output tensors).
    hidden_states[2:] = hidden_states_ptb
    encoder_hidden_states[2:] = encoder_hidden_states_ptb

    return hidden_states
# Copied from diffusers.pipelines.flux.pipeline_flux.calculate_shift
def calculate_shift(
    image_seq_len,
    base_seq_len: int = 256,
    max_seq_len: int = 4096,
    base_shift: float = 0.5,
    max_shift: float = 1.16,
):
    """Return the timestep-shift `mu` for a given image sequence length.

    Linearly maps `image_seq_len` through the line passing through the two
    anchor points `(base_seq_len, base_shift)` and `(max_seq_len, max_shift)`;
    lengths outside the anchors are extrapolated along the same line.
    """
    slope = (max_shift - base_shift) / (max_seq_len - base_seq_len)
    intercept = base_shift - slope * base_seq_len
    return image_seq_len * slope + intercept
If `sigmas` is passed, `num_inference_steps` and `timesteps` must be `None`. Returns: `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the second element is the number of inference steps. """ if timesteps is not None and sigmas is not None: raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") if timesteps is not None: accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" timestep schedules. Please check whether you are using the correct scheduler." ) scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif sigmas is not None: accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accept_sigmas: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" sigmas schedules. Please check whether you are using the correct scheduler." ) scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return timesteps, num_inference_steps class LTXSTGPipeline(DiffusionPipeline, FromSingleFileMixin, LTXVideoLoraLoaderMixin): r""" Pipeline for text-to-video generation. Reference: https://github.com/Lightricks/LTX-Video Args: transformer ([`LTXVideoTransformer3DModel`]): Conditional Transformer architecture to denoise the encoded video latents. scheduler ([`FlowMatchEulerDiscreteScheduler`]): A scheduler to be used in combination with `transformer` to denoise the encoded image latents. 
vae ([`AutoencoderKLLTXVideo`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. text_encoder ([`T5EncoderModel`]): [T5](https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5EncoderModel), specifically the [google/t5-v1_1-xxl](https://huggingface.co/google/t5-v1_1-xxl) variant. tokenizer (`CLIPTokenizer`): Tokenizer of class [CLIPTokenizer](https://huggingface.co/docs/transformers/en/model_doc/clip#transformers.CLIPTokenizer). tokenizer (`T5TokenizerFast`): Second Tokenizer of class [T5TokenizerFast](https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5TokenizerFast). """ model_cpu_offload_seq = "text_encoder->transformer->vae" _optional_components = [] _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"] def __init__( self, scheduler: FlowMatchEulerDiscreteScheduler, vae: AutoencoderKLLTXVideo, text_encoder: T5EncoderModel, tokenizer: T5TokenizerFast, transformer: LTXVideoTransformer3DModel, ): super().__init__() self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, transformer=transformer, scheduler=scheduler, ) self.vae_spatial_compression_ratio = ( self.vae.spatial_compression_ratio if getattr(self, "vae", None) is not None else 32 ) self.vae_temporal_compression_ratio = ( self.vae.temporal_compression_ratio if getattr(self, "vae", None) is not None else 8 ) self.transformer_spatial_patch_size = ( self.transformer.config.patch_size if getattr(self, "transformer", None) is not None else 1 ) self.transformer_temporal_patch_size = ( self.transformer.config.patch_size_t if getattr(self, "transformer") is not None else 1 ) self.video_processor = VideoProcessor(vae_scale_factor=self.vae_spatial_compression_ratio) self.tokenizer_max_length = ( self.tokenizer.model_max_length if getattr(self, "tokenizer", None) is not None else 128 ) def _get_t5_prompt_embeds( self, prompt: Union[str, List[str]] = None, num_videos_per_prompt: 
int = 1, max_sequence_length: int = 128, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None, ): device = device or self._execution_device dtype = dtype or self.text_encoder.dtype prompt = [prompt] if isinstance(prompt, str) else prompt batch_size = len(prompt) text_inputs = self.tokenizer( prompt, padding="max_length", max_length=max_sequence_length, truncation=True, add_special_tokens=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids prompt_attention_mask = text_inputs.attention_mask prompt_attention_mask = prompt_attention_mask.bool().to(device) untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, max_sequence_length - 1 : -1]) logger.warning( "The following part of your input was truncated because `max_sequence_length` is set to " f" {max_sequence_length} tokens: {removed_text}" ) prompt_embeds = self.text_encoder(text_input_ids.to(device))[0] prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) # duplicate text embeddings for each generation per prompt, using mps friendly method _, seq_len, _ = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_videos_per_prompt, 1) prompt_embeds = prompt_embeds.view(batch_size * num_videos_per_prompt, seq_len, -1) prompt_attention_mask = prompt_attention_mask.view(batch_size, -1) prompt_attention_mask = prompt_attention_mask.repeat(num_videos_per_prompt, 1) return prompt_embeds, prompt_attention_mask # Copied from diffusers.pipelines.mochi.pipeline_mochi.MochiPipeline.encode_prompt with 256->128 def encode_prompt( self, prompt: Union[str, List[str]], negative_prompt: Optional[Union[str, List[str]]] = None, do_classifier_free_guidance: bool = True, num_videos_per_prompt: int = 1, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: 
Optional[torch.Tensor] = None, prompt_attention_mask: Optional[torch.Tensor] = None, negative_prompt_attention_mask: Optional[torch.Tensor] = None, max_sequence_length: int = 128, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). do_classifier_free_guidance (`bool`, *optional*, defaults to `True`): Whether to use classifier free guidance or not. num_videos_per_prompt (`int`, *optional*, defaults to 1): Number of videos that should be generated per prompt. torch device to place the resulting embeddings on prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. 
device: (`torch.device`, *optional*): torch device dtype: (`torch.dtype`, *optional*): torch dtype """ device = device or self._execution_device prompt = [prompt] if isinstance(prompt, str) else prompt if prompt is not None: batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: prompt_embeds, prompt_attention_mask = self._get_t5_prompt_embeds( prompt=prompt, num_videos_per_prompt=num_videos_per_prompt, max_sequence_length=max_sequence_length, device=device, dtype=dtype, ) if do_classifier_free_guidance and negative_prompt_embeds is None: negative_prompt = negative_prompt or "" negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt if prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." 
) negative_prompt_embeds, negative_prompt_attention_mask = self._get_t5_prompt_embeds( prompt=negative_prompt, num_videos_per_prompt=num_videos_per_prompt, max_sequence_length=max_sequence_length, device=device, dtype=dtype, ) return prompt_embeds, prompt_attention_mask, negative_prompt_embeds, negative_prompt_attention_mask def check_inputs( self, prompt, height, width, callback_on_step_end_tensor_inputs=None, prompt_embeds=None, negative_prompt_embeds=None, prompt_attention_mask=None, negative_prompt_attention_mask=None, ): if height % 32 != 0 or width % 32 != 0: raise ValueError(f"`height` and `width` have to be divisible by 32 but are {height} and {width}.") if callback_on_step_end_tensor_inputs is not None and not all( k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs ): raise ValueError( f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" ) if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." 
) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if prompt_embeds is not None and prompt_attention_mask is None: raise ValueError("Must provide `prompt_attention_mask` when specifying `prompt_embeds`.") if negative_prompt_embeds is not None and negative_prompt_attention_mask is None: raise ValueError("Must provide `negative_prompt_attention_mask` when specifying `negative_prompt_embeds`.") if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." ) if prompt_attention_mask.shape != negative_prompt_attention_mask.shape: raise ValueError( "`prompt_attention_mask` and `negative_prompt_attention_mask` must have the same shape when passed directly, but" f" got: `prompt_attention_mask` {prompt_attention_mask.shape} != `negative_prompt_attention_mask`" f" {negative_prompt_attention_mask.shape}." ) @staticmethod def _pack_latents(latents: torch.Tensor, patch_size: int = 1, patch_size_t: int = 1) -> torch.Tensor: # Unpacked latents of shape are [B, C, F, H, W] are patched into tokens of shape [B, C, F // p_t, p_t, H // p, p, W // p, p]. # The patch dimensions are then permuted and collapsed into the channel dimension of shape: # [B, F // p_t * H // p * W // p, C * p_t * p * p] (an ndim=3 tensor). 
# dim=0 is the batch size, dim=1 is the effective video sequence length, dim=2 is the effective number of input features batch_size, num_channels, num_frames, height, width = latents.shape post_patch_num_frames = num_frames // patch_size_t post_patch_height = height // patch_size post_patch_width = width // patch_size latents = latents.reshape( batch_size, -1, post_patch_num_frames, patch_size_t, post_patch_height, patch_size, post_patch_width, patch_size, ) latents = latents.permute(0, 2, 4, 6, 1, 3, 5, 7).flatten(4, 7).flatten(1, 3) return latents @staticmethod def _unpack_latents( latents: torch.Tensor, num_frames: int, height: int, width: int, patch_size: int = 1, patch_size_t: int = 1 ) -> torch.Tensor: # Packed latents of shape [B, S, D] (S is the effective video sequence length, D is the effective feature dimensions) # are unpacked and reshaped into a video tensor of shape [B, C, F, H, W]. This is the inverse operation of # what happens in the `_pack_latents` method. batch_size = latents.size(0) latents = latents.reshape(batch_size, num_frames, height, width, -1, patch_size_t, patch_size, patch_size) latents = latents.permute(0, 4, 1, 5, 2, 6, 3, 7).flatten(6, 7).flatten(4, 5).flatten(2, 3) return latents @staticmethod def _normalize_latents( latents: torch.Tensor, latents_mean: torch.Tensor, latents_std: torch.Tensor, scaling_factor: float = 1.0 ) -> torch.Tensor: # Normalize latents across the channel dimension [B, C, F, H, W] latents_mean = latents_mean.view(1, -1, 1, 1, 1).to(latents.device, latents.dtype) latents_std = latents_std.view(1, -1, 1, 1, 1).to(latents.device, latents.dtype) latents = (latents - latents_mean) * scaling_factor / latents_std return latents @staticmethod def _denormalize_latents( latents: torch.Tensor, latents_mean: torch.Tensor, latents_std: torch.Tensor, scaling_factor: float = 1.0 ) -> torch.Tensor: # Denormalize latents across the channel dimension [B, C, F, H, W] latents_mean = latents_mean.view(1, -1, 1, 1, 
1).to(latents.device, latents.dtype) latents_std = latents_std.view(1, -1, 1, 1, 1).to(latents.device, latents.dtype) latents = latents * latents_std / scaling_factor + latents_mean return latents def prepare_latents( self, batch_size: int = 1, num_channels_latents: int = 128, height: int = 512, width: int = 704, num_frames: int = 161, dtype: Optional[torch.dtype] = None, device: Optional[torch.device] = None, generator: torch.Generator | None = None, latents: Optional[torch.Tensor] = None, ) -> torch.Tensor: if latents is not None: return latents.to(device=device, dtype=dtype) height = height // self.vae_spatial_compression_ratio width = width // self.vae_spatial_compression_ratio num_frames = (num_frames - 1) // self.vae_temporal_compression_ratio + 1 shape = (batch_size, num_channels_latents, num_frames, height, width) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
) latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) latents = self._pack_latents( latents, self.transformer_spatial_patch_size, self.transformer_temporal_patch_size ) return latents @property def guidance_scale(self): return self._guidance_scale @property def do_classifier_free_guidance(self): return self._guidance_scale > 1.0 @property def do_spatio_temporal_guidance(self): return self._stg_scale > 0.0 @property def num_timesteps(self): return self._num_timesteps @property def attention_kwargs(self): return self._attention_kwargs @property def interrupt(self): return self._interrupt @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, prompt: Union[str, List[str]] = None, negative_prompt: Optional[Union[str, List[str]]] = None, height: int = 512, width: int = 704, num_frames: int = 161, frame_rate: int = 25, num_inference_steps: int = 50, timesteps: List[int] = None, guidance_scale: float = 3, num_videos_per_prompt: Optional[int] = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, prompt_embeds: Optional[torch.Tensor] = None, prompt_attention_mask: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_attention_mask: Optional[torch.Tensor] = None, decode_timestep: Union[float, List[float]] = 0.0, decode_noise_scale: Optional[Union[float, List[float]]] = None, output_type: str | None = "pil", return_dict: bool = True, attention_kwargs: Optional[Dict[str, Any]] = None, callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, callback_on_step_end_tensor_inputs: List[str] = ["latents"], max_sequence_length: int = 128, stg_applied_layers_idx: Optional[List[int]] = [19], stg_scale: Optional[float] = 1.0, do_rescaling: Optional[bool] = False, ): r""" Function invoked when calling the pipeline for generation. 
Args: prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. instead. height (`int`, defaults to `512`): The height in pixels of the generated image. This is set to 480 by default for the best results. width (`int`, defaults to `704`): The width in pixels of the generated image. This is set to 848 by default for the best results. num_frames (`int`, defaults to `161`): The number of video frames to generate num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. timesteps (`List[int]`, *optional*): Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed will be used. Must be in descending order. guidance_scale (`float`, defaults to `3 `): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. num_videos_per_prompt (`int`, *optional*, defaults to 1): The number of videos to generate per prompt. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. 
If not provided, a latents tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. prompt_attention_mask (`torch.Tensor`, *optional*): Pre-generated attention mask for text embeddings. negative_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative text embeddings. For PixArt-Sigma this negative prompt should be "". If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. negative_prompt_attention_mask (`torch.FloatTensor`, *optional*): Pre-generated attention mask for negative text embeddings. decode_timestep (`float`, defaults to `0.0`): The timestep at which generated video is decoded. decode_noise_scale (`float`, defaults to `None`): The interpolation factor between random noise and denoised latents at the decode timestep. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.ltx.LTXPipelineOutput`] instead of a plain tuple. attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). callback_on_step_end (`Callable`, *optional*): A function that calls at the end of each denoising steps during the inference. The function is called with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. 
`callback_kwargs` will include a list of all tensors as specified by `callback_on_step_end_tensor_inputs`. callback_on_step_end_tensor_inputs (`List`, *optional*): The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the `._callback_tensor_inputs` attribute of your pipeline class. max_sequence_length (`int` defaults to `128 `): Maximum sequence length to use with the `prompt`. Examples: Returns: [`~pipelines.ltx.LTXPipelineOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.ltx.LTXPipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated images. """ if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs # 1. Check inputs. Raise error if not correct self.check_inputs( prompt=prompt, height=height, width=width, callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, prompt_attention_mask=prompt_attention_mask, negative_prompt_attention_mask=negative_prompt_attention_mask, ) self._stg_scale = stg_scale self._guidance_scale = guidance_scale self._attention_kwargs = attention_kwargs self._interrupt = False if self.do_spatio_temporal_guidance: for i in stg_applied_layers_idx: self.transformer.transformer_blocks[i].forward = types.MethodType( forward_with_stg, self.transformer.transformer_blocks[i] ) # 2. Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device # 3. 
Prepare text embeddings ( prompt_embeds, prompt_attention_mask, negative_prompt_embeds, negative_prompt_attention_mask, ) = self.encode_prompt( prompt=prompt, negative_prompt=negative_prompt, do_classifier_free_guidance=self.do_classifier_free_guidance, num_videos_per_prompt=num_videos_per_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, prompt_attention_mask=prompt_attention_mask, negative_prompt_attention_mask=negative_prompt_attention_mask, max_sequence_length=max_sequence_length, device=device, ) if self.do_classifier_free_guidance and not self.do_spatio_temporal_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) prompt_attention_mask = torch.cat([negative_prompt_attention_mask, prompt_attention_mask], dim=0) elif self.do_classifier_free_guidance and self.do_spatio_temporal_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds, prompt_embeds], dim=0) prompt_attention_mask = torch.cat( [negative_prompt_attention_mask, prompt_attention_mask, prompt_attention_mask], dim=0 ) # 4. Prepare latent variables num_channels_latents = self.transformer.config.in_channels latents = self.prepare_latents( batch_size * num_videos_per_prompt, num_channels_latents, height, width, num_frames, torch.float32, device, generator, latents, ) # 5. 
Prepare timesteps latent_num_frames = (num_frames - 1) // self.vae_temporal_compression_ratio + 1 latent_height = height // self.vae_spatial_compression_ratio latent_width = width // self.vae_spatial_compression_ratio video_sequence_length = latent_num_frames * latent_height * latent_width sigmas = np.linspace(1.0, 1 / num_inference_steps, num_inference_steps) mu = calculate_shift( video_sequence_length, self.scheduler.config.get("base_image_seq_len", 256), self.scheduler.config.get("max_image_seq_len", 4096), self.scheduler.config.get("base_shift", 0.5), self.scheduler.config.get("max_shift", 1.16), ) timesteps, num_inference_steps = retrieve_timesteps( self.scheduler, num_inference_steps, device, timesteps, sigmas=sigmas, mu=mu, ) num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) self._num_timesteps = len(timesteps) # 6. Prepare micro-conditions latent_frame_rate = frame_rate / self.vae_temporal_compression_ratio rope_interpolation_scale = ( 1 / latent_frame_rate, self.vae_spatial_compression_ratio, self.vae_spatial_compression_ratio, ) # 7. 
Denoising loop with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): if self.interrupt: continue if self.do_classifier_free_guidance and not self.do_spatio_temporal_guidance: latent_model_input = torch.cat([latents] * 2) elif self.do_classifier_free_guidance and self.do_spatio_temporal_guidance: latent_model_input = torch.cat([latents] * 3) else: latent_model_input = latents latent_model_input = latent_model_input.to(prompt_embeds.dtype) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML timestep = t.expand(latent_model_input.shape[0]) noise_pred = self.transformer( hidden_states=latent_model_input, encoder_hidden_states=prompt_embeds, timestep=timestep, encoder_attention_mask=prompt_attention_mask, num_frames=latent_num_frames, height=latent_height, width=latent_width, rope_interpolation_scale=rope_interpolation_scale, attention_kwargs=attention_kwargs, return_dict=False, )[0] noise_pred = noise_pred.float() if self.do_classifier_free_guidance and not self.do_spatio_temporal_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) elif self.do_classifier_free_guidance and self.do_spatio_temporal_guidance: noise_pred_uncond, noise_pred_text, noise_pred_perturb = noise_pred.chunk(3) noise_pred = ( noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) + self._stg_scale * (noise_pred_text - noise_pred_perturb) ) if do_rescaling: rescaling_scale = 0.7 factor = noise_pred_text.std() / noise_pred.std() factor = rescaling_scale * factor + (1 - rescaling_scale) noise_pred = noise_pred * factor # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0] if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = 
callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop("latents", latents) prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if XLA_AVAILABLE: xm.mark_step() if output_type == "latent": video = latents else: latents = self._unpack_latents( latents, latent_num_frames, latent_height, latent_width, self.transformer_spatial_patch_size, self.transformer_temporal_patch_size, ) latents = self._denormalize_latents( latents, self.vae.latents_mean, self.vae.latents_std, self.vae.config.scaling_factor ) latents = latents.to(prompt_embeds.dtype) if not self.vae.config.timestep_conditioning: timestep = None else: noise = randn_tensor(latents.shape, generator=generator, device=device, dtype=latents.dtype) if not isinstance(decode_timestep, list): decode_timestep = [decode_timestep] * batch_size if decode_noise_scale is None: decode_noise_scale = decode_timestep elif not isinstance(decode_noise_scale, list): decode_noise_scale = [decode_noise_scale] * batch_size timestep = torch.tensor(decode_timestep, device=device, dtype=latents.dtype) decode_noise_scale = torch.tensor(decode_noise_scale, device=device, dtype=latents.dtype)[ :, None, None, None, None ] latents = (1 - decode_noise_scale) * latents + decode_noise_scale * noise video = self.vae.decode(latents, timestep, return_dict=False)[0] video = self.video_processor.postprocess_video(video, output_type=output_type) # Offload all models self.maybe_free_model_hooks() if not return_dict: return (video,) return LTXPipelineOutput(frames=video)
{ "repo_id": "huggingface/diffusers", "file_path": "examples/community/pipeline_stg_ltx.py", "license": "Apache License 2.0", "lines": 779, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:examples/community/pipeline_stg_ltx_image2video.py
# Copyright 2025 Lightricks and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import types from typing import Any, Callable, Dict, List, Optional, Tuple, Union import numpy as np import torch from transformers import T5EncoderModel, T5TokenizerFast from diffusers.callbacks import MultiPipelineCallbacks, PipelineCallback from diffusers.image_processor import PipelineImageInput from diffusers.loaders import FromSingleFileMixin, LTXVideoLoraLoaderMixin from diffusers.models.autoencoders import AutoencoderKLLTXVideo from diffusers.models.transformers import LTXVideoTransformer3DModel from diffusers.pipelines.ltx.pipeline_output import LTXPipelineOutput from diffusers.pipelines.pipeline_utils import DiffusionPipeline from diffusers.schedulers import FlowMatchEulerDiscreteScheduler from diffusers.utils import is_torch_xla_available, logging, replace_example_docstring from diffusers.utils.torch_utils import randn_tensor from diffusers.video_processor import VideoProcessor if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py >>> import torch >>> from diffusers.utils import export_to_video, load_image >>> from examples.community.pipeline_stg_ltx_image2video import LTXImageToVideoSTGPipeline >>> pipe = 
LTXImageToVideoSTGPipeline.from_pretrained("Lightricks/LTX-Video", torch_dtype=torch.bfloat16) >>> pipe.to("cuda") >>> image = load_image( ... "https://huggingface.co/datasets/a-r-r-o-w/tiny-meme-dataset-captioned/resolve/main/images/11.png" >>> ) >>> prompt = "A medieval fantasy scene featuring a rugged man with shoulder-length brown hair and a beard. He wears a dark leather tunic over a maroon shirt with intricate metal details. His facial expression is serious and intense, and he is making a gesture with his right hand, forming a small circle with his thumb and index finger. The warm golden lighting casts dramatic shadows on his face. The background includes an ornate stone arch and blurred medieval-style decor, creating an epic atmosphere." >>> negative_prompt = "worst quality, inconsistent motion, blurry, jittery, distorted" >>> # Configure STG mode options >>> stg_applied_layers_idx = [19] # Layer indices from 0 to 41 >>> stg_scale = 1.0 # Set 0.0 for CFG >>> do_rescaling = False >>> video = pipe( ... image=image, ... prompt=prompt, ... negative_prompt=negative_prompt, ... width=704, ... height=480, ... num_frames=161, ... num_inference_steps=50, ... stg_applied_layers_idx=stg_applied_layers_idx, ... stg_scale=stg_scale, ... 
do_rescaling=do_rescaling, >>> ).frames[0] >>> export_to_video(video, "output.mp4", fps=24) ``` """ def forward_with_stg( self, hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor, temb: torch.Tensor, image_rotary_emb: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, encoder_attention_mask: Optional[torch.Tensor] = None, ) -> torch.Tensor: hidden_states_ptb = hidden_states[2:] encoder_hidden_states_ptb = encoder_hidden_states[2:] batch_size = hidden_states.size(0) norm_hidden_states = self.norm1(hidden_states) num_ada_params = self.scale_shift_table.shape[0] ada_values = self.scale_shift_table[None, None] + temb.reshape(batch_size, temb.size(1), num_ada_params, -1) shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = ada_values.unbind(dim=2) norm_hidden_states = norm_hidden_states * (1 + scale_msa) + shift_msa attn_hidden_states = self.attn1( hidden_states=norm_hidden_states, encoder_hidden_states=None, image_rotary_emb=image_rotary_emb, ) hidden_states = hidden_states + attn_hidden_states * gate_msa attn_hidden_states = self.attn2( hidden_states, encoder_hidden_states=encoder_hidden_states, image_rotary_emb=None, attention_mask=encoder_attention_mask, ) hidden_states = hidden_states + attn_hidden_states norm_hidden_states = self.norm2(hidden_states) * (1 + scale_mlp) + shift_mlp ff_output = self.ff(norm_hidden_states) hidden_states = hidden_states + ff_output * gate_mlp hidden_states[2:] = hidden_states_ptb encoder_hidden_states[2:] = encoder_hidden_states_ptb return hidden_states # Copied from diffusers.pipelines.flux.pipeline_flux.calculate_shift def calculate_shift( image_seq_len, base_seq_len: int = 256, max_seq_len: int = 4096, base_shift: float = 0.5, max_shift: float = 1.16, ): m = (max_shift - base_shift) / (max_seq_len - base_seq_len) b = base_shift - m * base_seq_len mu = image_seq_len * m + b return mu # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps def retrieve_timesteps( 
scheduler, num_inference_steps: Optional[int] = None, device: Optional[Union[str, torch.device]] = None, timesteps: Optional[List[int]] = None, sigmas: Optional[List[float]] = None, **kwargs, ): r""" Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. Args: scheduler (`SchedulerMixin`): The scheduler to get timesteps from. num_inference_steps (`int`): The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` must be `None`. device (`str` or `torch.device`, *optional*): The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. timesteps (`List[int]`, *optional*): Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, `num_inference_steps` and `sigmas` must be `None`. sigmas (`List[float]`, *optional*): Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, `num_inference_steps` and `timesteps` must be `None`. Returns: `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the second element is the number of inference steps. """ if timesteps is not None and sigmas is not None: raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") if timesteps is not None: accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" timestep schedules. Please check whether you are using the correct scheduler." 
) scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif sigmas is not None: accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accept_sigmas: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" sigmas schedules. Please check whether you are using the correct scheduler." ) scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return timesteps, num_inference_steps # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents def retrieve_latents( encoder_output: torch.Tensor, generator: torch.Generator | None = None, sample_mode: str = "sample" ): if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": return encoder_output.latent_dist.sample(generator) elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": return encoder_output.latent_dist.mode() elif hasattr(encoder_output, "latents"): return encoder_output.latents else: raise AttributeError("Could not access latents of provided encoder_output") class LTXImageToVideoSTGPipeline(DiffusionPipeline, FromSingleFileMixin, LTXVideoLoraLoaderMixin): r""" Pipeline for image-to-video generation. Reference: https://github.com/Lightricks/LTX-Video Args: transformer ([`LTXVideoTransformer3DModel`]): Conditional Transformer architecture to denoise the encoded video latents. scheduler ([`FlowMatchEulerDiscreteScheduler`]): A scheduler to be used in combination with `transformer` to denoise the encoded image latents. vae ([`AutoencoderKLLTXVideo`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. 
text_encoder ([`T5EncoderModel`]): [T5](https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5EncoderModel), specifically the [google/t5-v1_1-xxl](https://huggingface.co/google/t5-v1_1-xxl) variant. tokenizer (`CLIPTokenizer`): Tokenizer of class [CLIPTokenizer](https://huggingface.co/docs/transformers/en/model_doc/clip#transformers.CLIPTokenizer). tokenizer (`T5TokenizerFast`): Second Tokenizer of class [T5TokenizerFast](https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5TokenizerFast). """ model_cpu_offload_seq = "text_encoder->transformer->vae" _optional_components = [] _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"] def __init__( self, scheduler: FlowMatchEulerDiscreteScheduler, vae: AutoencoderKLLTXVideo, text_encoder: T5EncoderModel, tokenizer: T5TokenizerFast, transformer: LTXVideoTransformer3DModel, ): super().__init__() self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, transformer=transformer, scheduler=scheduler, ) self.vae_spatial_compression_ratio = ( self.vae.spatial_compression_ratio if getattr(self, "vae", None) is not None else 32 ) self.vae_temporal_compression_ratio = ( self.vae.temporal_compression_ratio if getattr(self, "vae", None) is not None else 8 ) self.transformer_spatial_patch_size = ( self.transformer.config.patch_size if getattr(self, "transformer", None) is not None else 1 ) self.transformer_temporal_patch_size = ( self.transformer.config.patch_size_t if getattr(self, "transformer") is not None else 1 ) self.video_processor = VideoProcessor(vae_scale_factor=self.vae_spatial_compression_ratio) self.tokenizer_max_length = ( self.tokenizer.model_max_length if getattr(self, "tokenizer", None) is not None else 128 ) self.default_height = 512 self.default_width = 704 self.default_frames = 121 def _get_t5_prompt_embeds( self, prompt: Union[str, List[str]] = None, num_videos_per_prompt: int = 1, max_sequence_length: int = 128, device: 
Optional[torch.device] = None, dtype: Optional[torch.dtype] = None, ): device = device or self._execution_device dtype = dtype or self.text_encoder.dtype prompt = [prompt] if isinstance(prompt, str) else prompt batch_size = len(prompt) text_inputs = self.tokenizer( prompt, padding="max_length", max_length=max_sequence_length, truncation=True, add_special_tokens=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids prompt_attention_mask = text_inputs.attention_mask prompt_attention_mask = prompt_attention_mask.bool().to(device) untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, max_sequence_length - 1 : -1]) logger.warning( "The following part of your input was truncated because `max_sequence_length` is set to " f" {max_sequence_length} tokens: {removed_text}" ) prompt_embeds = self.text_encoder(text_input_ids.to(device))[0] prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) # duplicate text embeddings for each generation per prompt, using mps friendly method _, seq_len, _ = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_videos_per_prompt, 1) prompt_embeds = prompt_embeds.view(batch_size * num_videos_per_prompt, seq_len, -1) prompt_attention_mask = prompt_attention_mask.view(batch_size, -1) prompt_attention_mask = prompt_attention_mask.repeat(num_videos_per_prompt, 1) return prompt_embeds, prompt_attention_mask # Copied from diffusers.pipelines.mochi.pipeline_mochi.MochiPipeline.encode_prompt with 256->128 def encode_prompt( self, prompt: Union[str, List[str]], negative_prompt: Optional[Union[str, List[str]]] = None, do_classifier_free_guidance: bool = True, num_videos_per_prompt: int = 1, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, prompt_attention_mask: 
    # Copied from diffusers.pipelines.mochi.pipeline_mochi.MochiPipeline.encode_prompt with 256->128
    def encode_prompt(
        self,
        prompt: Union[str, List[str]],
        negative_prompt: Optional[Union[str, List[str]]] = None,
        do_classifier_free_guidance: bool = True,
        num_videos_per_prompt: int = 1,
        prompt_embeds: Optional[torch.Tensor] = None,
        negative_prompt_embeds: Optional[torch.Tensor] = None,
        prompt_attention_mask: Optional[torch.Tensor] = None,
        negative_prompt_attention_mask: Optional[torch.Tensor] = None,
        max_sequence_length: int = 128,
        device: Optional[torch.device] = None,
        dtype: Optional[torch.dtype] = None,
    ):
        r"""
        Encodes the prompt into text encoder hidden states.

        Args:
            prompt (`str` or `List[str]`, *optional*):
                prompt to be encoded
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. If not defined, one has to pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
                less than `1`).
            do_classifier_free_guidance (`bool`, *optional*, defaults to `True`):
                Whether to use classifier free guidance or not.
            num_videos_per_prompt (`int`, *optional*, defaults to 1):
                Number of videos that should be generated per prompt.
            prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                provided, text embeddings will be generated from `prompt` input argument.
            negative_prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
                argument.
            device: (`torch.device`, *optional*):
                torch device
            dtype: (`torch.dtype`, *optional*):
                torch dtype
        """
        device = device or self._execution_device

        prompt = [prompt] if isinstance(prompt, str) else prompt
        if prompt is not None:
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        # Only compute embeddings that were not supplied by the caller.
        if prompt_embeds is None:
            prompt_embeds, prompt_attention_mask = self._get_t5_prompt_embeds(
                prompt=prompt,
                num_videos_per_prompt=num_videos_per_prompt,
                max_sequence_length=max_sequence_length,
                device=device,
                dtype=dtype,
            )

        if do_classifier_free_guidance and negative_prompt_embeds is None:
            # Default negative prompt is the empty string, broadcast to the batch size.
            negative_prompt = negative_prompt or ""
            negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt

            if prompt is not None and type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )

            negative_prompt_embeds, negative_prompt_attention_mask = self._get_t5_prompt_embeds(
                prompt=negative_prompt,
                num_videos_per_prompt=num_videos_per_prompt,
                max_sequence_length=max_sequence_length,
                device=device,
                dtype=dtype,
            )

        return prompt_embeds, prompt_attention_mask, negative_prompt_embeds, negative_prompt_attention_mask
) negative_prompt_embeds, negative_prompt_attention_mask = self._get_t5_prompt_embeds( prompt=negative_prompt, num_videos_per_prompt=num_videos_per_prompt, max_sequence_length=max_sequence_length, device=device, dtype=dtype, ) return prompt_embeds, prompt_attention_mask, negative_prompt_embeds, negative_prompt_attention_mask # Copied from diffusers.pipelines.ltx.pipeline_ltx.LTXPipeline.check_inputs def check_inputs( self, prompt, height, width, callback_on_step_end_tensor_inputs=None, prompt_embeds=None, negative_prompt_embeds=None, prompt_attention_mask=None, negative_prompt_attention_mask=None, ): if height % 32 != 0 or width % 32 != 0: raise ValueError(f"`height` and `width` have to be divisible by 32 but are {height} and {width}.") if callback_on_step_end_tensor_inputs is not None and not all( k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs ): raise ValueError( f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" ) if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." 
    @staticmethod
    # Copied from diffusers.pipelines.ltx.pipeline_ltx.LTXPipeline._pack_latents
    def _pack_latents(latents: torch.Tensor, patch_size: int = 1, patch_size_t: int = 1) -> torch.Tensor:
        # Unpacked latents of shape are [B, C, F, H, W] are patched into tokens of shape [B, C, F // p_t, p_t, H // p, p, W // p, p].
        # The patch dimensions are then permuted and collapsed into the channel dimension of shape:
        # [B, F // p_t * H // p * W // p, C * p_t * p * p] (an ndim=3 tensor).
        # dim=0 is the batch size, dim=1 is the effective video sequence length, dim=2 is the effective number of input features
        # NOTE(review): integer division assumes F, H, W are exact multiples of the
        # patch sizes; non-divisible inputs would be silently mis-reshaped.
        batch_size, num_channels, num_frames, height, width = latents.shape
        post_patch_num_frames = num_frames // patch_size_t
        post_patch_height = height // patch_size
        post_patch_width = width // patch_size
        latents = latents.reshape(
            batch_size,
            -1,
            post_patch_num_frames,
            patch_size_t,
            post_patch_height,
            patch_size,
            post_patch_width,
            patch_size,
        )
        # Move (C, p_t, p, p) to the end, then flatten them into the feature dim
        # and flatten (F', H', W') into the sequence dim.
        latents = latents.permute(0, 2, 4, 6, 1, 3, 5, 7).flatten(4, 7).flatten(1, 3)
        return latents

    @staticmethod
    # Copied from diffusers.pipelines.ltx.pipeline_ltx.LTXPipeline._unpack_latents
    def _unpack_latents(
        latents: torch.Tensor, num_frames: int, height: int, width: int, patch_size: int = 1, patch_size_t: int = 1
    ) -> torch.Tensor:
        # Packed latents of shape [B, S, D] (S is the effective video sequence length, D is the effective feature dimensions)
        # are unpacked and reshaped into a video tensor of shape [B, C, F, H, W]. This is the inverse operation of
        # what happens in the `_pack_latents` method.
        # `num_frames`/`height`/`width` here are the *post-patch* grid sizes (F', H', W').
        batch_size = latents.size(0)
        latents = latents.reshape(batch_size, num_frames, height, width, -1, patch_size_t, patch_size, patch_size)
        # Inverse permutation of `_pack_latents`: interleave each patch back into
        # its frame/row/column position, then merge the patch axes away.
        latents = latents.permute(0, 4, 1, 5, 2, 6, 3, 7).flatten(6, 7).flatten(4, 5).flatten(2, 3)
        return latents
batch_size = latents.size(0) latents = latents.reshape(batch_size, num_frames, height, width, -1, patch_size_t, patch_size, patch_size) latents = latents.permute(0, 4, 1, 5, 2, 6, 3, 7).flatten(6, 7).flatten(4, 5).flatten(2, 3) return latents @staticmethod # Copied from diffusers.pipelines.ltx.pipeline_ltx.LTXPipeline._normalize_latents def _normalize_latents( latents: torch.Tensor, latents_mean: torch.Tensor, latents_std: torch.Tensor, scaling_factor: float = 1.0 ) -> torch.Tensor: # Normalize latents across the channel dimension [B, C, F, H, W] latents_mean = latents_mean.view(1, -1, 1, 1, 1).to(latents.device, latents.dtype) latents_std = latents_std.view(1, -1, 1, 1, 1).to(latents.device, latents.dtype) latents = (latents - latents_mean) * scaling_factor / latents_std return latents @staticmethod # Copied from diffusers.pipelines.ltx.pipeline_ltx.LTXPipeline._denormalize_latents def _denormalize_latents( latents: torch.Tensor, latents_mean: torch.Tensor, latents_std: torch.Tensor, scaling_factor: float = 1.0 ) -> torch.Tensor: # Denormalize latents across the channel dimension [B, C, F, H, W] latents_mean = latents_mean.view(1, -1, 1, 1, 1).to(latents.device, latents.dtype) latents_std = latents_std.view(1, -1, 1, 1, 1).to(latents.device, latents.dtype) latents = latents * latents_std / scaling_factor + latents_mean return latents def prepare_latents( self, image: Optional[torch.Tensor] = None, batch_size: int = 1, num_channels_latents: int = 128, height: int = 512, width: int = 704, num_frames: int = 161, dtype: Optional[torch.dtype] = None, device: Optional[torch.device] = None, generator: torch.Generator | None = None, latents: Optional[torch.Tensor] = None, ) -> torch.Tensor: height = height // self.vae_spatial_compression_ratio width = width // self.vae_spatial_compression_ratio num_frames = ( (num_frames - 1) // self.vae_temporal_compression_ratio + 1 if latents is None else latents.size(2) ) shape = (batch_size, num_channels_latents, num_frames, height, 
width) mask_shape = (batch_size, 1, num_frames, height, width) if latents is not None: conditioning_mask = latents.new_zeros(shape) conditioning_mask[:, :, 0] = 1.0 conditioning_mask = self._pack_latents( conditioning_mask, self.transformer_spatial_patch_size, self.transformer_temporal_patch_size ) return latents.to(device=device, dtype=dtype), conditioning_mask if isinstance(generator, list): if len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) init_latents = [ retrieve_latents(self.vae.encode(image[i].unsqueeze(0).unsqueeze(2)), generator[i]) for i in range(batch_size) ] else: init_latents = [ retrieve_latents(self.vae.encode(img.unsqueeze(0).unsqueeze(2)), generator) for img in image ] init_latents = torch.cat(init_latents, dim=0).to(dtype) init_latents = self._normalize_latents(init_latents, self.vae.latents_mean, self.vae.latents_std) init_latents = init_latents.repeat(1, 1, num_frames, 1, 1) conditioning_mask = torch.zeros(mask_shape, device=device, dtype=dtype) conditioning_mask[:, :, 0] = 1.0 noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) latents = init_latents * conditioning_mask + noise * (1 - conditioning_mask) conditioning_mask = self._pack_latents( conditioning_mask, self.transformer_spatial_patch_size, self.transformer_temporal_patch_size ).squeeze(-1) latents = self._pack_latents( latents, self.transformer_spatial_patch_size, self.transformer_temporal_patch_size ) return latents, conditioning_mask @property def guidance_scale(self): return self._guidance_scale @property def do_classifier_free_guidance(self): return self._guidance_scale > 1.0 @property def do_spatio_temporal_guidance(self): return self._stg_scale > 0.0 @property def num_timesteps(self): return self._num_timesteps @property def attention_kwargs(self): return 
    @property
    def guidance_scale(self):
        # CFG scale for the current/most recent `__call__`.
        return self._guidance_scale

    @property
    def do_classifier_free_guidance(self):
        # CFG is active whenever the scale exceeds 1.
        return self._guidance_scale > 1.0

    @property
    def do_spatio_temporal_guidance(self):
        # STG is active whenever a positive perturbation scale is set.
        return self._stg_scale > 0.0

    @property
    def num_timesteps(self):
        return self._num_timesteps

    @property
    def attention_kwargs(self):
        return self._attention_kwargs

    @property
    def interrupt(self):
        return self._interrupt

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image: PipelineImageInput = None,
        prompt: Union[str, List[str]] = None,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        height: int = 512,
        width: int = 704,
        num_frames: int = 161,
        frame_rate: int = 25,
        num_inference_steps: int = 50,
        timesteps: List[int] = None,
        guidance_scale: float = 3,
        num_videos_per_prompt: Optional[int] = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.Tensor] = None,
        prompt_embeds: Optional[torch.Tensor] = None,
        prompt_attention_mask: Optional[torch.Tensor] = None,
        negative_prompt_embeds: Optional[torch.Tensor] = None,
        negative_prompt_attention_mask: Optional[torch.Tensor] = None,
        decode_timestep: Union[float, List[float]] = 0.0,
        decode_noise_scale: Optional[Union[float, List[float]]] = None,
        output_type: str | None = "pil",
        return_dict: bool = True,
        attention_kwargs: Optional[Dict[str, Any]] = None,
        callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
        callback_on_step_end_tensor_inputs: List[str] = ["latents"],
        max_sequence_length: int = 128,
        stg_applied_layers_idx: Optional[List[int]] = [19],
        stg_scale: Optional[float] = 1.0,
        do_rescaling: Optional[bool] = False,
    ):
        r"""
        Function invoked when calling the pipeline for generation.

        Args:
            image (`PipelineImageInput`):
                The input image to condition the generation on. Must be an image, a list of images or a `torch.Tensor`.
            prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`.
                instead.
            height (`int`, defaults to `512`):
                The height in pixels of the generated image. This is set to 512 by default for the best results.
            width (`int`, defaults to `704`):
                The width in pixels of the generated image. This is set to 704 by default for the best results.
            num_frames (`int`, defaults to `161`):
                The number of video frames to generate
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            timesteps (`List[int]`, *optional*):
                Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument
                in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
                passed will be used. Must be in descending order.
            guidance_scale (`float`, defaults to `3 `):
                Guidance scale as defined in [Classifier-Free Diffusion
                Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2.
                of [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting
                `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to
                the text `prompt`, usually at the expense of lower image quality.
            num_videos_per_prompt (`int`, *optional*, defaults to 1):
                The number of videos to generate per prompt.
            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
                to make generation deterministic.
            latents (`torch.Tensor`, *optional*):
                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
                tensor will be generated by sampling using the supplied random `generator`.
            prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                provided, text embeddings will be generated from `prompt` input argument.
            prompt_attention_mask (`torch.Tensor`, *optional*):
                Pre-generated attention mask for text embeddings.
            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated negative text embeddings. For PixArt-Sigma this negative prompt should be "". If not
                provided, negative_prompt_embeds will be generated from `negative_prompt` input argument.
            negative_prompt_attention_mask (`torch.FloatTensor`, *optional*):
                Pre-generated attention mask for negative text embeddings.
            decode_timestep (`float`, defaults to `0.0`):
                The timestep at which generated video is decoded.
            decode_noise_scale (`float`, defaults to `None`):
                The interpolation factor between random noise and denoised latents at the decode timestep.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generate image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.ltx.LTXPipelineOutput`] instead of a plain tuple.
            attention_kwargs (`dict`, *optional*):
                A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
                `self.processor` in
                [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
            callback_on_step_end (`Callable`, *optional*):
                A function that calls at the end of each denoising steps during the inference. The function is called
                with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
                callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
                `callback_on_step_end_tensor_inputs`.
            callback_on_step_end_tensor_inputs (`List`, *optional*):
                The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
                will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
                `._callback_tensor_inputs` attribute of your pipeline class.
            max_sequence_length (`int` defaults to `128 `):
                Maximum sequence length to use with the `prompt`.
            stg_applied_layers_idx (`List[int]`, *optional*, defaults to `[19]`):
                Indices of the transformer blocks whose forward is replaced with `forward_with_stg`.
            stg_scale (`float`, *optional*, defaults to `1.0`):
                Spatio-temporal guidance scale; set to `0.0` to disable STG and use plain CFG.
            do_rescaling (`bool`, *optional*, defaults to `False`):
                Whether to rescale the guided noise prediction's std toward the text branch (STG mode only).

        Examples:

        Returns:
            [`~pipelines.ltx.LTXPipelineOutput`] or `tuple`:
                If `return_dict` is `True`, [`~pipelines.ltx.LTXPipelineOutput`] is returned, otherwise a `tuple` is
                returned where the first element is a list with the generated images.
        """
        if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)):
            callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs

        # 1. Check inputs. Raise error if not correct
        self.check_inputs(
            prompt=prompt,
            height=height,
            width=width,
            callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            prompt_attention_mask=prompt_attention_mask,
            negative_prompt_attention_mask=negative_prompt_attention_mask,
        )

        self._stg_scale = stg_scale
        self._guidance_scale = guidance_scale
        self._attention_kwargs = attention_kwargs
        self._interrupt = False

        # Monkey-patch the selected transformer blocks so they skip computation for
        # the STG-perturbed batch entries (index 2+).
        # NOTE(review): the original forwards are never restored afterwards; with a
        # batch of <3 entries the patched forward degenerates to the normal path,
        # but the patch does persist across calls — worth confirming intent.
        if self.do_spatio_temporal_guidance:
            for i in stg_applied_layers_idx:
                self.transformer.transformer_blocks[i].forward = types.MethodType(
                    forward_with_stg, self.transformer.transformer_blocks[i]
                )

        # 2. Define call parameters
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        device = self._execution_device

        # 3. Prepare text embeddings
        (
            prompt_embeds,
            prompt_attention_mask,
            negative_prompt_embeds,
            negative_prompt_attention_mask,
        ) = self.encode_prompt(
            prompt=prompt,
            negative_prompt=negative_prompt,
            do_classifier_free_guidance=self.do_classifier_free_guidance,
            num_videos_per_prompt=num_videos_per_prompt,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            prompt_attention_mask=prompt_attention_mask,
            negative_prompt_attention_mask=negative_prompt_attention_mask,
            max_sequence_length=max_sequence_length,
            device=device,
        )
        # Batch layout: CFG only -> [uncond, text]; CFG+STG -> [uncond, text, text]
        # (the third copy is the perturbed branch handled by `forward_with_stg`).
        if self.do_classifier_free_guidance and not self.do_spatio_temporal_guidance:
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
            prompt_attention_mask = torch.cat([negative_prompt_attention_mask, prompt_attention_mask], dim=0)
        elif self.do_classifier_free_guidance and self.do_spatio_temporal_guidance:
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds, prompt_embeds], dim=0)
            prompt_attention_mask = torch.cat(
                [negative_prompt_attention_mask, prompt_attention_mask, prompt_attention_mask], dim=0
            )

        # 4. Prepare latent variables
        if latents is None:
            image = self.video_processor.preprocess(image, height=height, width=width)
            image = image.to(device=device, dtype=prompt_embeds.dtype)

        num_channels_latents = self.transformer.config.in_channels
        latents, conditioning_mask = self.prepare_latents(
            image,
            batch_size * num_videos_per_prompt,
            num_channels_latents,
            height,
            width,
            num_frames,
            torch.float32,
            device,
            generator,
            latents,
        )

        # Replicate the first-frame mask to match the guidance batch layout.
        if self.do_classifier_free_guidance and not self.do_spatio_temporal_guidance:
            conditioning_mask = torch.cat([conditioning_mask, conditioning_mask])
        elif self.do_classifier_free_guidance and self.do_spatio_temporal_guidance:
            conditioning_mask = torch.cat([conditioning_mask, conditioning_mask, conditioning_mask])

        # 5. Prepare timesteps
        latent_num_frames = (num_frames - 1) // self.vae_temporal_compression_ratio + 1
        latent_height = height // self.vae_spatial_compression_ratio
        latent_width = width // self.vae_spatial_compression_ratio
        video_sequence_length = latent_num_frames * latent_height * latent_width
        sigmas = np.linspace(1.0, 1 / num_inference_steps, num_inference_steps)
        # Resolution-dependent flow-matching shift (same scheme as Flux).
        mu = calculate_shift(
            video_sequence_length,
            self.scheduler.config.get("base_image_seq_len", 256),
            self.scheduler.config.get("max_image_seq_len", 4096),
            self.scheduler.config.get("base_shift", 0.5),
            self.scheduler.config.get("max_shift", 1.16),
        )
        timesteps, num_inference_steps = retrieve_timesteps(
            self.scheduler,
            num_inference_steps,
            device,
            timesteps,
            sigmas=sigmas,
            mu=mu,
        )
        num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
        self._num_timesteps = len(timesteps)

        # 6. Prepare micro-conditions
        latent_frame_rate = frame_rate / self.vae_temporal_compression_ratio
        rope_interpolation_scale = (
            1 / latent_frame_rate,
            self.vae_spatial_compression_ratio,
            self.vae_spatial_compression_ratio,
        )

        # 7. Denoising loop
        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                if self.interrupt:
                    continue

                # Duplicate latents to match the [uncond, text(, perturb)] batch.
                if self.do_classifier_free_guidance and not self.do_spatio_temporal_guidance:
                    latent_model_input = torch.cat([latents] * 2)
                elif self.do_classifier_free_guidance and self.do_spatio_temporal_guidance:
                    latent_model_input = torch.cat([latents] * 3)
                else:
                    latent_model_input = latents
                latent_model_input = latent_model_input.to(prompt_embeds.dtype)

                # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
                timestep = t.expand(latent_model_input.shape[0])
                # Per-token timestep: the conditioned first-frame tokens get t=0,
                # i.e. they are treated as already clean.
                timestep = timestep.unsqueeze(-1) * (1 - conditioning_mask)

                noise_pred = self.transformer(
                    hidden_states=latent_model_input,
                    encoder_hidden_states=prompt_embeds,
                    timestep=timestep,
                    encoder_attention_mask=prompt_attention_mask,
                    num_frames=latent_num_frames,
                    height=latent_height,
                    width=latent_width,
                    rope_interpolation_scale=rope_interpolation_scale,
                    attention_kwargs=attention_kwargs,
                    return_dict=False,
                )[0]
                noise_pred = noise_pred.float()

                if self.do_classifier_free_guidance and not self.do_spatio_temporal_guidance:
                    # Standard CFG combination.
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond)
                    timestep, _ = timestep.chunk(2)
                elif self.do_classifier_free_guidance and self.do_spatio_temporal_guidance:
                    # CFG + STG: add the perturbation direction (text - perturbed)
                    # scaled by `stg_scale` on top of the CFG result.
                    noise_pred_uncond, noise_pred_text, noise_pred_perturb = noise_pred.chunk(3)
                    noise_pred = (
                        noise_pred_uncond
                        + self.guidance_scale * (noise_pred_text - noise_pred_uncond)
                        + self._stg_scale * (noise_pred_text - noise_pred_perturb)
                    )
                    timestep, _, _ = timestep.chunk(3)

                    if do_rescaling:
                        # Pull the guided prediction's std toward the text branch
                        # (rescaling as in "Common Diffusion Noise Schedules..."-style fixes).
                        rescaling_scale = 0.7
                        factor = noise_pred_text.std() / noise_pred.std()
                        factor = rescaling_scale * factor + (1 - rescaling_scale)
                        noise_pred = noise_pred * factor

                # compute the previous noisy sample x_t -> x_t-1
                noise_pred = self._unpack_latents(
                    noise_pred,
                    latent_num_frames,
                    latent_height,
                    latent_width,
                    self.transformer_spatial_patch_size,
                    self.transformer_temporal_patch_size,
                )
                latents = self._unpack_latents(
                    latents,
                    latent_num_frames,
                    latent_height,
                    latent_width,
                    self.transformer_spatial_patch_size,
                    self.transformer_temporal_patch_size,
                )

                # Keep the conditioned first latent frame frozen: only frames 1+
                # are stepped by the scheduler, then the first frame is re-attached.
                noise_pred = noise_pred[:, :, 1:]
                noise_latents = latents[:, :, 1:]
                pred_latents = self.scheduler.step(noise_pred, t, noise_latents, return_dict=False)[0]

                latents = torch.cat([latents[:, :, :1], pred_latents], dim=2)
                latents = self._pack_latents(
                    latents, self.transformer_spatial_patch_size, self.transformer_temporal_patch_size
                )

                if callback_on_step_end is not None:
                    callback_kwargs = {}
                    for k in callback_on_step_end_tensor_inputs:
                        callback_kwargs[k] = locals()[k]
                    callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)

                    latents = callback_outputs.pop("latents", latents)
                    prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)

                # call the callback, if provided
                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                    progress_bar.update()

                if XLA_AVAILABLE:
                    xm.mark_step()

        if output_type == "latent":
            video = latents
        else:
            latents = self._unpack_latents(
                latents,
                latent_num_frames,
                latent_height,
                latent_width,
                self.transformer_spatial_patch_size,
                self.transformer_temporal_patch_size,
            )
            latents = self._denormalize_latents(
                latents, self.vae.latents_mean, self.vae.latents_std, self.vae.config.scaling_factor
            )
            latents = latents.to(prompt_embeds.dtype)

            # Optionally condition the VAE decoder on a decode timestep, blending a
            # controlled amount of fresh noise into the latents first.
            if not self.vae.config.timestep_conditioning:
                timestep = None
            else:
                noise = torch.randn(latents.shape, generator=generator, device=device, dtype=latents.dtype)
                if not isinstance(decode_timestep, list):
                    decode_timestep = [decode_timestep] * batch_size
                if decode_noise_scale is None:
                    decode_noise_scale = decode_timestep
                elif not isinstance(decode_noise_scale, list):
                    decode_noise_scale = [decode_noise_scale] * batch_size
                timestep = torch.tensor(decode_timestep, device=device, dtype=latents.dtype)
                decode_noise_scale = torch.tensor(decode_noise_scale, device=device, dtype=latents.dtype)[
                    :, None, None, None, None
                ]
                latents = (1 - decode_noise_scale) * latents + decode_noise_scale * noise

            video = self.vae.decode(latents, timestep, return_dict=False)[0]
            video = self.video_processor.postprocess_video(video, output_type=output_type)

        # Offload all models
        self.maybe_free_model_hooks()

        if not return_dict:
            return (video,)

        return LTXPipelineOutput(frames=video)
{ "repo_id": "huggingface/diffusers", "file_path": "examples/community/pipeline_stg_ltx_image2video.py", "license": "Apache License 2.0", "lines": 867, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:examples/community/pipeline_stg_mochi.py
# Copyright 2025 Genmo and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import types from typing import Any, Callable, Dict, List, Optional, Tuple, Union import numpy as np import torch from transformers import T5EncoderModel, T5TokenizerFast from diffusers.callbacks import MultiPipelineCallbacks, PipelineCallback from diffusers.loaders import Mochi1LoraLoaderMixin from diffusers.models import AutoencoderKLMochi, MochiTransformer3DModel from diffusers.pipelines.mochi.pipeline_output import MochiPipelineOutput from diffusers.pipelines.pipeline_utils import DiffusionPipeline from diffusers.schedulers import FlowMatchEulerDiscreteScheduler from diffusers.utils import deprecate, is_torch_xla_available, logging, replace_example_docstring from diffusers.utils.torch_utils import randn_tensor from diffusers.video_processor import VideoProcessor if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py >>> import torch >>> from diffusers.utils import export_to_video >>> from examples.community.pipeline_stg_mochi import MochiSTGPipeline >>> pipe = MochiSTGPipeline.from_pretrained("genmo/mochi-1-preview", torch_dtype=torch.bfloat16) >>> pipe.enable_model_cpu_offload() >>> pipe.enable_vae_tiling() >>> prompt = "A close-up of a beautiful woman's face with colored 
def forward_with_stg(
    self,
    hidden_states: torch.Tensor,
    encoder_hidden_states: torch.Tensor,
    temb: torch.Tensor,
    encoder_attention_mask: torch.Tensor,
    image_rotary_emb: Optional[torch.Tensor] = None,
) -> Tuple[torch.Tensor, torch.Tensor]:
    """Transformer-block forward used when spatio-temporal guidance (STG) is active.

    Bound onto selected ``transformer_blocks`` of the Mochi transformer via
    ``types.MethodType`` (see ``MochiSTGPipeline.__call__``). The batch is laid
    out as ``[uncond, cond, perturbed]``; this forward saves the ``[2:]``
    (perturbed) slice of the inputs up front and writes it back over the outputs
    at the end, so the selected block is effectively skipped for the perturbed
    branch while running normally for the other two.
    """
    # Views of the perturbed slice of the *inputs*. Nothing below mutates
    # `hidden_states` / `encoder_hidden_states` in place (each update rebinds
    # the name to a new tensor), so these views still hold the original input
    # values when restored at the end.
    hidden_states_ptb = hidden_states[2:]
    encoder_hidden_states_ptb = encoder_hidden_states[2:]

    # AdaLN-style modulation: norm1 returns normed states plus gates/scales
    # conditioned on the timestep embedding `temb`.
    norm_hidden_states, gate_msa, scale_mlp, gate_mlp = self.norm1(hidden_states, temb)

    if not self.context_pre_only:
        norm_encoder_hidden_states, enc_gate_msa, enc_scale_mlp, enc_gate_mlp = self.norm1_context(
            encoder_hidden_states, temb
        )
    else:
        norm_encoder_hidden_states = self.norm1_context(encoder_hidden_states, temb)

    # Joint attention over video tokens and text (encoder) tokens.
    attn_hidden_states, context_attn_hidden_states = self.attn1(
        hidden_states=norm_hidden_states,
        encoder_hidden_states=norm_encoder_hidden_states,
        image_rotary_emb=image_rotary_emb,
        attention_mask=encoder_attention_mask,
    )

    # Gated residual connections around attention and feed-forward.
    hidden_states = hidden_states + self.norm2(attn_hidden_states, torch.tanh(gate_msa).unsqueeze(1))
    norm_hidden_states = self.norm3(hidden_states, (1 + scale_mlp.unsqueeze(1).to(torch.float32)))
    ff_output = self.ff(norm_hidden_states)
    hidden_states = hidden_states + self.norm4(ff_output, torch.tanh(gate_mlp).unsqueeze(1))

    if not self.context_pre_only:
        encoder_hidden_states = encoder_hidden_states + self.norm2_context(
            context_attn_hidden_states, torch.tanh(enc_gate_msa).unsqueeze(1)
        )
        norm_encoder_hidden_states = self.norm3_context(
            encoder_hidden_states, (1 + enc_scale_mlp.unsqueeze(1).to(torch.float32))
        )
        context_ff_output = self.ff_context(norm_encoder_hidden_states)
        encoder_hidden_states = encoder_hidden_states + self.norm4_context(
            context_ff_output, torch.tanh(enc_gate_mlp).unsqueeze(1)
        )

    # Restore the saved inputs for the perturbed branch: batch entries 2:
    # leave this block unchanged.
    hidden_states[2:] = hidden_states_ptb
    encoder_hidden_states[2:] = encoder_hidden_states_ptb

    return hidden_states, encoder_hidden_states
# from: https://github.com/genmoai/models/blob/075b6e36db58f1242921deff83a1066887b9c9e1/src/mochi_preview/infer.py#L77
def linear_quadratic_schedule(num_steps, threshold_noise, linear_steps=None):
    """Build the Mochi sigma schedule: linear up to `threshold_noise`, then quadratic.

    Args:
        num_steps (`int`): total number of sigmas to generate.
        threshold_noise (`float`): noise level at which the schedule switches
            from the linear segment to the quadratic tail.
        linear_steps (`int`, *optional*): number of steps in the linear
            segment; defaults to `num_steps // 2`.

    Returns:
        `List[float]`: `num_steps` descending sigma values starting at 1.0.
    """
    if linear_steps is None:
        linear_steps = num_steps // 2
    if num_steps <= 1:
        # Degenerate request: the coefficient computations below would divide
        # by zero. A single-step schedule is just the starting sigma.
        return [1.0] * max(num_steps, 0)
    if linear_steps == num_steps:
        # Fully linear schedule: `quadratic_steps` would be 0 and
        # `quadratic_coef` below would raise ZeroDivisionError.
        return [1.0 - i * threshold_noise / num_steps for i in range(num_steps)]
    # An explicit `linear_steps=0` also divided by zero in the original code;
    # clamp to a minimal linear segment (previously-crashing input only).
    linear_steps = max(linear_steps, 1)
    linear_sigma_schedule = [i * threshold_noise / linear_steps for i in range(linear_steps)]
    threshold_noise_step_diff = linear_steps - threshold_noise * num_steps
    quadratic_steps = num_steps - linear_steps
    # Quadratic tail chosen so the schedule is continuous at the threshold
    # and reaches the terminal noise level at the final step.
    quadratic_coef = threshold_noise_step_diff / (linear_steps * quadratic_steps**2)
    linear_coef = threshold_noise / linear_steps - 2 * threshold_noise_step_diff / (quadratic_steps**2)
    const = quadratic_coef * (linear_steps**2)
    quadratic_sigma_schedule = [
        quadratic_coef * (i**2) + linear_coef * i + const for i in range(linear_steps, num_steps)
    ]
    sigma_schedule = linear_sigma_schedule + quadratic_sigma_schedule
    # Convert noise levels to sigmas (descending from 1.0).
    sigma_schedule = [1.0 - x for x in sigma_schedule]
    return sigma_schedule
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps
def retrieve_timesteps(
    scheduler,
    num_inference_steps: Optional[int] = None,
    device: Optional[Union[str, torch.device]] = None,
    timesteps: Optional[List[int]] = None,
    sigmas: Optional[List[float]] = None,
    **kwargs,
):
    r"""
    Configure `scheduler` and return its timestep schedule.

    At most one of `timesteps` / `sigmas` may be supplied to override the
    scheduler's default spacing; otherwise `num_inference_steps` is used.
    Any extra `kwargs` are forwarded to `scheduler.set_timesteps`.

    Args:
        scheduler (`SchedulerMixin`): the scheduler providing `set_timesteps`.
        num_inference_steps (`int`, *optional*): number of denoising steps;
            ignored when a custom schedule is given.
        device (`str` or `torch.device`, *optional*): device to place the
            schedule on; `None` leaves it unmoved.
        timesteps (`List[int]`, *optional*): custom timestep schedule.
        sigmas (`List[float]`, *optional*): custom sigma schedule.

    Returns:
        `Tuple[torch.Tensor, int]`: the timestep schedule and the effective
        number of inference steps.
    """
    if timesteps is not None and sigmas is not None:
        raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values")

    def _supports(parameter: str) -> bool:
        # Whether `scheduler.set_timesteps` accepts the given keyword.
        return parameter in inspect.signature(scheduler.set_timesteps).parameters

    custom_schedule = timesteps is not None or sigmas is not None

    if timesteps is not None:
        if not _supports("timesteps"):
            raise ValueError(
                f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
                f" timestep schedules. Please check whether you are using the correct scheduler."
            )
        scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
    elif sigmas is not None:
        if not _supports("sigmas"):
            raise ValueError(
                f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
                f" sigmas schedules. Please check whether you are using the correct scheduler."
            )
        scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs)
    else:
        scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)

    timesteps = scheduler.timesteps
    if custom_schedule:
        # A custom schedule defines the step count itself.
        num_inference_steps = len(timesteps)
    return timesteps, num_inference_steps
    def _get_t5_prompt_embeds(
        self,
        prompt: Union[str, List[str]] = None,
        num_videos_per_prompt: int = 1,
        max_sequence_length: int = 256,
        device: Optional[torch.device] = None,
        dtype: Optional[torch.dtype] = None,
    ):
        """Tokenize and encode `prompt` with the T5 text encoder.

        Returns `(prompt_embeds, prompt_attention_mask)`, with the embeddings
        repeated `num_videos_per_prompt` times along the batch dimension.
        """
        device = device or self._execution_device
        dtype = dtype or self.text_encoder.dtype

        prompt = [prompt] if isinstance(prompt, str) else prompt
        batch_size = len(prompt)

        # Fixed-length padding/truncation to `max_sequence_length`.
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=max_sequence_length,
            truncation=True,
            add_special_tokens=True,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids
        prompt_attention_mask = text_inputs.attention_mask
        prompt_attention_mask = prompt_attention_mask.bool().to(device)

        # The original Mochi implementation zeros out empty negative prompts
        # but this can lead to overflow when placing the entire pipeline under the autocast context
        # adding this here so that we can enable zeroing prompts if necessary
        # NOTE(review): `prompt` is a list at this point, so `prompt == ""` can
        # never be True and only the last entry is checked — confirm intent.
        if self.config.force_zeros_for_empty_prompt and (prompt == "" or prompt[-1] == ""):
            text_input_ids = torch.zeros_like(text_input_ids, device=device)
            prompt_attention_mask = torch.zeros_like(prompt_attention_mask, dtype=torch.bool, device=device)

        # Warn the user about any text lost to truncation.
        untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
        if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
            removed_text = self.tokenizer.batch_decode(untruncated_ids[:, max_sequence_length - 1 : -1])
            logger.warning(
                "The following part of your input was truncated because `max_sequence_length` is set to "
                f" {max_sequence_length} tokens: {removed_text}"
            )

        prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=prompt_attention_mask)[0]
        prompt_embeds = prompt_embeds.to(dtype=dtype, device=device)

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        _, seq_len, _ = prompt_embeds.shape
        prompt_embeds = prompt_embeds.repeat(1, num_videos_per_prompt, 1)
        prompt_embeds = prompt_embeds.view(batch_size * num_videos_per_prompt, seq_len, -1)

        prompt_attention_mask = prompt_attention_mask.view(batch_size, -1)
        prompt_attention_mask = prompt_attention_mask.repeat(num_videos_per_prompt, 1)

        return prompt_embeds, prompt_attention_mask
    # Adapted from diffusers.pipelines.cogvideo.pipeline_cogvideox.CogVideoXPipeline.encode_prompt
    def encode_prompt(
        self,
        prompt: Union[str, List[str]],
        negative_prompt: Optional[Union[str, List[str]]] = None,
        do_classifier_free_guidance: bool = True,
        num_videos_per_prompt: int = 1,
        prompt_embeds: Optional[torch.Tensor] = None,
        negative_prompt_embeds: Optional[torch.Tensor] = None,
        prompt_attention_mask: Optional[torch.Tensor] = None,
        negative_prompt_attention_mask: Optional[torch.Tensor] = None,
        max_sequence_length: int = 256,
        device: Optional[torch.device] = None,
        dtype: Optional[torch.dtype] = None,
    ):
        r"""
        Encode the prompt (and, under classifier-free guidance, the negative prompt) into T5 hidden states.

        Args:
            prompt (`str` or `List[str]`): prompt(s) to encode.
            negative_prompt (`str` or `List[str]`, *optional*): prompt(s) not to guide generation;
                only encoded when `do_classifier_free_guidance` is `True`. Defaults to `""`.
            do_classifier_free_guidance (`bool`, defaults to `True`): whether to also encode the
                negative prompt.
            num_videos_per_prompt (`int`, defaults to 1): number of videos per prompt; embeddings
                are repeated accordingly.
            prompt_embeds (`torch.Tensor`, *optional*): pre-computed prompt embeddings; skips
                re-encoding `prompt` when provided.
            negative_prompt_embeds (`torch.Tensor`, *optional*): pre-computed negative embeddings.
            prompt_attention_mask / negative_prompt_attention_mask (`torch.Tensor`, *optional*):
                attention masks matching the corresponding embeddings.
            max_sequence_length (`int`, defaults to 256): tokenizer truncation length.
            device (`torch.device`, *optional*): torch device.
            dtype (`torch.dtype`, *optional*): torch dtype.

        Returns:
            `(prompt_embeds, prompt_attention_mask, negative_prompt_embeds,
            negative_prompt_attention_mask)` — the negative pair stays `None` when not computed.
        """
        device = device or self._execution_device

        prompt = [prompt] if isinstance(prompt, str) else prompt
        if prompt is not None:
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        if prompt_embeds is None:
            prompt_embeds, prompt_attention_mask = self._get_t5_prompt_embeds(
                prompt=prompt,
                num_videos_per_prompt=num_videos_per_prompt,
                max_sequence_length=max_sequence_length,
                device=device,
                dtype=dtype,
            )

        if do_classifier_free_guidance and negative_prompt_embeds is None:
            negative_prompt = negative_prompt or ""
            # Broadcast a single negative prompt across the whole batch.
            negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt

            if prompt is not None and type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )

            negative_prompt_embeds, negative_prompt_attention_mask = self._get_t5_prompt_embeds(
                prompt=negative_prompt,
                num_videos_per_prompt=num_videos_per_prompt,
                max_sequence_length=max_sequence_length,
                device=device,
                dtype=dtype,
            )

        return prompt_embeds, prompt_attention_mask, negative_prompt_embeds, negative_prompt_attention_mask
) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if prompt_embeds is not None and prompt_attention_mask is None: raise ValueError("Must provide `prompt_attention_mask` when specifying `prompt_embeds`.") if negative_prompt_embeds is not None and negative_prompt_attention_mask is None: raise ValueError("Must provide `negative_prompt_attention_mask` when specifying `negative_prompt_embeds`.") if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." ) if prompt_attention_mask.shape != negative_prompt_attention_mask.shape: raise ValueError( "`prompt_attention_mask` and `negative_prompt_attention_mask` must have the same shape when passed directly, but" f" got: `prompt_attention_mask` {prompt_attention_mask.shape} != `negative_prompt_attention_mask`" f" {negative_prompt_attention_mask.shape}." ) def enable_vae_slicing(self): r""" Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. """ depr_message = f"Calling `enable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_slicing()`." deprecate( "enable_vae_slicing", "0.40.0", depr_message, ) self.vae.enable_slicing() def disable_vae_slicing(self): r""" Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to computing decoding in one step. 
""" depr_message = f"Calling `disable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_slicing()`." deprecate( "disable_vae_slicing", "0.40.0", depr_message, ) self.vae.disable_slicing() def enable_vae_tiling(self): r""" Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow processing larger images. """ depr_message = f"Calling `enable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_tiling()`." deprecate( "enable_vae_tiling", "0.40.0", depr_message, ) self.vae.enable_tiling() def disable_vae_tiling(self): r""" Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to computing decoding in one step. """ depr_message = f"Calling `disable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_tiling()`." deprecate( "disable_vae_tiling", "0.40.0", depr_message, ) self.vae.disable_tiling() def prepare_latents( self, batch_size, num_channels_latents, height, width, num_frames, dtype, device, generator, latents=None, ): height = height // self.vae_spatial_scale_factor width = width // self.vae_spatial_scale_factor num_frames = (num_frames - 1) // self.vae_temporal_scale_factor + 1 shape = (batch_size, num_channels_latents, num_frames, height, width) if latents is not None: return latents.to(device=device, dtype=dtype) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
) latents = randn_tensor(shape, generator=generator, device=device, dtype=torch.float32) latents = latents.to(dtype) return latents @property def guidance_scale(self): return self._guidance_scale @property def do_classifier_free_guidance(self): return self._guidance_scale > 1.0 @property def do_spatio_temporal_guidance(self): return self._stg_scale > 0.0 @property def num_timesteps(self): return self._num_timesteps @property def attention_kwargs(self): return self._attention_kwargs @property def current_timestep(self): return self._current_timestep @property def interrupt(self): return self._interrupt @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, prompt: Union[str, List[str]] = None, negative_prompt: Optional[Union[str, List[str]]] = None, height: Optional[int] = None, width: Optional[int] = None, num_frames: int = 19, num_inference_steps: int = 64, timesteps: List[int] = None, guidance_scale: float = 4.5, num_videos_per_prompt: Optional[int] = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, prompt_embeds: Optional[torch.Tensor] = None, prompt_attention_mask: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_attention_mask: Optional[torch.Tensor] = None, output_type: str | None = "pil", return_dict: bool = True, attention_kwargs: Optional[Dict[str, Any]] = None, callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, callback_on_step_end_tensor_inputs: List[str] = ["latents"], max_sequence_length: int = 256, stg_applied_layers_idx: Optional[List[int]] = [34], stg_scale: Optional[float] = 0.0, do_rescaling: Optional[bool] = False, ): r""" Function invoked when calling the pipeline for generation. Args: prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. instead. 
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        prompt: Union[str, List[str]] = None,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        height: Optional[int] = None,
        width: Optional[int] = None,
        num_frames: int = 19,
        num_inference_steps: int = 64,
        timesteps: List[int] = None,
        guidance_scale: float = 4.5,
        num_videos_per_prompt: Optional[int] = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.Tensor] = None,
        prompt_embeds: Optional[torch.Tensor] = None,
        prompt_attention_mask: Optional[torch.Tensor] = None,
        negative_prompt_embeds: Optional[torch.Tensor] = None,
        negative_prompt_attention_mask: Optional[torch.Tensor] = None,
        output_type: str | None = "pil",
        return_dict: bool = True,
        attention_kwargs: Optional[Dict[str, Any]] = None,
        callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
        callback_on_step_end_tensor_inputs: List[str] = ["latents"],
        max_sequence_length: int = 256,
        stg_applied_layers_idx: Optional[List[int]] = [34],
        stg_scale: Optional[float] = 0.0,
        do_rescaling: Optional[bool] = False,
    ):
        r"""
        Generate a video from a text prompt, optionally with spatio-temporal guidance (STG).

        Args:
            prompt (`str` or `List[str]`, *optional*): prompt(s) to guide generation; required
                unless `prompt_embeds` is given.
            negative_prompt (`str` or `List[str]`, *optional*): prompt(s) not to guide generation.
            height / width (`int`, *optional*): output resolution; default 480x848.
            num_frames (`int`, defaults to 19): number of video frames.
            num_inference_steps (`int`, defaults to 64): number of denoising steps.
            timesteps (`List[int]`, *optional*): custom descending timestep schedule.
            guidance_scale (`float`, defaults to 4.5): classifier-free guidance weight; CFG is
                active when > 1.
            num_videos_per_prompt (`int`, *optional*, defaults to 1): videos per prompt.
            generator (`torch.Generator` or `List[torch.Generator]`, *optional*): RNG source for
                deterministic sampling.
            latents (`torch.Tensor`, *optional*): pre-generated noisy latents.
            prompt_embeds / prompt_attention_mask (`torch.Tensor`, *optional*): pre-computed
                prompt embeddings and mask.
            negative_prompt_embeds / negative_prompt_attention_mask (`torch.Tensor`, *optional*):
                pre-computed negative embeddings and mask.
            output_type (`str`, *optional*, defaults to `"pil"`): `"latent"`, `"pil"` or `"np"`.
            return_dict (`bool`, defaults to `True`): return a [`~pipelines.mochi.MochiPipelineOutput`]
                instead of a plain tuple.
            attention_kwargs (`dict`, *optional*): forwarded to the attention processors.
            callback_on_step_end (`Callable`, *optional*): called after each denoising step.
            callback_on_step_end_tensor_inputs (`List[str]`, *optional*): tensors passed to the
                callback; must be in `self._callback_tensor_inputs`.
            max_sequence_length (`int`, defaults to 256): prompt tokenizer truncation length.
            stg_applied_layers_idx (`List[int]`, *optional*, defaults to `[34]`): transformer block
                indices patched with `forward_with_stg`.
            stg_scale (`float`, *optional*, defaults to 0.0): STG weight; 0.0 disables STG (pure CFG).
            do_rescaling (`bool`, *optional*, defaults to `False`): rescale the guided prediction's
                std toward the text branch's std.

        Examples:

        Returns:
            [`~pipelines.mochi.MochiPipelineOutput`] or `tuple`: the generated frames (tuple when
            `return_dict` is `False`).
        """
        if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)):
            callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs

        height = height or self.default_height
        width = width or self.default_width

        # 1. Check inputs. Raise error if not correct
        self.check_inputs(
            prompt=prompt,
            height=height,
            width=width,
            callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            prompt_attention_mask=prompt_attention_mask,
            negative_prompt_attention_mask=negative_prompt_attention_mask,
        )

        self._guidance_scale = guidance_scale
        self._stg_scale = stg_scale
        self._attention_kwargs = attention_kwargs
        self._current_timestep = None
        self._interrupt = False

        # Patch the selected transformer blocks so their forward skips the
        # perturbed batch entries (spatio-temporal guidance).
        if self.do_spatio_temporal_guidance:
            for i in stg_applied_layers_idx:
                self.transformer.transformer_blocks[i].forward = types.MethodType(
                    forward_with_stg, self.transformer.transformer_blocks[i]
                )

        # 2. Define call parameters
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        device = self._execution_device

        # 3. Prepare text embeddings
        (
            prompt_embeds,
            prompt_attention_mask,
            negative_prompt_embeds,
            negative_prompt_attention_mask,
        ) = self.encode_prompt(
            prompt=prompt,
            negative_prompt=negative_prompt,
            do_classifier_free_guidance=self.do_classifier_free_guidance,
            num_videos_per_prompt=num_videos_per_prompt,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            prompt_attention_mask=prompt_attention_mask,
            negative_prompt_attention_mask=negative_prompt_attention_mask,
            max_sequence_length=max_sequence_length,
            device=device,
        )

        # 4. Prepare latent variables
        num_channels_latents = self.transformer.config.in_channels
        latents = self.prepare_latents(
            batch_size * num_videos_per_prompt,
            num_channels_latents,
            height,
            width,
            num_frames,
            prompt_embeds.dtype,
            device,
            generator,
            latents,
        )

        # Batch layout for the conditioning: [uncond, cond] for plain CFG,
        # [uncond, cond, cond] when STG is also active (the third entry is
        # perturbed inside the patched transformer blocks).
        if self.do_classifier_free_guidance and not self.do_spatio_temporal_guidance:
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
            prompt_attention_mask = torch.cat([negative_prompt_attention_mask, prompt_attention_mask], dim=0)
        elif self.do_classifier_free_guidance and self.do_spatio_temporal_guidance:
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds, prompt_embeds], dim=0)
            prompt_attention_mask = torch.cat(
                [negative_prompt_attention_mask, prompt_attention_mask, prompt_attention_mask], dim=0
            )

        # 5. Prepare timestep
        # from https://github.com/genmoai/models/blob/075b6e36db58f1242921deff83a1066887b9c9e1/src/mochi_preview/infer.py#L77
        threshold_noise = 0.025
        sigmas = linear_quadratic_schedule(num_inference_steps, threshold_noise)
        sigmas = np.array(sigmas)

        timesteps, num_inference_steps = retrieve_timesteps(
            self.scheduler,
            num_inference_steps,
            device,
            timesteps,
            sigmas,
        )
        num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
        self._num_timesteps = len(timesteps)

        # 6. Denoising loop
        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                if self.interrupt:
                    continue

                # Note: Mochi uses reversed timesteps. To ensure compatibility with methods like FasterCache, we need
                # to make sure we're using the correct non-reversed timestep value.
                self._current_timestep = 1000 - t
                if self.do_classifier_free_guidance and not self.do_spatio_temporal_guidance:
                    latent_model_input = torch.cat([latents] * 2)
                elif self.do_classifier_free_guidance and self.do_spatio_temporal_guidance:
                    latent_model_input = torch.cat([latents] * 3)
                else:
                    latent_model_input = latents

                # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
                timestep = t.expand(latent_model_input.shape[0]).to(latents.dtype)

                noise_pred = self.transformer(
                    hidden_states=latent_model_input,
                    encoder_hidden_states=prompt_embeds,
                    timestep=timestep,
                    encoder_attention_mask=prompt_attention_mask,
                    attention_kwargs=attention_kwargs,
                    return_dict=False,
                )[0]

                # Mochi CFG + Sampling runs in FP32
                noise_pred = noise_pred.to(torch.float32)

                # Combine the branch predictions (CFG, optionally + STG term).
                if self.do_classifier_free_guidance and not self.do_spatio_temporal_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond)
                elif self.do_classifier_free_guidance and self.do_spatio_temporal_guidance:
                    noise_pred_uncond, noise_pred_text, noise_pred_perturb = noise_pred.chunk(3)
                    noise_pred = (
                        noise_pred_uncond
                        + self.guidance_scale * (noise_pred_text - noise_pred_uncond)
                        + self._stg_scale * (noise_pred_text - noise_pred_perturb)
                    )

                    if do_rescaling:
                        # Pull the guided prediction's std back toward the
                        # text branch's std to counter over-saturation.
                        rescaling_scale = 0.7
                        factor = noise_pred_text.std() / noise_pred.std()
                        factor = rescaling_scale * factor + (1 - rescaling_scale)
                        noise_pred = noise_pred * factor

                # compute the previous noisy sample x_t -> x_t-1
                latents_dtype = latents.dtype
                latents = self.scheduler.step(noise_pred, t, latents.to(torch.float32), return_dict=False)[0]
                latents = latents.to(latents_dtype)

                # NOTE(review): this condition can never be True right after the
                # cast above — likely a leftover from the MPS workaround in
                # other pipelines; confirm before removing.
                if latents.dtype != latents_dtype:
                    if torch.backends.mps.is_available():
                        # some platforms (eg. apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272
                        latents = latents.to(latents_dtype)

                if callback_on_step_end is not None:
                    callback_kwargs = {}
                    for k in callback_on_step_end_tensor_inputs:
                        callback_kwargs[k] = locals()[k]
                    callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)

                    latents = callback_outputs.pop("latents", latents)
                    prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)

                # call the callback, if provided
                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                    progress_bar.update()

                if XLA_AVAILABLE:
                    xm.mark_step()

        self._current_timestep = None

        if output_type == "latent":
            video = latents
        else:
            # unscale/denormalize the latents
            # denormalize with the mean and std if available and not None
            has_latents_mean = hasattr(self.vae.config, "latents_mean") and self.vae.config.latents_mean is not None
            has_latents_std = hasattr(self.vae.config, "latents_std") and self.vae.config.latents_std is not None
            if has_latents_mean and has_latents_std:
                latents_mean = (
                    torch.tensor(self.vae.config.latents_mean).view(1, 12, 1, 1, 1).to(latents.device, latents.dtype)
                )
                latents_std = (
                    torch.tensor(self.vae.config.latents_std).view(1, 12, 1, 1, 1).to(latents.device, latents.dtype)
                )
                latents = latents * latents_std / self.vae.config.scaling_factor + latents_mean
            else:
                latents = latents / self.vae.config.scaling_factor

            video = self.vae.decode(latents, return_dict=False)[0]
            video = self.video_processor.postprocess_video(video, output_type=output_type)

        # Offload all models
        self.maybe_free_model_hooks()

        if not return_dict:
            return (video,)

        return MochiPipelineOutput(frames=video)
{ "repo_id": "huggingface/diffusers", "file_path": "examples/community/pipeline_stg_mochi.py", "license": "Apache License 2.0", "lines": 758, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:tests/single_file/test_model_wan_autoencoder_single_file.py
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from diffusers import ( AutoencoderKLWan, ) from ..testing_utils import ( enable_full_determinism, ) from .single_file_testing_utils import SingleFileModelTesterMixin enable_full_determinism() class TestAutoencoderKLWanSingleFile(SingleFileModelTesterMixin): model_class = AutoencoderKLWan ckpt_path = ( "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/blob/main/split_files/vae/wan_2.1_vae.safetensors" ) repo_id = "Wan-AI/Wan2.1-T2V-1.3B-Diffusers" subfolder = "vae"
{ "repo_id": "huggingface/diffusers", "file_path": "tests/single_file/test_model_wan_autoencoder_single_file.py", "license": "Apache License 2.0", "lines": 29, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/diffusers:tests/single_file/test_model_wan_transformer3d_single_file.py
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from diffusers import ( WanTransformer3DModel, ) from ..testing_utils import ( enable_full_determinism, require_big_accelerator, ) from .single_file_testing_utils import SingleFileModelTesterMixin enable_full_determinism() class TestWanTransformer3DModelText2VideoSingleFile(SingleFileModelTesterMixin): model_class = WanTransformer3DModel ckpt_path = "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/blob/main/split_files/diffusion_models/wan2.1_t2v_1.3B_bf16.safetensors" repo_id = "Wan-AI/Wan2.1-T2V-1.3B-Diffusers" subfolder = "transformer" @require_big_accelerator class TestWanTransformer3DModelImage2VideoSingleFile(SingleFileModelTesterMixin): model_class = WanTransformer3DModel ckpt_path = "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/blob/main/split_files/diffusion_models/wan2.1_i2v_480p_14B_fp8_e4m3fn.safetensors" repo_id = "Wan-AI/Wan2.1-I2V-14B-480P-Diffusers" torch_dtype = torch.float8_e4m3fn subfolder = "transformer"
{ "repo_id": "huggingface/diffusers", "file_path": "tests/single_file/test_model_wan_transformer3d_single_file.py", "license": "Apache License 2.0", "lines": 36, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/diffusers:src/diffusers/pipelines/hunyuan_video/pipeline_hunyuan_video_image2video.py
# Copyright 2025 The HunyuanVideo Team and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect from typing import Any, Callable import numpy as np import PIL.Image import torch from transformers import ( CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, LlamaTokenizerFast, LlavaForConditionalGeneration, ) from ...callbacks import MultiPipelineCallbacks, PipelineCallback from ...loaders import HunyuanVideoLoraLoaderMixin from ...models import AutoencoderKLHunyuanVideo, HunyuanVideoTransformer3DModel from ...schedulers import FlowMatchEulerDiscreteScheduler from ...utils import deprecate, is_torch_xla_available, logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ...video_processor import VideoProcessor from ..pipeline_utils import DiffusionPipeline from .pipeline_output import HunyuanVideoPipelineOutput if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```python >>> import torch >>> from diffusers import HunyuanVideoImageToVideoPipeline, HunyuanVideoTransformer3DModel >>> from diffusers.utils import load_image, export_to_video >>> # Available checkpoints: hunyuanvideo-community/HunyuanVideo-I2V, hunyuanvideo-community/HunyuanVideo-I2V-33ch >>> model_id = "hunyuanvideo-community/HunyuanVideo-I2V" >>> transformer = 
HunyuanVideoTransformer3DModel.from_pretrained( ... model_id, subfolder="transformer", torch_dtype=torch.bfloat16 ... ) >>> pipe = HunyuanVideoImageToVideoPipeline.from_pretrained( ... model_id, transformer=transformer, torch_dtype=torch.float16 ... ) >>> pipe.vae.enable_tiling() >>> pipe.to("cuda") >>> prompt = "A man with short gray hair plays a red electric guitar." >>> image = load_image( ... "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/guitar-man.png" ... ) >>> # If using hunyuanvideo-community/HunyuanVideo-I2V >>> output = pipe(image=image, prompt=prompt, guidance_scale=6.0).frames[0] >>> # If using hunyuanvideo-community/HunyuanVideo-I2V-33ch >>> output = pipe(image=image, prompt=prompt, guidance_scale=1.0, true_cfg_scale=1.0).frames[0] >>> export_to_video(output, "output.mp4", fps=15) ``` """ DEFAULT_PROMPT_TEMPLATE = { "template": ( "<|start_header_id|>system<|end_header_id|>\n\n<image>\nDescribe the video by detailing the following aspects according to the reference image: " "1. The main content and theme of the video." "2. The color, shape, size, texture, quantity, text, and spatial relationships of the objects." "3. Actions, events, behaviors temporal relationships, physical movement changes of the objects." "4. background environment, light, style and atmosphere." "5. 
camera angles, movements, and transitions used in the video:<|eot_id|>\n\n"
        "<|start_header_id|>user<|end_header_id|>\n\n{}<|eot_id|>"
        "<|start_header_id|>assistant<|end_header_id|>\n\n"
    ),
    # Number of leading template tokens (the system prompt) cropped off when
    # slicing the prompt embeddings/attention mask.
    "crop_start": 103,
    # The <image> placeholder expands to `image_emb_len` slots occupying
    # [image_emb_start, image_emb_end) of the expanded sequence.
    "image_emb_start": 5,
    "image_emb_end": 581,
    "image_emb_len": 576,
    # Token id of the double-newline separator, used below to locate the
    # assistant turn when cropping.
    "double_return_token_id": 271,
}


def _expand_input_ids_with_image_tokens(
    text_input_ids,
    prompt_attention_mask,
    max_sequence_length,
    image_token_index,
    image_emb_len,
    image_emb_start,
    image_emb_end,
    pad_token_id,
):
    """Expand each image-placeholder token into ``image_emb_len`` token slots.

    Returns a dict with ``input_ids``, ``attention_mask`` and ``position_ids``
    of width ``max_sequence_length + num_image_tokens * (image_emb_len - 1)``,
    ready to be passed to the text encoder alongside ``pixel_values``.
    """
    # Where the image placeholder(s) sit in each sequence.
    special_image_token_mask = text_input_ids == image_token_index
    num_special_image_tokens = torch.sum(special_image_token_mask, dim=-1)
    # Indices of all non-image tokens; these keep their relative order.
    batch_indices, non_image_indices = torch.where(text_input_ids != image_token_index)
    # Each image token grows by (image_emb_len - 1) extra slots.
    max_expanded_length = max_sequence_length + (num_special_image_tokens.max() * (image_emb_len - 1))
    # New position of every original token after the expansion.
    new_token_positions = torch.cumsum((special_image_token_mask * (image_emb_len - 1) + 1), -1) - 1
    text_to_overwrite = new_token_positions[batch_indices, non_image_indices]
    # Start from an all-padding canvas, then scatter the text tokens into place.
    expanded_input_ids = torch.full(
        (text_input_ids.shape[0], max_expanded_length),
        pad_token_id,
        dtype=text_input_ids.dtype,
        device=text_input_ids.device,
    )
    expanded_input_ids[batch_indices, text_to_overwrite] = text_input_ids[batch_indices, non_image_indices]
    # Fill the image span with the placeholder id (presumably substituted with
    # image features by the text encoder — confirm against LlavaForConditionalGeneration).
    expanded_input_ids[batch_indices, image_emb_start:image_emb_end] = image_token_index
    expanded_attention_mask = torch.zeros(
        (text_input_ids.shape[0], max_expanded_length),
        dtype=prompt_attention_mask.dtype,
        device=prompt_attention_mask.device,
    )
    # Attend to every non-padding slot.
    attn_batch_indices, attention_indices = torch.where(expanded_input_ids != pad_token_id)
    expanded_attention_mask[attn_batch_indices, attention_indices] = 1.0
    expanded_attention_mask = expanded_attention_mask.to(prompt_attention_mask.dtype)
    # Position ids count attended tokens; masked slots are pinned to 1.
    position_ids = (expanded_attention_mask.cumsum(-1) - 1).masked_fill_((expanded_attention_mask == 0), 1)
    return {
        "input_ids": expanded_input_ids,
        "attention_mask": expanded_attention_mask,
        "position_ids": position_ids,
    }


# Copied from
# diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps
def retrieve_timesteps(
    scheduler,
    num_inference_steps: int | None = None,
    device: str | torch.device | None = None,
    timesteps: list[int] | None = None,
    sigmas: list[float] | None = None,
    **kwargs,
):
    r"""
    Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
    custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.

    Args:
        scheduler (`SchedulerMixin`):
            The scheduler to get timesteps from.
        num_inference_steps (`int`):
            The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps`
            must be `None`.
        device (`str` or `torch.device`, *optional*):
            The device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
        timesteps (`list[int]`, *optional*):
            Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed,
            `num_inference_steps` and `sigmas` must be `None`.
        sigmas (`list[float]`, *optional*):
            Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed,
            `num_inference_steps` and `timesteps` must be `None`.

    Returns:
        `tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
        second element is the number of inference steps.
    """
    # `timesteps` and `sigmas` are mutually exclusive overrides.
    if timesteps is not None and sigmas is not None:
        raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values")
    if timesteps is not None:
        # Not every scheduler accepts an explicit `timesteps` schedule.
        accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
        if not accepts_timesteps:
            raise ValueError(
                f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
                f" timestep schedules. Please check whether you are using the correct scheduler."
            )
        scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
        timesteps = scheduler.timesteps
        num_inference_steps = len(timesteps)
    elif sigmas is not None:
        # Likewise for a custom `sigmas` schedule.
        accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
        if not accept_sigmas:
            raise ValueError(
                f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
                f" sigmas schedules. Please check whether you are using the correct scheduler."
            )
        scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs)
        timesteps = scheduler.timesteps
        num_inference_steps = len(timesteps)
    else:
        # Default path: let the scheduler derive its own schedule.
        scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
        timesteps = scheduler.timesteps
    return timesteps, num_inference_steps


# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents
def retrieve_latents(
    encoder_output: torch.Tensor, generator: torch.Generator | None = None, sample_mode: str = "sample"
):
    # Pull latents out of a VAE encoder output regardless of its exact form:
    # a latent distribution (sampled or mode) or precomputed latents.
    if hasattr(encoder_output, "latent_dist") and sample_mode == "sample":
        return encoder_output.latent_dist.sample(generator)
    elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax":
        # "argmax" takes the distribution mode (deterministic).
        return encoder_output.latent_dist.mode()
    elif hasattr(encoder_output, "latents"):
        return encoder_output.latents
    else:
        raise AttributeError("Could not access latents of provided encoder_output")


class HunyuanVideoImageToVideoPipeline(DiffusionPipeline, HunyuanVideoLoraLoaderMixin):
    r"""
    Pipeline for image-to-video generation using HunyuanVideo.

    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
    implemented for all pipelines (downloading, saving, running on a particular device, etc.).

    Args:
        text_encoder ([`LlavaForConditionalGeneration`]):
            [Llava Llama3-8B](https://huggingface.co/xtuner/llava-llama-3-8b-v1_1-transformers).
        tokenizer (`LlamaTokenizer`):
            Tokenizer from [Llava Llama3-8B](https://huggingface.co/xtuner/llava-llama-3-8b-v1_1-transformers).
        transformer ([`HunyuanVideoTransformer3DModel`]):
            Conditional Transformer to denoise the encoded image latents.
        scheduler ([`FlowMatchEulerDiscreteScheduler`]):
            A scheduler to be used in combination with `transformer` to denoise the encoded image latents.
        vae ([`AutoencoderKLHunyuanVideo`]):
            Variational Auto-Encoder (VAE) Model to encode and decode videos to and from latent representations.
        text_encoder_2 ([`CLIPTextModel`]):
            [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
            the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
        tokenizer_2 (`CLIPTokenizer`):
            Tokenizer of class
            [CLIPTokenizer](https://huggingface.co/docs/transformers/en/model_doc/clip#transformers.CLIPTokenizer).
    """

    # Order in which sub-models are shuttled to/from the accelerator when CPU
    # offloading is enabled.
    model_cpu_offload_seq = "text_encoder->text_encoder_2->transformer->vae"
    # Tensors that step-end callbacks may read and override.
    _callback_tensor_inputs = ["latents", "prompt_embeds"]

    def __init__(
        self,
        text_encoder: LlavaForConditionalGeneration,
        tokenizer: LlamaTokenizerFast,
        transformer: HunyuanVideoTransformer3DModel,
        vae: AutoencoderKLHunyuanVideo,
        scheduler: FlowMatchEulerDiscreteScheduler,
        text_encoder_2: CLIPTextModel,
        tokenizer_2: CLIPTokenizer,
        image_processor: CLIPImageProcessor,
    ):
        """Register all sub-models and derive VAE scaling constants."""
        super().__init__()

        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            transformer=transformer,
            scheduler=scheduler,
            text_encoder_2=text_encoder_2,
            tokenizer_2=tokenizer_2,
            image_processor=image_processor,
        )

        # Fall back to hard-coded defaults when the VAE is not available
        # (e.g. a partially-initialized pipeline).
        self.vae_scaling_factor = self.vae.config.scaling_factor if getattr(self, "vae", None) else 0.476986
        self.vae_scale_factor_temporal = self.vae.temporal_compression_ratio if getattr(self, "vae", None) else 4
        self.vae_scale_factor_spatial = self.vae.spatial_compression_ratio if getattr(self, "vae", None) else 8
        self.video_processor = VideoProcessor(vae_scale_factor=self.vae_scale_factor_spatial)

    def _get_llama_prompt_embeds(
        self,
        image: torch.Tensor,
        prompt: str | list[str],
        prompt_template: dict[str, Any],
        num_videos_per_prompt:
        int = 1,
        device: torch.device | None = None,
        dtype: torch.dtype | None = None,
        max_sequence_length: int = 256,
        num_hidden_layers_to_skip: int = 2,
        image_embed_interleave: int = 2,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        """Compute joint image+text prompt embeddings with the Llava encoder.

        The prompt is wrapped in `prompt_template`, the `<image>` placeholder is
        expanded, and the resulting hidden states are cropped so that only the
        image span plus the user prompt remain (template/system/assistant
        scaffolding removed). Returns `(prompt_embeds, prompt_attention_mask)`.
        """
        device = device or self._execution_device
        dtype = dtype or self.text_encoder.dtype

        prompt = [prompt] if isinstance(prompt, str) else prompt
        prompt = [prompt_template["template"].format(p) for p in prompt]

        crop_start = prompt_template.get("crop_start", None)
        image_emb_len = prompt_template.get("image_emb_len", 576)
        image_emb_start = prompt_template.get("image_emb_start", 5)
        image_emb_end = prompt_template.get("image_emb_end", 581)
        double_return_token_id = prompt_template.get("double_return_token_id", 271)

        if crop_start is None:
            # Derive the crop length by tokenizing the bare template.
            prompt_template_input = self.tokenizer(
                prompt_template["template"],
                padding="max_length",
                return_tensors="pt",
                return_length=False,
                return_overflowing_tokens=False,
                return_attention_mask=False,
            )
            crop_start = prompt_template_input["input_ids"].shape[-1]
            # Remove <|start_header_id|>, <|end_header_id|>, assistant, <|eot_id|>, and placeholder {}
            crop_start -= 5

        # Budget extra tokens for the template prefix that gets cropped later.
        max_sequence_length += crop_start
        text_inputs = self.tokenizer(
            prompt,
            max_length=max_sequence_length,
            padding="max_length",
            truncation=True,
            return_tensors="pt",
            return_length=False,
            return_overflowing_tokens=False,
            return_attention_mask=True,
        )
        text_input_ids = text_inputs.input_ids.to(device=device)
        prompt_attention_mask = text_inputs.attention_mask.to(device=device)

        image_embeds = self.image_processor(image, return_tensors="pt").pixel_values.to(device)

        image_token_index = self.text_encoder.config.image_token_index
        pad_token_id = self.text_encoder.config.pad_token_id
        # Expand the <image> placeholder into image_emb_len token slots.
        expanded_inputs = _expand_input_ids_with_image_tokens(
            text_input_ids,
            prompt_attention_mask,
            max_sequence_length,
            image_token_index,
            image_emb_len,
            image_emb_start,
            image_emb_end,
            pad_token_id,
        )
        # Take an intermediate hidden state rather than the final layer.
        prompt_embeds = self.text_encoder(
            **expanded_inputs,
            pixel_values=image_embeds,
            output_hidden_states=True,
        ).hidden_states[-(num_hidden_layers_to_skip + 1)]
        prompt_embeds = prompt_embeds.to(dtype=dtype)

        if crop_start is not None and crop_start > 0:
            # Offsets are shifted by the expanded image span.
            text_crop_start = crop_start - 1 + image_emb_len
            batch_indices, last_double_return_token_indices = torch.where(text_input_ids == double_return_token_id)

            if last_double_return_token_indices.shape[0] == 3:
                # in case the prompt is too long
                last_double_return_token_indices = torch.cat(
                    (last_double_return_token_indices, torch.tensor([text_input_ids.shape[-1]]))
                )
                batch_indices = torch.cat((batch_indices, torch.tensor([0])))

            # Keep only the LAST double-return per sequence (assistant turn).
            last_double_return_token_indices = last_double_return_token_indices.reshape(text_input_ids.shape[0], -1)[
                :, -1
            ]
            batch_indices = batch_indices.reshape(text_input_ids.shape[0], -1)[:, -1]
            assistant_crop_start = last_double_return_token_indices - 1 + image_emb_len - 4
            assistant_crop_end = last_double_return_token_indices - 1 + image_emb_len
            attention_mask_assistant_crop_start = last_double_return_token_indices - 4
            attention_mask_assistant_crop_end = last_double_return_token_indices

            prompt_embed_list = []
            prompt_attention_mask_list = []
            image_embed_list = []
            image_attention_mask_list = []

            # Per sequence: cut out the assistant scaffolding and slice out the
            # image span separately.
            for i in range(text_input_ids.shape[0]):
                prompt_embed_list.append(
                    torch.cat(
                        [
                            prompt_embeds[i, text_crop_start : assistant_crop_start[i].item()],
                            prompt_embeds[i, assistant_crop_end[i].item() :],
                        ]
                    )
                )
                prompt_attention_mask_list.append(
                    torch.cat(
                        [
                            prompt_attention_mask[i, crop_start : attention_mask_assistant_crop_start[i].item()],
                            prompt_attention_mask[i, attention_mask_assistant_crop_end[i].item() :],
                        ]
                    )
                )
                image_embed_list.append(prompt_embeds[i, image_emb_start:image_emb_end])
                image_attention_mask_list.append(
                    torch.ones(image_embed_list[-1].shape[0]).to(prompt_embeds.device).to(prompt_attention_mask.dtype)
                )

            prompt_embed_list = torch.stack(prompt_embed_list)
            prompt_attention_mask_list = torch.stack(prompt_attention_mask_list)
            image_embed_list = torch.stack(image_embed_list)
            image_attention_mask_list = torch.stack(image_attention_mask_list)

            # Optionally subsample the image tokens to shorten the sequence.
            if 0 < image_embed_interleave < 6:
                image_embed_list = image_embed_list[:, ::image_embed_interleave, :]
                image_attention_mask_list = image_attention_mask_list[:, ::image_embed_interleave]

            assert (
                prompt_embed_list.shape[0] == prompt_attention_mask_list.shape[0]
                and image_embed_list.shape[0] == image_attention_mask_list.shape[0]
            )

            # Final layout: [image tokens | cropped text tokens].
            prompt_embeds = torch.cat([image_embed_list, prompt_embed_list], dim=1)
            prompt_attention_mask = torch.cat([image_attention_mask_list, prompt_attention_mask_list], dim=1)

        return prompt_embeds, prompt_attention_mask

    def _get_clip_prompt_embeds(
        self,
        prompt: str | list[str],
        num_videos_per_prompt: int = 1,
        device: torch.device | None = None,
        dtype: torch.dtype | None = None,
        max_sequence_length: int = 77,
    ) -> torch.Tensor:
        """Return the CLIP pooled embedding for `prompt` (tokenizer_2/text_encoder_2)."""
        device = device or self._execution_device
        dtype = dtype or self.text_encoder_2.dtype

        prompt = [prompt] if isinstance(prompt, str) else prompt

        text_inputs = self.tokenizer_2(
            prompt,
            padding="max_length",
            max_length=max_sequence_length,
            truncation=True,
            return_tensors="pt",
        )

        text_input_ids = text_inputs.input_ids
        # Warn the user when the prompt exceeds CLIP's context window.
        untruncated_ids = self.tokenizer_2(prompt, padding="longest", return_tensors="pt").input_ids
        if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
            removed_text = self.tokenizer_2.batch_decode(untruncated_ids[:, max_sequence_length - 1 : -1])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {max_sequence_length} tokens: {removed_text}"
            )

        prompt_embeds = self.text_encoder_2(text_input_ids.to(device), output_hidden_states=False).pooler_output

        return prompt_embeds

    def encode_prompt(
        self,
        image: torch.Tensor,
        prompt: str | list[str],
        prompt_2: str | list[str] = None,
        prompt_template: dict[str, Any] = DEFAULT_PROMPT_TEMPLATE,
        num_videos_per_prompt: int = 1,
        prompt_embeds: torch.Tensor | None = None,
        pooled_prompt_embeds: torch.Tensor | None = None,
        prompt_attention_mask: torch.Tensor | None = None,
        device: torch.device | None = None,
        dtype: torch.dtype | None = None,
        max_sequence_length: int = 256,
        image_embed_interleave: int = 2,
    ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        # Llava path: joint image+text embeddings (skipped when the caller
        # supplies precomputed `prompt_embeds`).
        if prompt_embeds is None:
            prompt_embeds, prompt_attention_mask = self._get_llama_prompt_embeds(
                image,
                prompt,
                prompt_template,
                num_videos_per_prompt,
                device=device,
                dtype=dtype,
                max_sequence_length=max_sequence_length,
                image_embed_interleave=image_embed_interleave,
            )

        # CLIP path: pooled embedding.
        if pooled_prompt_embeds is None:
            if prompt_2 is None:
                prompt_2 = prompt
            # NOTE(review): `prompt_2` is resolved above but `prompt` is what
            # gets encoded here — looks intentional upstream, but confirm.
            pooled_prompt_embeds = self._get_clip_prompt_embeds(
                prompt,
                num_videos_per_prompt,
                device=device,
                dtype=dtype,
                max_sequence_length=77,
            )

        return prompt_embeds, pooled_prompt_embeds, prompt_attention_mask

    def check_inputs(
        self,
        prompt,
        prompt_2,
        height,
        width,
        prompt_embeds=None,
        callback_on_step_end_tensor_inputs=None,
        prompt_template=None,
        true_cfg_scale=1.0,
        guidance_scale=1.0,
    ):
        """Validate `__call__` arguments; raises `ValueError` on bad input."""
        if height % 16 != 0 or width % 16 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 16 but are {height} and {width}.")

        if callback_on_step_end_tensor_inputs is not None and not all(
            k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
        ):
            raise ValueError(
                f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
            )

        # `prompt`/`prompt_2` and `prompt_embeds` are mutually exclusive.
        if prompt is not None and prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
                " only forward one of the two."
            )
        elif prompt_2 is not None and prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
                " only forward one of the two."
            )
        elif prompt is None and prompt_embeds is None:
            raise ValueError(
                "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
            )
        elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
        elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)):
            raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}")

        if prompt_template is not None:
            if not isinstance(prompt_template, dict):
                raise ValueError(f"`prompt_template` has to be of type `dict` but is {type(prompt_template)}")
            if "template" not in prompt_template:
                raise ValueError(
                    f"`prompt_template` has to contain a key `template` but only found {prompt_template.keys()}"
                )

        # Both guidance mechanisms at once is allowed but discouraged.
        if true_cfg_scale > 1.0 and guidance_scale > 1.0:
            logger.warning(
                "Both `true_cfg_scale` and `guidance_scale` are greater than 1.0. This will result in both "
                "classifier-free guidance and embedded-guidance to be applied. This is not recommended "
                "as it may lead to higher memory usage, slower inference and potentially worse results."
            )

    def prepare_latents(
        self,
        image: torch.Tensor,
        batch_size: int,
        num_channels_latents: int = 32,
        height: int = 720,
        width: int = 1280,
        num_frames: int = 129,
        dtype: torch.dtype | None = None,
        device: torch.device | None = None,
        generator: torch.Generator | list[torch.Generator] | None = None,
        latents: torch.Tensor | None = None,
        image_condition_type: str = "latent_concat",
    ) -> torch.Tensor:
        """Build the initial noise latents and the image-conditioning latents."""
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        # Latent-space dimensions derived from the VAE compression ratios.
        num_latent_frames = (num_frames - 1) // self.vae_scale_factor_temporal + 1
        latent_height, latent_width = height // self.vae_scale_factor_spatial, width // self.vae_scale_factor_spatial
        shape = (batch_size, num_channels_latents, num_latent_frames, latent_height, latent_width)

        image = image.unsqueeze(2)  # [B, C, 1, H, W]
        # Deterministic ("argmax") VAE encoding of the conditioning image.
        if isinstance(generator, list):
            image_latents = [
                retrieve_latents(self.vae.encode(image[i].unsqueeze(0)), generator[i], "argmax")
                for i in range(batch_size)
            ]
        else:
            image_latents = [retrieve_latents(self.vae.encode(img.unsqueeze(0)), generator, "argmax") for img in image]
        image_latents = torch.cat(image_latents, dim=0).to(dtype) * self.vae_scaling_factor
        # Repeat the single-frame image latent across all latent frames.
        image_latents = image_latents.repeat(1, 1, num_latent_frames, 1, 1)

        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            latents = latents.to(device=device, dtype=dtype)

        # Blend: latents start as ~99.9% noise with a slight image prior.
        t = torch.tensor([0.999]).to(device=device)
        latents = latents * t + image_latents * (1 - t)

        # "token_replace" conditioning only keeps the first latent frame.
        if image_condition_type == "token_replace":
            image_latents = image_latents[:, :, :1]

        return latents, image_latents

    def enable_vae_slicing(self):
        r"""
        Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
        compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
        """
        # Deprecated pass-through; kept for backward compatibility.
        depr_message = f"Calling `enable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_slicing()`."
        deprecate(
            "enable_vae_slicing",
            "0.40.0",
            depr_message,
        )
        self.vae.enable_slicing()

    def disable_vae_slicing(self):
        r"""
        Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to
        computing decoding in one step.
        """
        # Deprecated pass-through; kept for backward compatibility.
        depr_message = f"Calling `disable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_slicing()`."
        deprecate(
            "disable_vae_slicing",
            "0.40.0",
            depr_message,
        )
        self.vae.disable_slicing()

    def enable_vae_tiling(self):
        r"""
        Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
        compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
        processing larger images.
        """
        # Deprecated pass-through; kept for backward compatibility.
        depr_message = f"Calling `enable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_tiling()`."
        deprecate(
            "enable_vae_tiling",
            "0.40.0",
            depr_message,
        )
        self.vae.enable_tiling()

    def disable_vae_tiling(self):
        r"""
        Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to
        computing decoding in one step.
        """
        # Deprecated pass-through; kept for backward compatibility.
        depr_message = f"Calling `disable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_tiling()`."
        deprecate(
            "disable_vae_tiling",
            "0.40.0",
            depr_message,
        )
        self.vae.disable_tiling()

    # The properties below expose state stashed on `self` by `__call__`.
    @property
    def guidance_scale(self):
        return self._guidance_scale

    @property
    def num_timesteps(self):
        return self._num_timesteps

    @property
    def attention_kwargs(self):
        return self._attention_kwargs

    @property
    def current_timestep(self):
        return self._current_timestep

    @property
    def interrupt(self):
        return self._interrupt

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image: PIL.Image.Image,
        prompt: str | list[str] = None,
        prompt_2: str | list[str] = None,
        negative_prompt: str | list[str] = None,
        negative_prompt_2: str | list[str] = None,
        height: int = 720,
        width: int = 1280,
        num_frames: int = 129,
        num_inference_steps: int = 50,
        sigmas: list[float] = None,
        true_cfg_scale: float = 1.0,
        guidance_scale: float = 1.0,
        num_videos_per_prompt: int | None = 1,
        generator: torch.Generator | list[torch.Generator] | None = None,
        latents: torch.Tensor | None = None,
        prompt_embeds:
torch.Tensor | None = None, pooled_prompt_embeds: torch.Tensor | None = None, prompt_attention_mask: torch.Tensor | None = None, negative_prompt_embeds: torch.Tensor | None = None, negative_pooled_prompt_embeds: torch.Tensor | None = None, negative_prompt_attention_mask: torch.Tensor | None = None, output_type: str | None = "pil", return_dict: bool = True, attention_kwargs: dict[str, Any] | None = None, callback_on_step_end: Callable[[int, int], None] | PipelineCallback | MultiPipelineCallbacks | None = None, callback_on_step_end_tensor_inputs: list[str] = ["latents"], prompt_template: dict[str, Any] = DEFAULT_PROMPT_TEMPLATE, max_sequence_length: int = 256, image_embed_interleave: int | None = None, ): r""" The call function to the pipeline for generation. Args: prompt (`str` or `list[str]`, *optional*): The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. instead. prompt_2 (`str` or `list[str]`, *optional*): The prompt or prompts to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is will be used instead. negative_prompt (`str` or `list[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `true_cfg_scale` is not greater than `1`). negative_prompt_2 (`str` or `list[str]`, *optional*): The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `negative_prompt` is used in all the text-encoders. height (`int`, defaults to `720`): The height in pixels of the generated image. width (`int`, defaults to `1280`): The width in pixels of the generated image. num_frames (`int`, defaults to `129`): The number of frames in the generated video. num_inference_steps (`int`, defaults to `50`): The number of denoising steps. 
More denoising steps usually lead to a higher quality image at the expense of slower inference. sigmas (`list[float]`, *optional*): Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed will be used. true_cfg_scale (`float`, *optional*, defaults to 1.0): When > 1.0 and a provided `negative_prompt`, enables true classifier-free guidance. guidance_scale (`float`, defaults to `1.0`): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. Note that the only available HunyuanVideo model is CFG-distilled, which means that traditional guidance between unconditional and conditional latent is not applied. num_videos_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. generator (`torch.Generator` or `list[torch.Generator]`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.Tensor`, *optional*): Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor is generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, text embeddings are generated from the `prompt` input argument. 
pooled_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, pooled text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` input argument. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generated image. Choose between `PIL.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`HunyuanVideoPipelineOutput`] instead of a plain tuple. attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*): A function or a subclass of `PipelineCallback` or `MultiPipelineCallbacks` that is called at the end of each denoising step during the inference. with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. 
`callback_kwargs` will include a list of all tensors as specified by `callback_on_step_end_tensor_inputs`. callback_on_step_end_tensor_inputs (`list`, *optional*): The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the `._callback_tensor_inputs` attribute of your pipeline class. Examples: Returns: [`~HunyuanVideoPipelineOutput`] or `tuple`: If `return_dict` is `True`, [`HunyuanVideoPipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated images and the second element is a list of `bool`s indicating whether the corresponding generated image contains "not-safe-for-work" (nsfw) content. """ if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs # 1. Check inputs. Raise error if not correct self.check_inputs( prompt, prompt_2, height, width, prompt_embeds, callback_on_step_end_tensor_inputs, prompt_template, true_cfg_scale, guidance_scale, ) image_condition_type = self.transformer.config.image_condition_type has_neg_prompt = negative_prompt is not None or ( negative_prompt_embeds is not None and negative_pooled_prompt_embeds is not None ) do_true_cfg = true_cfg_scale > 1 and has_neg_prompt image_embed_interleave = ( image_embed_interleave if image_embed_interleave is not None else ( 2 if image_condition_type == "latent_concat" else 4 if image_condition_type == "token_replace" else 1 ) ) self._guidance_scale = guidance_scale self._attention_kwargs = attention_kwargs self._current_timestep = None self._interrupt = False device = self._execution_device # 2. Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] # 3. 
Prepare latent variables vae_dtype = self.vae.dtype image_tensor = self.video_processor.preprocess(image, height, width).to(device, vae_dtype) if image_condition_type == "latent_concat": num_channels_latents = (self.transformer.config.in_channels - 1) // 2 elif image_condition_type == "token_replace": num_channels_latents = self.transformer.config.in_channels latents, image_latents = self.prepare_latents( image_tensor, batch_size * num_videos_per_prompt, num_channels_latents, height, width, num_frames, torch.float32, device, generator, latents, image_condition_type, ) if image_condition_type == "latent_concat": image_latents[:, :, 1:] = 0 mask = image_latents.new_ones(image_latents.shape[0], 1, *image_latents.shape[2:]) mask[:, :, 1:] = 0 # 4. Encode input prompt transformer_dtype = self.transformer.dtype prompt_embeds, pooled_prompt_embeds, prompt_attention_mask = self.encode_prompt( image=image, prompt=prompt, prompt_2=prompt_2, prompt_template=prompt_template, num_videos_per_prompt=num_videos_per_prompt, prompt_embeds=prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, prompt_attention_mask=prompt_attention_mask, device=device, max_sequence_length=max_sequence_length, image_embed_interleave=image_embed_interleave, ) prompt_embeds = prompt_embeds.to(transformer_dtype) prompt_attention_mask = prompt_attention_mask.to(transformer_dtype) pooled_prompt_embeds = pooled_prompt_embeds.to(transformer_dtype) if do_true_cfg: black_image = PIL.Image.new("RGB", (width, height), 0) negative_prompt_embeds, negative_pooled_prompt_embeds, negative_prompt_attention_mask = self.encode_prompt( image=black_image, prompt=negative_prompt, prompt_2=negative_prompt_2, prompt_template=prompt_template, num_videos_per_prompt=num_videos_per_prompt, prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=negative_pooled_prompt_embeds, prompt_attention_mask=negative_prompt_attention_mask, device=device, max_sequence_length=max_sequence_length, ) negative_prompt_embeds = 
negative_prompt_embeds.to(transformer_dtype) negative_prompt_attention_mask = negative_prompt_attention_mask.to(transformer_dtype) negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.to(transformer_dtype) # 5. Prepare timesteps sigmas = np.linspace(1.0, 0.0, num_inference_steps + 1)[:-1] if sigmas is None else sigmas timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, sigmas=sigmas) # 6. Prepare guidance condition guidance = None if self.transformer.config.guidance_embeds: guidance = ( torch.tensor([guidance_scale] * latents.shape[0], dtype=transformer_dtype, device=device) * 1000.0 ) # 7. Denoising loop num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order self._num_timesteps = len(timesteps) with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): if self.interrupt: continue self._current_timestep = t # broadcast to batch dimension in a way that's compatible with ONNX/Core ML timestep = t.expand(latents.shape[0]).to(latents.dtype) if image_condition_type == "latent_concat": latent_model_input = torch.cat([latents, image_latents, mask], dim=1).to(transformer_dtype) elif image_condition_type == "token_replace": latent_model_input = torch.cat([image_latents, latents[:, :, 1:]], dim=2).to(transformer_dtype) noise_pred = self.transformer( hidden_states=latent_model_input, timestep=timestep, encoder_hidden_states=prompt_embeds, encoder_attention_mask=prompt_attention_mask, pooled_projections=pooled_prompt_embeds, guidance=guidance, attention_kwargs=attention_kwargs, return_dict=False, )[0] if do_true_cfg: neg_noise_pred = self.transformer( hidden_states=latent_model_input, timestep=timestep, encoder_hidden_states=negative_prompt_embeds, encoder_attention_mask=negative_prompt_attention_mask, pooled_projections=negative_pooled_prompt_embeds, guidance=guidance, attention_kwargs=attention_kwargs, return_dict=False, )[0] noise_pred = neg_noise_pred + 
true_cfg_scale * (noise_pred - neg_noise_pred) # compute the previous noisy sample x_t -> x_t-1 if image_condition_type == "latent_concat": latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0] elif image_condition_type == "token_replace": latents = latents = self.scheduler.step( noise_pred[:, :, 1:], t, latents[:, :, 1:], return_dict=False )[0] latents = torch.cat([image_latents, latents], dim=2) if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop("latents", latents) prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if XLA_AVAILABLE: xm.mark_step() self._current_timestep = None if not output_type == "latent": latents = latents.to(self.vae.dtype) / self.vae_scaling_factor video = self.vae.decode(latents, return_dict=False)[0] if image_condition_type == "latent_concat": video = video[:, :, 4:, :, :] video = self.video_processor.postprocess_video(video, output_type=output_type) else: if image_condition_type == "latent_concat": video = latents[:, :, 1:, :, :] else: video = latents # Offload all models self.maybe_free_model_hooks() if not return_dict: return (video,) return HunyuanVideoPipelineOutput(frames=video)
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/pipelines/hunyuan_video/pipeline_hunyuan_video_image2video.py", "license": "Apache License 2.0", "lines": 885, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:tests/pipelines/hunyuan_video/test_hunyuan_image2video.py
# Copyright 2025 The HuggingFace Team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import unittest import numpy as np import torch from PIL import Image from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModel, CLIPTokenizer, LlamaConfig, LlamaTokenizerFast, LlavaConfig, LlavaForConditionalGeneration, ) from transformers.models.clip import CLIPVisionConfig from diffusers import ( AutoencoderKLHunyuanVideo, FlowMatchEulerDiscreteScheduler, HunyuanVideoImageToVideoPipeline, HunyuanVideoTransformer3DModel, ) from ...testing_utils import enable_full_determinism, torch_device from ..test_pipelines_common import PipelineTesterMixin, PyramidAttentionBroadcastTesterMixin, to_np enable_full_determinism() class HunyuanVideoImageToVideoPipelineFastTests( PipelineTesterMixin, PyramidAttentionBroadcastTesterMixin, unittest.TestCase ): pipeline_class = HunyuanVideoImageToVideoPipeline params = frozenset( ["image", "prompt", "height", "width", "guidance_scale", "prompt_embeds", "pooled_prompt_embeds"] ) batch_params = frozenset(["prompt", "image"]) required_optional_params = frozenset( [ "num_inference_steps", "generator", "latents", "return_dict", "callback_on_step_end", "callback_on_step_end_tensor_inputs", ] ) supports_dduf = False # there is no xformers processor for Flux test_xformers_attention = False test_layerwise_casting = True test_group_offloading = True def get_dummy_components(self, num_layers: int = 1, num_single_layers: int = 1): 
torch.manual_seed(0) transformer = HunyuanVideoTransformer3DModel( in_channels=2 * 4 + 1, out_channels=4, num_attention_heads=2, attention_head_dim=10, num_layers=num_layers, num_single_layers=num_single_layers, num_refiner_layers=1, patch_size=1, patch_size_t=1, guidance_embeds=False, text_embed_dim=16, pooled_projection_dim=8, rope_axes_dim=(2, 4, 4), image_condition_type="latent_concat", ) torch.manual_seed(0) vae = AutoencoderKLHunyuanVideo( in_channels=3, out_channels=3, latent_channels=4, down_block_types=( "HunyuanVideoDownBlock3D", "HunyuanVideoDownBlock3D", "HunyuanVideoDownBlock3D", "HunyuanVideoDownBlock3D", ), up_block_types=( "HunyuanVideoUpBlock3D", "HunyuanVideoUpBlock3D", "HunyuanVideoUpBlock3D", "HunyuanVideoUpBlock3D", ), block_out_channels=(8, 8, 8, 8), layers_per_block=1, act_fn="silu", norm_num_groups=4, scaling_factor=0.476986, spatial_compression_ratio=8, temporal_compression_ratio=4, mid_block_add_attention=True, ) torch.manual_seed(0) scheduler = FlowMatchEulerDiscreteScheduler(shift=7.0) text_config = LlamaConfig( bos_token_id=0, eos_token_id=2, hidden_size=16, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=2, pad_token_id=100, vocab_size=1000, hidden_act="gelu", projection_dim=32, ) vision_config = CLIPVisionConfig( hidden_size=8, intermediate_size=37, projection_dim=32, num_attention_heads=4, num_hidden_layers=2, image_size=224, ) llava_text_encoder_config = LlavaConfig(vision_config, text_config, pad_token_id=100, image_token_index=101) clip_text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=8, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=2, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=32, ) torch.manual_seed(0) text_encoder = LlavaForConditionalGeneration(llava_text_encoder_config) tokenizer = LlamaTokenizerFast.from_pretrained("finetrainers/dummy-hunyaunvideo", subfolder="tokenizer") torch.manual_seed(0) 
text_encoder_2 = CLIPTextModel(clip_text_encoder_config) tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") torch.manual_seed(0) image_processor = CLIPImageProcessor( crop_size=224, do_center_crop=True, do_normalize=True, do_resize=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], resample=3, size=224, ) components = { "transformer": transformer, "vae": vae, "scheduler": scheduler, "text_encoder": text_encoder, "text_encoder_2": text_encoder_2, "tokenizer": tokenizer, "tokenizer_2": tokenizer_2, "image_processor": image_processor, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) image_height = 16 image_width = 16 image = Image.new("RGB", (image_width, image_height)) inputs = { "image": image, "prompt": "dance monkey", "prompt_template": { "template": "{}", "crop_start": 0, "image_emb_len": 49, "image_emb_start": 5, "image_emb_end": 54, "double_return_token_id": 0, }, "generator": generator, "num_inference_steps": 2, "guidance_scale": 4.5, "height": image_height, "width": image_width, "num_frames": 9, "max_sequence_length": 64, "output_type": "pt", } return inputs def test_inference(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) video = pipe(**inputs).frames generated_video = video[0] # NOTE: The expected video has 4 lesser frames because they are dropped in the pipeline self.assertEqual(generated_video.shape, (5, 3, 16, 16)) # fmt: off expected_slice = torch.tensor([0.4441, 0.4790, 0.4485, 0.5748, 0.3539, 0.1553, 0.2707, 0.3594, 0.5331, 0.6645, 0.6799, 0.5257, 0.5092, 0.3450, 0.4276, 0.4127]) # fmt: on generated_slice = generated_video.flatten() generated_slice = 
torch.cat([generated_slice[:8], generated_slice[-8:]]) self.assertTrue( torch.allclose(generated_slice, expected_slice, atol=1e-3), "The generated video does not match the expected slice.", ) def test_callback_inputs(self): sig = inspect.signature(self.pipeline_class.__call__) has_callback_tensor_inputs = "callback_on_step_end_tensor_inputs" in sig.parameters has_callback_step_end = "callback_on_step_end" in sig.parameters if not (has_callback_tensor_inputs and has_callback_step_end): return components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) self.assertTrue( hasattr(pipe, "_callback_tensor_inputs"), f" {self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs", ) def callback_inputs_subset(pipe, i, t, callback_kwargs): # iterate over callback args for tensor_name, tensor_value in callback_kwargs.items(): # check that we're only passing in allowed tensor inputs assert tensor_name in pipe._callback_tensor_inputs return callback_kwargs def callback_inputs_all(pipe, i, t, callback_kwargs): for tensor_name in pipe._callback_tensor_inputs: assert tensor_name in callback_kwargs # iterate over callback args for tensor_name, tensor_value in callback_kwargs.items(): # check that we're only passing in allowed tensor inputs assert tensor_name in pipe._callback_tensor_inputs return callback_kwargs inputs = self.get_dummy_inputs(torch_device) # Test passing in a subset inputs["callback_on_step_end"] = callback_inputs_subset inputs["callback_on_step_end_tensor_inputs"] = ["latents"] output = pipe(**inputs)[0] # Test passing in a everything inputs["callback_on_step_end"] = callback_inputs_all inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs output = pipe(**inputs)[0] def callback_inputs_change_tensor(pipe, i, t, callback_kwargs): is_last = i == (pipe.num_timesteps - 1) if 
is_last: callback_kwargs["latents"] = torch.zeros_like(callback_kwargs["latents"]) return callback_kwargs inputs["callback_on_step_end"] = callback_inputs_change_tensor inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs output = pipe(**inputs)[0] assert output.abs().sum() < 1e10 def test_attention_slicing_forward_pass( self, test_max_difference=True, test_mean_pixel_difference=True, expected_max_diff=1e-3 ): if not self.test_attention_slicing: return components = self.get_dummy_components() pipe = self.pipeline_class(**components) for component in pipe.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) generator_device = "cpu" inputs = self.get_dummy_inputs(generator_device) output_without_slicing = pipe(**inputs)[0] pipe.enable_attention_slicing(slice_size=1) inputs = self.get_dummy_inputs(generator_device) output_with_slicing1 = pipe(**inputs)[0] pipe.enable_attention_slicing(slice_size=2) inputs = self.get_dummy_inputs(generator_device) output_with_slicing2 = pipe(**inputs)[0] if test_max_difference: max_diff1 = np.abs(to_np(output_with_slicing1) - to_np(output_without_slicing)).max() max_diff2 = np.abs(to_np(output_with_slicing2) - to_np(output_without_slicing)).max() self.assertLess( max(max_diff1, max_diff2), expected_max_diff, "Attention slicing should not affect the inference results", ) def test_vae_tiling(self, expected_diff_max: float = 0.2): # Seems to require higher tolerance than the other tests expected_diff_max = 0.6 generator_device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to("cpu") pipe.set_progress_bar_config(disable=None) # Without tiling inputs = self.get_dummy_inputs(generator_device) inputs["height"] = inputs["width"] = 128 output_without_tiling = pipe(**inputs)[0] # With tiling pipe.vae.enable_tiling( tile_sample_min_height=96, 
tile_sample_min_width=96, tile_sample_stride_height=64, tile_sample_stride_width=64, ) inputs = self.get_dummy_inputs(generator_device) inputs["height"] = inputs["width"] = 128 output_with_tiling = pipe(**inputs)[0] self.assertLess( (to_np(output_without_tiling) - to_np(output_with_tiling)).max(), expected_diff_max, "VAE tiling should not affect the inference results", ) # TODO(aryan): Create a dummy gemma model with smol vocab size @unittest.skip( "A very small vocab size is used for fast tests. So, Any kind of prompt other than the empty default used in other tests will lead to a embedding lookup error. This test uses a long prompt that causes the error." ) def test_inference_batch_consistent(self): pass @unittest.skip( "A very small vocab size is used for fast tests. So, Any kind of prompt other than the empty default used in other tests will lead to a embedding lookup error. This test uses a long prompt that causes the error." ) def test_inference_batch_single_identical(self): pass @unittest.skip( "Encode prompt currently does not work in isolation because of requiring image embeddings from image processor. The test does not handle this case, or we need to rewrite encode_prompt." ) def test_encode_prompt_works_in_isolation(self): pass
{ "repo_id": "huggingface/diffusers", "file_path": "tests/pipelines/hunyuan_video/test_hunyuan_image2video.py", "license": "Apache License 2.0", "lines": 337, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/diffusers:examples/community/cogvideox_ddim_inversion.py
""" This script performs DDIM inversion for video frames using a pre-trained model and generates a video reconstruction based on a provided prompt. It utilizes the CogVideoX pipeline to process video frames, apply the DDIM inverse scheduler, and produce an output video. **Please notice that this script is based on the CogVideoX 5B model, and would not generate a good result for 2B variants.** Usage: python cogvideox_ddim_inversion.py --model-path /path/to/model --prompt "a prompt" --video-path /path/to/video.mp4 --output-path /path/to/output For more details about the cli arguments, please run `python cogvideox_ddim_inversion.py --help`. Author: LittleNyima <littlenyima[at]163[dot]com> """ import argparse import math import os from typing import Any, Dict, List, Optional, Tuple, TypedDict, Union, cast import torch import torch.nn.functional as F import torchvision.transforms as T from transformers import T5EncoderModel, T5Tokenizer from diffusers.models.attention_processor import Attention, CogVideoXAttnProcessor2_0 from diffusers.models.autoencoders import AutoencoderKLCogVideoX from diffusers.models.embeddings import apply_rotary_emb from diffusers.models.transformers.cogvideox_transformer_3d import CogVideoXBlock, CogVideoXTransformer3DModel from diffusers.pipelines.cogvideo.pipeline_cogvideox import CogVideoXPipeline, retrieve_timesteps from diffusers.schedulers import CogVideoXDDIMScheduler, DDIMInverseScheduler from diffusers.utils import export_to_video # Must import after torch because this can sometimes lead to a nasty segmentation fault, or stack smashing error. # Very few bug reports but it happens. Look in decord Github issues for more relevant information. 
import decord # isort: skip class DDIMInversionArguments(TypedDict): model_path: str prompt: str video_path: str output_path: str guidance_scale: float num_inference_steps: int skip_frames_start: int skip_frames_end: int frame_sample_step: Optional[int] max_num_frames: int width: int height: int fps: int dtype: torch.dtype seed: int device: torch.device def get_args() -> DDIMInversionArguments: parser = argparse.ArgumentParser() parser.add_argument("--model_path", type=str, required=True, help="Path of the pretrained model") parser.add_argument("--prompt", type=str, required=True, help="Prompt for the direct sample procedure") parser.add_argument("--video_path", type=str, required=True, help="Path of the video for inversion") parser.add_argument("--output_path", type=str, default="output", help="Path of the output videos") parser.add_argument("--guidance_scale", type=float, default=6.0, help="Classifier-free guidance scale") parser.add_argument("--num_inference_steps", type=int, default=50, help="Number of inference steps") parser.add_argument("--skip_frames_start", type=int, default=0, help="Number of skipped frames from the start") parser.add_argument("--skip_frames_end", type=int, default=0, help="Number of skipped frames from the end") parser.add_argument("--frame_sample_step", type=int, default=None, help="Temporal stride of the sampled frames") parser.add_argument("--max_num_frames", type=int, default=81, help="Max number of sampled frames") parser.add_argument("--width", type=int, default=720, help="Resized width of the video frames") parser.add_argument("--height", type=int, default=480, help="Resized height of the video frames") parser.add_argument("--fps", type=int, default=8, help="Frame rate of the output videos") parser.add_argument("--dtype", type=str, default="bf16", choices=["bf16", "fp16"], help="Dtype of the model") parser.add_argument("--seed", type=int, default=42, help="Seed for the random number generator") parser.add_argument("--device", 
type=str, default="cuda", choices=["cuda", "cpu"], help="Device for inference")
    args = parser.parse_args()

    # Map the CLI string choices onto the concrete torch objects the pipeline expects.
    args.dtype = torch.bfloat16 if args.dtype == "bf16" else torch.float16
    args.device = torch.device(args.device)

    return DDIMInversionArguments(**vars(args))


class CogVideoXAttnProcessor2_0ForDDIMInversion(CogVideoXAttnProcessor2_0):
    # Attention processor used during reconstruction: the batch carries a
    # [sample; reference] pair and the sample branch additionally attends to the
    # reference branch's keys/values (see `__call__`).
    def __init__(self):
        super().__init__()

    def calculate_attention(
        self,
        query: torch.Tensor,
        key: torch.Tensor,
        value: torch.Tensor,
        attn: Attention,
        batch_size: int,
        image_seq_length: int,
        text_seq_length: int,
        attention_mask: Optional[torch.Tensor],
        image_rotary_emb: Optional[torch.Tensor],
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        r"""
        Core attention computation with inversion-guided RoPE integration.

        Args:
            query (`torch.Tensor`): `[batch_size, seq_len, dim]` query tensor
            key (`torch.Tensor`): `[batch_size, seq_len, dim]` key tensor
            value (`torch.Tensor`): `[batch_size, seq_len, dim]` value tensor
            attn (`Attention`): Parent attention module with projection layers
            batch_size (`int`): Effective batch size (after chunk splitting)
            image_seq_length (`int`): Length of image feature sequence
            text_seq_length (`int`): Length of text feature sequence
            attention_mask (`Optional[torch.Tensor]`): Attention mask tensor
            image_rotary_emb (`Optional[torch.Tensor]`): Rotary embeddings for image positions

        Returns:
            `Tuple[torch.Tensor, torch.Tensor]`:
                (1) hidden_states: [batch_size, image_seq_length, dim] processed image features
                (2) encoder_hidden_states: [batch_size, text_seq_length, dim] processed text features
        """
        inner_dim = key.shape[-1]
        head_dim = inner_dim // attn.heads

        # [B, S, H*D] -> [B, H, S, D] for multi-head attention.
        query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
        key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
        value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)

        if attn.norm_q is not None:
            query = attn.norm_q(query)
        if attn.norm_k is not None:
            key = attn.norm_k(key)

        # Apply RoPE if needed
        if image_rotary_emb is not None:
query[:, :, text_seq_length:] = apply_rotary_emb(query[:, :, text_seq_length:], image_rotary_emb)
            if not attn.is_cross_attention:
                if key.size(2) == query.size(2):
                    # Attention for reference hidden states (key sequence same length as query)
                    key[:, :, text_seq_length:] = apply_rotary_emb(key[:, :, text_seq_length:], image_rotary_emb)
                else:
                    # Key carries two concatenated branches: RoPE should be applied to each group of image tokens
                    key[:, :, text_seq_length : text_seq_length + image_seq_length] = apply_rotary_emb(
                        key[:, :, text_seq_length : text_seq_length + image_seq_length], image_rotary_emb
                    )
                    key[:, :, text_seq_length * 2 + image_seq_length :] = apply_rotary_emb(
                        key[:, :, text_seq_length * 2 + image_seq_length :], image_rotary_emb
                    )

        hidden_states = F.scaled_dot_product_attention(
            query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False
        )
        # [B, H, S, D] -> [B, S, H*D]
        hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)

        # linear proj
        hidden_states = attn.to_out[0](hidden_states)
        # dropout
        hidden_states = attn.to_out[1](hidden_states)

        # Split the joint [text; image] sequence back into its two parts.
        encoder_hidden_states, hidden_states = hidden_states.split(
            [text_seq_length, hidden_states.size(1) - text_seq_length], dim=1
        )
        return hidden_states, encoder_hidden_states

    def __call__(
        self,
        attn: Attention,
        hidden_states: torch.Tensor,
        encoder_hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        image_rotary_emb: Optional[torch.Tensor] = None,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        r"""
        Process the dual-path attention for the inversion-guided denoising procedure.
Args:
            attn (`Attention`): Parent attention module
            hidden_states (`torch.Tensor`): `[batch_size, image_seq_len, dim]` Image tokens
            encoder_hidden_states (`torch.Tensor`): `[batch_size, text_seq_len, dim]` Text tokens
            attention_mask (`Optional[torch.Tensor]`): Optional attention mask
            image_rotary_emb (`Optional[torch.Tensor]`): Rotary embeddings for image tokens

        Returns:
            `Tuple[torch.Tensor, torch.Tensor]`:
                (1) Final hidden states: `[batch_size, image_seq_length, dim]` Resulting image tokens
                (2) Final encoder states: `[batch_size, text_seq_length, dim]` Resulting text tokens
        """
        image_seq_length = hidden_states.size(1)
        text_seq_length = encoder_hidden_states.size(1)

        # Joint sequence: [text tokens; image tokens].
        hidden_states = torch.cat([encoder_hidden_states, hidden_states], dim=1)

        batch_size, sequence_length, _ = (
            hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
        )

        if attention_mask is not None:
            attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
            attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1])

        query = attn.to_q(hidden_states)
        key = attn.to_k(hidden_states)
        value = attn.to_v(hidden_states)

        # The batch is [sample; reference] stacked along dim 0 — split the two branches.
        query, query_reference = query.chunk(2)
        key, key_reference = key.chunk(2)
        value, value_reference = value.chunk(2)
        batch_size = batch_size // 2

        # Sample branch attends over its own keys/values concatenated with the reference's.
        hidden_states, encoder_hidden_states = self.calculate_attention(
            query=query,
            key=torch.cat((key, key_reference), dim=1),
            value=torch.cat((value, value_reference), dim=1),
            attn=attn,
            batch_size=batch_size,
            image_seq_length=image_seq_length,
            text_seq_length=text_seq_length,
            attention_mask=attention_mask,
            image_rotary_emb=image_rotary_emb,
        )
        # Reference branch runs plain self-attention on its own projections.
        hidden_states_reference, encoder_hidden_states_reference = self.calculate_attention(
            query=query_reference,
            key=key_reference,
            value=value_reference,
            attn=attn,
            batch_size=batch_size,
            image_seq_length=image_seq_length,
            text_seq_length=text_seq_length,
            attention_mask=attention_mask,
            image_rotary_emb=image_rotary_emb,
        )

        return (
torch.cat((hidden_states, hidden_states_reference)),
            torch.cat((encoder_hidden_states, encoder_hidden_states_reference)),
        )


class OverrideAttnProcessors:
    r"""
    Context manager for temporarily overriding attention processors in CogVideo transformer blocks.

    Designed for DDIM inversion process, replaces original attention processors with
    `CogVideoXAttnProcessor2_0ForDDIMInversion` and restores them upon exit. Uses Python context manager pattern to
    safely manage processor replacement.

    Typical usage:
    ```python
    with OverrideAttnProcessors(transformer):
        # Perform DDIM inversion operations
    ```

    Args:
        transformer (`CogVideoXTransformer3DModel`):
            The transformer model containing attention blocks to be modified. Should have `transformer_blocks`
            attribute containing `CogVideoXBlock` instances.
    """

    def __init__(self, transformer: CogVideoXTransformer3DModel):
        self.transformer = transformer
        # Keyed by id(block) so restoration maps each block back to its own processor.
        self.original_processors = {}

    def __enter__(self):
        for block in self.transformer.transformer_blocks:
            block = cast(CogVideoXBlock, block)
            self.original_processors[id(block)] = block.attn1.get_processor()
            block.attn1.set_processor(CogVideoXAttnProcessor2_0ForDDIMInversion())

    def __exit__(self, _0, _1, _2):
        # Restore the saved processors even if the body raised.
        for block in self.transformer.transformer_blocks:
            block = cast(CogVideoXBlock, block)
            block.attn1.set_processor(self.original_processors[id(block)])


def get_video_frames(
    video_path: str,
    width: int,
    height: int,
    skip_frames_start: int,
    skip_frames_end: int,
    max_num_frames: int,
    frame_sample_step: Optional[int],
) -> torch.FloatTensor:
    """
    Extract and preprocess video frames from a video file for VAE processing.
Args:
        video_path (`str`): Path to input video file
        width (`int`): Target frame width for decoding
        height (`int`): Target frame height for decoding
        skip_frames_start (`int`): Number of frames to skip at video start
        skip_frames_end (`int`): Number of frames to skip at video end
        max_num_frames (`int`): Maximum allowed number of output frames
        frame_sample_step (`Optional[int]`):
            Frame sampling step size. If None, automatically calculated as:
            (total_frames - skipped_frames) // max_num_frames

    Returns:
        `torch.FloatTensor`: Preprocessed frames in `[F, C, H, W]` format where:
            - `F`: Number of frames (adjusted to 4k + 1 for VAE compatibility)
            - `C`: Channels (3 for RGB)
            - `H`: Frame height
            - `W`: Frame width
    """
    with decord.bridge.use_torch():
        video_reader = decord.VideoReader(uri=video_path, width=width, height=height)
        video_num_frames = len(video_reader)
        start_frame = min(skip_frames_start, video_num_frames)
        end_frame = max(0, video_num_frames - skip_frames_end)

        if end_frame <= start_frame:
            # Degenerate range after skipping — fall back to a single frame.
            indices = [start_frame]
        elif end_frame - start_frame <= max_num_frames:
            indices = list(range(start_frame, end_frame))
        else:
            step = frame_sample_step or (end_frame - start_frame) // max_num_frames
            indices = list(range(start_frame, end_frame, step))

        frames = video_reader.get_batch(indices=indices)
        frames = frames[:max_num_frames].float()  # ensure that we don't go over the limit

        # Choose first (4k + 1) frames as this is how many is required by the VAE
        selected_num_frames = frames.size(0)
        remainder = (3 + selected_num_frames) % 4
        if remainder != 0:
            frames = frames[:-remainder]
        assert frames.size(0) % 4 == 1

        # Normalize the frames from [0, 255] to [-1, 1]
        transform = T.Lambda(lambda x: x / 255.0 * 2.0 - 1.0)
        frames = torch.stack(tuple(map(transform, frames)), dim=0)

        return frames.permute(0, 3, 1, 2).contiguous()  # [F, C, H, W]


class CogVideoXDDIMInversionOutput:
    # Plain container for the two latent trajectories produced by the pipeline.
    inverse_latents: torch.FloatTensor
    recon_latents: torch.FloatTensor

    def __init__(self, inverse_latents: torch.FloatTensor, recon_latents:
torch.FloatTensor):
        self.inverse_latents = inverse_latents
        self.recon_latents = recon_latents


class CogVideoXPipelineForDDIMInversion(CogVideoXPipeline):
    # CogVideoX pipeline extended with a DDIM inverse scheduler plus helpers for
    # encoding input video frames to latents and exporting latents back to video.
    def __init__(
        self,
        tokenizer: T5Tokenizer,
        text_encoder: T5EncoderModel,
        vae: AutoencoderKLCogVideoX,
        transformer: CogVideoXTransformer3DModel,
        scheduler: CogVideoXDDIMScheduler,
    ):
        super().__init__(
            tokenizer=tokenizer,
            text_encoder=text_encoder,
            vae=vae,
            transformer=transformer,
            scheduler=scheduler,
        )
        # The inverse scheduler is built from the forward scheduler's configuration.
        self.inverse_scheduler = DDIMInverseScheduler(**scheduler.config)

    def encode_video_frames(self, video_frames: torch.FloatTensor) -> torch.FloatTensor:
        """
        Encode video frames into latent space using Variational Autoencoder.

        Args:
            video_frames (`torch.FloatTensor`):
                Input frames tensor in `[F, C, H, W]` format from `get_video_frames()`

        Returns:
            `torch.FloatTensor`: Encoded latents in `[1, F, D, H_latent, W_latent]` format where:
                - `F`: Number of frames (same as input)
                - `D`: Latent channel dimension
                - `H_latent`: Latent space height (H // 2^vae.downscale_factor)
                - `W_latent`: Latent space width (W // 2^vae.downscale_factor)
        """
        vae: AutoencoderKLCogVideoX = self.vae
        video_frames = video_frames.to(device=vae.device, dtype=vae.dtype)
        video_frames = video_frames.unsqueeze(0).permute(0, 2, 1, 3, 4)  # [B, C, F, H, W]
        latent_dist = vae.encode(x=video_frames).latent_dist.sample().transpose(1, 2)
        return latent_dist * vae.config.scaling_factor

    @torch.no_grad()
    def export_latents_to_video(self, latents: torch.FloatTensor, video_path: str, fps: int):
        r"""
        Decode latent vectors into video and export as video file.
Args:
            latents (`torch.FloatTensor`):
                Encoded latents in `[B, F, D, H_latent, W_latent]` format from `encode_video_frames()`
            video_path (`str`): Output path for video file
            fps (`int`): Target frames per second for output video
        """
        video = self.decode_latents(latents)
        frames = self.video_processor.postprocess_video(video=video, output_type="pil")
        # Create the destination directory before export.
        os.makedirs(os.path.dirname(video_path), exist_ok=True)
        export_to_video(video_frames=frames[0], output_video_path=video_path, fps=fps)

    # Modified from CogVideoXPipeline.__call__
    @torch.no_grad()
    def sample(
        self,
        latents: torch.FloatTensor,
        scheduler: Union[DDIMInverseScheduler, CogVideoXDDIMScheduler],
        prompt: Optional[Union[str, List[str]]] = None,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_inference_steps: int = 50,
        guidance_scale: float = 6,
        use_dynamic_cfg: bool = False,
        eta: float = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        attention_kwargs: Optional[Dict[str, Any]] = None,
        reference_latents: torch.FloatTensor = None,
    ) -> torch.FloatTensor:
        r"""
        Execute the core sampling loop for video generation/inversion using CogVideoX.
        Implements the full denoising trajectory recording for both DDIM inversion and generation processes.
        Supports dynamic classifier-free guidance and reference latent conditioning.

        Args:
            latents (`torch.FloatTensor`): Initial noise tensor of shape `[B, F, C, H, W]`.
            scheduler (`Union[DDIMInverseScheduler, CogVideoXDDIMScheduler]`):
                Scheduling strategy for diffusion process. Use:
                (1) `DDIMInverseScheduler` for inversion
                (2) `CogVideoXDDIMScheduler` for generation
            prompt (`Optional[Union[str, List[str]]]`):
                Text prompt(s) for conditional generation. Defaults to unconditional.
            negative_prompt (`Optional[Union[str, List[str]]]`):
                Negative prompt(s) for guidance. Requires `guidance_scale > 1`.
            num_inference_steps (`int`): Number of denoising steps. Affects quality/compute trade-off.
            guidance_scale (`float`): Classifier-free guidance weight.
1.0 = no guidance.
            use_dynamic_cfg (`bool`): Enable time-varying guidance scale (cosine schedule)
            eta (`float`): DDIM variance parameter (0 = deterministic process)
            generator (`Optional[Union[torch.Generator, List[torch.Generator]]]`):
                Random number generator(s) for reproducibility
            attention_kwargs (`Optional[Dict[str, Any]]`): Custom parameters for attention modules
            reference_latents (`torch.FloatTensor`):
                Reference latent trajectory for conditional sampling.
                Shape should match `[T, B, F, C, H, W]` where `T` is number of timesteps
        Returns:
            `torch.FloatTensor`: Full denoising trajectory tensor of shape `[T, B, F, C, H, W]`.
        """
        self._guidance_scale = guidance_scale
        self._attention_kwargs = attention_kwargs
        self._interrupt = False

        device = self._execution_device

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0

        # 3. Encode input prompt
        prompt_embeds, negative_prompt_embeds = self.encode_prompt(
            prompt,
            negative_prompt,
            do_classifier_free_guidance,
            device=device,
        )
        if do_classifier_free_guidance:
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
        if reference_latents is not None:
            # Duplicate the embeddings so the reference branch receives the same conditioning.
            prompt_embeds = torch.cat([prompt_embeds] * 2, dim=0)

        # 4. Prepare timesteps
        timesteps, num_inference_steps = retrieve_timesteps(scheduler, num_inference_steps, device)
        self._num_timesteps = len(timesteps)

        # 5. Prepare latents.
        latents = latents.to(device=device) * scheduler.init_noise_sigma

        # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
        extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
        if isinstance(scheduler, DDIMInverseScheduler):  # Inverse scheduler does not accept extra kwargs
            extra_step_kwargs = {}

        # 7.
Create rotary embeds if required
        image_rotary_emb = (
            self._prepare_rotary_positional_embeddings(
                height=latents.size(3) * self.vae_scale_factor_spatial,
                width=latents.size(4) * self.vae_scale_factor_spatial,
                num_frames=latents.size(1),
                device=device,
            )
            if self.transformer.config.use_rotary_positional_embeddings
            else None
        )

        # 8. Denoising loop
        num_warmup_steps = max(len(timesteps) - num_inference_steps * scheduler.order, 0)
        # Latents are recorded after every step: trajectory is [T, B, F, C, H, W].
        trajectory = torch.zeros_like(latents).unsqueeze(0).repeat(len(timesteps), 1, 1, 1, 1, 1)

        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                if self.interrupt:
                    continue

                latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
                if reference_latents is not None:
                    # Append the reference branch so the overridden attention processor sees both.
                    reference = reference_latents[i]
                    reference = torch.cat([reference] * 2) if do_classifier_free_guidance else reference
                    latent_model_input = torch.cat([latent_model_input, reference], dim=0)
                latent_model_input = scheduler.scale_model_input(latent_model_input, t)

                # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
                timestep = t.expand(latent_model_input.shape[0])

                # predict noise model_output
                noise_pred = self.transformer(
                    hidden_states=latent_model_input,
                    encoder_hidden_states=prompt_embeds,
                    timestep=timestep,
                    image_rotary_emb=image_rotary_emb,
                    attention_kwargs=attention_kwargs,
                    return_dict=False,
                )[0]
                noise_pred = noise_pred.float()
                if reference_latents is not None:
                    # Recover the original batch size by dropping the reference branch's prediction.
                    noise_pred, _ = noise_pred.chunk(2)

                # perform guidance
                if use_dynamic_cfg:
                    self._guidance_scale = 1 + guidance_scale * (
                        (1 - math.cos(math.pi * ((num_inference_steps - t.item()) / num_inference_steps) ** 5.0)) / 2
                    )
                if do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond)

                # compute the noisy sample x_t-1 -> x_t
                latents = scheduler.step(noise_pred, t, latents,
**extra_step_kwargs, return_dict=False)[0]
                latents = latents.to(prompt_embeds.dtype)
                # Record the full denoising trajectory, one entry per timestep.
                trajectory[i] = latents

                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % scheduler.order == 0):
                    progress_bar.update()

        # Offload all models
        self.maybe_free_model_hooks()

        return trajectory

    @torch.no_grad()
    def __call__(
        self,
        prompt: str,
        video_path: str,
        guidance_scale: float,
        num_inference_steps: int,
        skip_frames_start: int,
        skip_frames_end: int,
        frame_sample_step: Optional[int],
        max_num_frames: int,
        width: int,
        height: int,
        seed: int,
    ):
        """
        Performs DDIM inversion on a video to reconstruct it with a new prompt.

        Args:
            prompt (`str`): The text prompt to guide the reconstruction.
            video_path (`str`): Path to the input video file.
            guidance_scale (`float`): Scale for classifier-free guidance.
            num_inference_steps (`int`): Number of denoising steps.
            skip_frames_start (`int`): Number of frames to skip from the beginning of the video.
            skip_frames_end (`int`): Number of frames to skip from the end of the video.
            frame_sample_step (`Optional[int]`): Step size for sampling frames. If None, all frames are used.
            max_num_frames (`int`): Maximum number of frames to process.
            width (`int`): Width of the output video frames.
            height (`int`): Height of the output video frames.
            seed (`int`): Random seed for reproducibility.

        Returns:
            `CogVideoXDDIMInversionOutput`: Contains the inverse latents and reconstructed latents.
""" if not self.transformer.config.use_rotary_positional_embeddings: raise NotImplementedError("This script supports CogVideoX 5B model only.") video_frames = get_video_frames( video_path=video_path, width=width, height=height, skip_frames_start=skip_frames_start, skip_frames_end=skip_frames_end, max_num_frames=max_num_frames, frame_sample_step=frame_sample_step, ).to(device=self.device) video_latents = self.encode_video_frames(video_frames=video_frames) inverse_latents = self.sample( latents=video_latents, scheduler=self.inverse_scheduler, prompt="", num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, generator=torch.Generator(device=self.device).manual_seed(seed), ) with OverrideAttnProcessors(transformer=self.transformer): recon_latents = self.sample( latents=torch.randn_like(video_latents), scheduler=self.scheduler, prompt=prompt, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, generator=torch.Generator(device=self.device).manual_seed(seed), reference_latents=reversed(inverse_latents), ) return CogVideoXDDIMInversionOutput( inverse_latents=inverse_latents, recon_latents=recon_latents, ) if __name__ == "__main__": arguments = get_args() pipeline = CogVideoXPipelineForDDIMInversion.from_pretrained( arguments.pop("model_path"), torch_dtype=arguments.pop("dtype"), ).to(device=arguments.pop("device")) output_path = arguments.pop("output_path") fps = arguments.pop("fps") inverse_video_path = os.path.join(output_path, f"{arguments.get('video_path')}_inversion.mp4") recon_video_path = os.path.join(output_path, f"{arguments.get('video_path')}_reconstruction.mp4") # Run DDIM inversion output = pipeline(**arguments) pipeline.export_latents_to_video(output.inverse_latents[-1], inverse_video_path, fps) pipeline.export_latents_to_video(output.recon_latents[-1], recon_video_path, fps)
{ "repo_id": "huggingface/diffusers", "file_path": "examples/community/cogvideox_ddim_inversion.py", "license": "Apache License 2.0", "lines": 551, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
huggingface/diffusers:examples/community/mod_controlnet_tile_sr_sdxl.py
# Copyright 2025 The DEVAIEXP Team and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect from enum import Enum from typing import Any, Dict, List, Optional, Tuple, Union import numpy as np import torch import torch.nn.functional as F from PIL import Image from transformers import ( CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer, ) from diffusers.image_processor import PipelineImageInput, VaeImageProcessor from diffusers.loaders import ( FromSingleFileMixin, StableDiffusionXLLoraLoaderMixin, TextualInversionLoaderMixin, ) from diffusers.models import ( AutoencoderKL, ControlNetModel, ControlNetUnionModel, MultiControlNetModel, UNet2DConditionModel, ) from diffusers.models.lora import adjust_lora_scale_text_encoder from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin from diffusers.pipelines.stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput from diffusers.schedulers import KarrasDiffusionSchedulers, LMSDiscreteScheduler from diffusers.utils import ( USE_PEFT_BACKEND, deprecate, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers, ) from diffusers.utils.import_utils import is_invisible_watermark_available from diffusers.utils.torch_utils import is_compiled_module, randn_tensor if is_invisible_watermark_available(): from diffusers.pipelines.stable_diffusion_xl.watermark import StableDiffusionXLWatermarker from diffusers.utils import 
is_torch_xla_available if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py import torch from diffusers import DiffusionPipeline, ControlNetUnionModel, AutoencoderKL, UniPCMultistepScheduler from diffusers.utils import load_image from PIL import Image device = "cuda" # Initialize the models and pipeline controlnet = ControlNetUnionModel.from_pretrained( "brad-twinkl/controlnet-union-sdxl-1.0-promax", torch_dtype=torch.float16 ).to(device=device) vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16).to(device=device) model_id = "SG161222/RealVisXL_V5.0" pipe = StableDiffusionXLControlNetTileSRPipeline.from_pretrained( model_id, controlnet=controlnet, vae=vae, torch_dtype=torch.float16, use_safetensors=True, variant="fp16" ).to(device) pipe.enable_model_cpu_offload() # << Enable this if you have limited VRAM pipe.enable_vae_tiling() # << Enable this if you have limited VRAM pipe.enable_vae_slicing() # << Enable this if you have limited VRAM # Set selected scheduler pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config) # Load image control_image = load_image("https://huggingface.co/datasets/DEVAIEXP/assets/resolve/main/1.jpg") original_height = control_image.height original_width = control_image.width print(f"Current resolution: H:{original_height} x W:{original_width}") # Pre-upscale image for tiling resolution = 4096 tile_gaussian_sigma = 0.3 max_tile_size = 1024 # or 1280 current_size = max(control_image.size) scale_factor = max(2, resolution / current_size) new_size = (int(control_image.width * scale_factor), int(control_image.height * scale_factor)) image = control_image.resize(new_size, Image.LANCZOS) # Update target height and width target_height = image.height target_width = image.width print(f"Target resolution: 
H:{target_height} x W:{target_width}") # Calculate overlap size normal_tile_overlap, border_tile_overlap = calculate_overlap(target_width, target_height) # Set other params tile_weighting_method = TileWeightingMethod.COSINE.value guidance_scale = 4 num_inference_steps = 35 denoising_strenght = 0.65 controlnet_strength = 1.0 prompt = "high-quality, noise-free edges, high quality, 4k, hd, 8k" negative_prompt = "blurry, pixelated, noisy, low resolution, artifacts, poor details" # Image generation control_image = pipe( image=image, control_image=control_image, control_mode=[6], controlnet_conditioning_scale=float(controlnet_strength), prompt=prompt, negative_prompt=negative_prompt, normal_tile_overlap=normal_tile_overlap, border_tile_overlap=border_tile_overlap, height=target_height, width=target_width, original_size=(original_width, original_height), target_size=(target_width, target_height), guidance_scale=guidance_scale, strength=float(denoising_strenght), tile_weighting_method=tile_weighting_method, max_tile_size=max_tile_size, tile_gaussian_sigma=float(tile_gaussian_sigma), num_inference_steps=num_inference_steps, )["images"][0] ``` """ # This function was copied and adapted from https://huggingface.co/spaces/gokaygokay/TileUpscalerV2, licensed under Apache 2.0. def _adaptive_tile_size(image_size, base_tile_size=512, max_tile_size=1280): """ Calculate the adaptive tile size based on the image dimensions, ensuring the tile respects the aspect ratio and stays within the specified size limits. 
"""
    width, height = image_size
    aspect_ratio = width / height

    if aspect_ratio > 1:
        # Landscape orientation: clamp width first, derive height from the ratio.
        tile_width = min(width, max_tile_size)
        tile_height = min(int(tile_width / aspect_ratio), max_tile_size)
    else:
        # Portrait or square orientation: clamp height first, derive width.
        tile_height = min(height, max_tile_size)
        tile_width = min(int(tile_height * aspect_ratio), max_tile_size)

    # Ensure the tile size is not smaller than the base_tile_size
    tile_width = max(tile_width, base_tile_size)
    tile_height = max(tile_height, base_tile_size)

    return tile_width, tile_height


# Copied and adapted from https://github.com/huggingface/diffusers/blob/main/examples/community/mixture_tiling.py
def _tile2pixel_indices(
    tile_row: int,
    tile_col: int,
    tile_width: int,
    tile_height: int,
    tile_row_overlap: int,
    tile_col_overlap: int,
    image_width: int,
    image_height: int,
) -> Tuple[int, int, int, int]:
    """Given a tile row and column numbers returns the range of pixels affected by that tiles in the overall image

    Returns a tuple with:
        - Starting coordinates of rows in pixel space
        - Ending coordinates of rows in pixel space
        - Starting coordinates of columns in pixel space
        - Ending coordinates of columns in pixel space
    """
    # Calculate initial indices (each subsequent tile starts one stride — tile size
    # minus overlap — past the previous one)
    px_row_init = 0 if tile_row == 0 else tile_row * (tile_height - tile_row_overlap)
    px_col_init = 0 if tile_col == 0 else tile_col * (tile_width - tile_col_overlap)

    # Calculate end indices
    px_row_end = px_row_init + tile_height
    px_col_end = px_col_init + tile_width

    # Ensure the last tile does not exceed the image dimensions
    px_row_end = min(px_row_end, image_height)
    px_col_end = min(px_col_end, image_width)

    return px_row_init, px_row_end, px_col_init, px_col_end


# Copied and adapted from https://github.com/huggingface/diffusers/blob/main/examples/community/mixture_tiling.py
def _tile2latent_indices(
    tile_row, tile_col, tile_width, tile_height, tile_row_overlap, tile_col_overlap, image_width, image_height
):
    """Given a tile row and column numbers returns the range of latents affected by that tiles in the overall image

    Returns a tuple with:
        - Starting
coordinates of rows in latent space
        - Ending coordinates of rows in latent space
        - Starting coordinates of columns in latent space
        - Ending coordinates of columns in latent space
    """
    # Get pixel indices
    px_row_init, px_row_end, px_col_init, px_col_end = _tile2pixel_indices(
        tile_row, tile_col, tile_width, tile_height, tile_row_overlap, tile_col_overlap, image_width, image_height
    )

    # Convert to latent space (pixel coordinates divided by the fixed factor of 8)
    latent_row_init = px_row_init // 8
    latent_row_end = px_row_end // 8
    latent_col_init = px_col_init // 8
    latent_col_end = px_col_end // 8
    latent_height = image_height // 8
    latent_width = image_width // 8

    # Ensure the last tile does not exceed the latent dimensions
    latent_row_end = min(latent_row_end, latent_height)
    latent_col_end = min(latent_col_end, latent_width)

    return latent_row_init, latent_row_end, latent_col_init, latent_col_end


# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents
def retrieve_latents(
    encoder_output: torch.Tensor, generator: torch.Generator | None = None, sample_mode: str = "sample"
):
    if hasattr(encoder_output, "latent_dist") and sample_mode == "sample":
        return encoder_output.latent_dist.sample(generator)
    elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax":
        return encoder_output.latent_dist.mode()
    elif hasattr(encoder_output, "latents"):
        return encoder_output.latents
    else:
        raise AttributeError("Could not access latents of provided encoder_output")


class StableDiffusionXLControlNetTileSRPipeline(
    DiffusionPipeline,
    StableDiffusionMixin,
    TextualInversionLoaderMixin,
    StableDiffusionXLLoraLoaderMixin,
    FromSingleFileMixin,
):
    r"""
    Pipeline for image-to-image generation using Stable Diffusion XL with ControlNet guidance.

    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
    library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
The pipeline also inherits the following loading methods: - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings - [`~loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`] for loading LoRA weights - [`~loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights`] for saving LoRA weights Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. text_encoder ([`CLIPTextModel`]): Frozen text-encoder. Stable Diffusion uses the text portion of [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. text_encoder_2 ([` CLIPTextModelWithProjection`]): Second frozen text-encoder. Stable Diffusion XL uses the text and pool portion of [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection), specifically the [laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k) variant. tokenizer (`CLIPTokenizer`): Tokenizer of class [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). tokenizer_2 (`CLIPTokenizer`): Second Tokenizer of class [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. controlnet ([`ControlNetUnionModel`]): Provides additional conditioning to the unet during the denoising process. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. 
        requires_aesthetics_score (`bool`, *optional*, defaults to `"False"`):
            Whether the `unet` requires an `aesthetic_score` condition to be passed during inference. Also see the
            config of `stabilityai/stable-diffusion-xl-refiner-1-0`.
        force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `"True"`):
            Whether the negative prompt embeddings shall be forced to always be set to 0. Also see the config of
            `stabilityai/stable-diffusion-xl-base-1-0`.
        add_watermarker (`bool`, *optional*):
            Whether to use the [invisible_watermark library](https://github.com/ShieldMnt/invisible-watermark/) to
            watermark output images. If not defined, it will default to True if the package is installed, otherwise no
            watermarker will be used.
    """

    # Order in which sub-models are moved on/off the accelerator when model CPU offload is enabled.
    model_cpu_offload_seq = "text_encoder->text_encoder_2->unet->vae"
    # Components the pipeline can be instantiated without.
    _optional_components = [
        "tokenizer",
        "tokenizer_2",
        "text_encoder",
        "text_encoder_2",
    ]

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        text_encoder_2: CLIPTextModelWithProjection,
        tokenizer: CLIPTokenizer,
        tokenizer_2: CLIPTokenizer,
        unet: UNet2DConditionModel,
        controlnet: ControlNetUnionModel,
        scheduler: KarrasDiffusionSchedulers,
        requires_aesthetics_score: bool = False,
        force_zeros_for_empty_prompt: bool = True,
        add_watermarker: Optional[bool] = None,
    ):
        """Register all sub-models and build the image/control/mask processors.

        Raises:
            ValueError: If `controlnet` is not a `ControlNetUnionModel` (this pipeline only
                supports the union ControlNet variant).
        """
        super().__init__()

        if not isinstance(controlnet, ControlNetUnionModel):
            raise ValueError("Expected `controlnet` to be of type `ControlNetUnionModel`.")

        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            text_encoder_2=text_encoder_2,
            tokenizer=tokenizer,
            tokenizer_2=tokenizer_2,
            unet=unet,
            controlnet=controlnet,
            scheduler=scheduler,
        )
        # Pixel-to-latent downsampling factor derived from the VAE architecture (8 for SDXL).
        self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8
        self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True)
        # Control images are not normalized to [-1, 1]: the ControlNet expects [0, 1] input.
        self.control_image_processor = VaeImageProcessor(
            vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False
        )
        self.mask_processor = VaeImageProcessor(
            vae_scale_factor=self.vae_scale_factor, do_normalize=False, do_binarize=True, do_convert_grayscale=True
        )
        add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available()

        if add_watermarker:
            self.watermark = StableDiffusionXLWatermarker()
        else:
            self.watermark = None

        self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt)
        self.register_to_config(requires_aesthetics_score=requires_aesthetics_score)

    def calculate_overlap(self, width: int, height: int, base_overlap: int = 128):
        """
        Calculates dynamic overlap based on the image's aspect ratio.

        Args:
            width (int): Width of the image in pixels.
            height (int): Height of the image in pixels.
            base_overlap (int, optional): Base overlap value in pixels. Defaults to 128.

        Returns:
            tuple: A tuple containing:
                - row_overlap (int): Overlap between tiles in consecutive rows.
                - col_overlap (int): Overlap between tiles in consecutive columns.
        """
        ratio = height / width
        # NOTE(review): the two branches are asymmetric — wide images get (base/2, base) while tall
        # images get (base, base*2), so a tall image uses a strictly larger total overlap than a
        # wide one. Confirm this is intentional and not a transposed pair.
        if ratio < 1:  # Image is wider than tall
            return base_overlap // 2, base_overlap
        else:  # Image is taller than wide
            return base_overlap, base_overlap * 2

    class TileWeightingMethod(Enum):
        """Mode in which the tile weights will be generated"""

        COSINE = "Cosine"
        GAUSSIAN = "Gaussian"

    # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.encode_prompt
    def encode_prompt(
        self,
        prompt: str,
        prompt_2: str | None = None,
        device: Optional[torch.device] = None,
        num_images_per_prompt: int = 1,
        do_classifier_free_guidance: bool = True,
        negative_prompt: str | None = None,
        negative_prompt_2: str | None = None,
        prompt_embeds: Optional[torch.Tensor] = None,
        negative_prompt_embeds: Optional[torch.Tensor] = None,
        pooled_prompt_embeds: Optional[torch.Tensor] = None,
        negative_pooled_prompt_embeds: Optional[torch.Tensor] = None,
        lora_scale: Optional[float] = None,
        clip_skip: Optional[int] = None,
    ):
        r"""
        Encodes the prompt into text encoder hidden states.
Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded prompt_2 (`str` or `List[str]`, *optional*): The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is used in both text-encoders device: (`torch.device`): torch device num_images_per_prompt (`int`): number of images that should be generated per prompt do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). negative_prompt_2 (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. pooled_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, pooled text embeddings will be generated from `prompt` input argument. negative_pooled_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` input argument. 
lora_scale (`float`, *optional*): A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. """ device = device or self._execution_device # set lora scale so that monkey patched LoRA # function of text encoder can correctly access it if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin): self._lora_scale = lora_scale # dynamically adjust the LoRA scale if self.text_encoder is not None: if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if self.text_encoder_2 is not None: if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale) else: scale_lora_layers(self.text_encoder_2, lora_scale) prompt = [prompt] if isinstance(prompt, str) else prompt if prompt is not None: batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] # Define tokenizers and text encoders tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2] text_encoders = ( [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2] ) dtype = text_encoders[0].dtype if prompt_embeds is None: prompt_2 = prompt_2 or prompt prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 # textual inversion: process multi-vector tokens if necessary prompt_embeds_list = [] prompts = [prompt, prompt_2] for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders): if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, tokenizer) text_inputs = tokenizer( prompt, padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt", ) 
text_input_ids = text_inputs.input_ids untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( text_input_ids, untruncated_ids ): removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1]) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {tokenizer.model_max_length} tokens: {removed_text}" ) text_encoder.to(dtype) prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True) # We are only ALWAYS interested in the pooled output of the final text encoder if pooled_prompt_embeds is None and prompt_embeds[0].ndim == 2: pooled_prompt_embeds = prompt_embeds[0] if clip_skip is None: prompt_embeds = prompt_embeds.hidden_states[-2] else: # "2" because SDXL always indexes from the penultimate layer. prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)] prompt_embeds_list.append(prompt_embeds) prompt_embeds = torch.concat(prompt_embeds_list, dim=-1) # get unconditional embeddings for classifier free guidance zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt: negative_prompt_embeds = torch.zeros_like(prompt_embeds) negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds) elif do_classifier_free_guidance and negative_prompt_embeds is None: negative_prompt = negative_prompt or "" negative_prompt_2 = negative_prompt_2 or negative_prompt # normalize str to list negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt negative_prompt_2 = ( batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2 ) uncond_tokens: List[str] if prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError( 
f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = [negative_prompt, negative_prompt_2] negative_prompt_embeds_list = [] for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders): if isinstance(self, TextualInversionLoaderMixin): negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer) max_length = prompt_embeds.shape[1] uncond_input = tokenizer( negative_prompt, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) negative_prompt_embeds = text_encoder( uncond_input.input_ids.to(device), output_hidden_states=True, ) # We are only ALWAYS interested in the pooled output of the final text encoder if negative_pooled_prompt_embeds is None and negative_prompt_embeds[0].ndim == 2: negative_pooled_prompt_embeds = negative_prompt_embeds[0] negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2] negative_prompt_embeds_list.append(negative_prompt_embeds) negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1) if self.text_encoder_2 is not None: prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) else: prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=device) bs_embed, seq_len, _ = prompt_embeds.shape # duplicate text embeddings for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance: # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = 
negative_prompt_embeds.shape[1] if self.text_encoder_2 is not None: negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) else: negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.unet.dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( bs_embed * num_images_per_prompt, -1 ) if do_classifier_free_guidance: negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( bs_embed * num_images_per_prompt, -1 ) if self.text_encoder is not None: if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: # Retrieve the original scale by scaling back the LoRA layers unscale_lora_layers(self.text_encoder, lora_scale) if self.text_encoder_2 is not None: if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: # Retrieve the original scale by scaling back the LoRA layers unscale_lora_layers(self.text_encoder_2, lora_scale) return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
        # eta corresponds to η in DDIM paper: https://huggingface.co/papers/2010.02502
        # and should be between [0, 1]

        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        # check if the scheduler accepts generator
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs["generator"] = generator
        return extra_step_kwargs

    def check_inputs(
        self,
        prompt,
        height,
        width,
        image,
        strength,
        num_inference_steps,
        normal_tile_overlap,
        border_tile_overlap,
        max_tile_size,
        tile_gaussian_sigma,
        tile_weighting_method,
        controlnet_conditioning_scale=1.0,
        control_guidance_start=0.0,
        control_guidance_end=1.0,
    ):
        # Validates all user-facing call arguments (dimensions, strength, tiling parameters,
        # control image and guidance schedules) and raises ValueError/TypeError on the first
        # violation found. Returns nothing on success.
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}")

        if num_inference_steps is None:
            raise ValueError("`num_inference_steps` cannot be None.")
        elif not isinstance(num_inference_steps, int) or num_inference_steps <= 0:
            raise ValueError(
                f"`num_inference_steps` has to be a positive integer but is {num_inference_steps} of type"
                f" {type(num_inference_steps)}."
            )

        # NOTE(review): the condition enforces `normal_tile_overlap >= 64` but the message says
        # "greater than 64"; likewise below for `border_tile_overlap >= 128`. Message wording is
        # slightly off from the actual check.
        if normal_tile_overlap is None:
            raise ValueError("`normal_tile_overlap` cannot be None.")
        elif not isinstance(normal_tile_overlap, int) or normal_tile_overlap < 64:
            raise ValueError(
                f"`normal_tile_overlap` has to be greater than 64 but is {normal_tile_overlap} of type"
                f" {type(normal_tile_overlap)}."
            )

        if border_tile_overlap is None:
            raise ValueError("`border_tile_overlap` cannot be None.")
        elif not isinstance(border_tile_overlap, int) or border_tile_overlap < 128:
            raise ValueError(
                f"`border_tile_overlap` has to be greater than 128 but is {border_tile_overlap} of type"
                f" {type(border_tile_overlap)}."
            )

        if max_tile_size is None:
            raise ValueError("`max_tile_size` cannot be None.")
        elif not isinstance(max_tile_size, int) or max_tile_size not in (1024, 1280):
            raise ValueError(
                f"`max_tile_size` has to be in 1024 or 1280 but is {max_tile_size} of type {type(max_tile_size)}."
            )
        if tile_gaussian_sigma is None:
            raise ValueError("`tile_gaussian_sigma` cannot be None.")
        elif not isinstance(tile_gaussian_sigma, float) or tile_gaussian_sigma <= 0:
            raise ValueError(
                f"`tile_gaussian_sigma` has to be a positive float but is {tile_gaussian_sigma} of type"
                f" {type(tile_gaussian_sigma)}."
            )
        if tile_weighting_method is None:
            raise ValueError("`tile_weighting_method` cannot be None.")
        elif not isinstance(tile_weighting_method, str) or tile_weighting_method not in [
            t.value for t in self.TileWeightingMethod
        ]:
            raise ValueError(
                f"`tile_weighting_method` has to be a string in ({[t.value for t in self.TileWeightingMethod]}) but is {tile_weighting_method} of type"
                f" {type(tile_weighting_method)}."
) # Check `image` is_compiled = hasattr(F, "scaled_dot_product_attention") and isinstance( self.controlnet, torch._dynamo.eval_frame.OptimizedModule ) if ( isinstance(self.controlnet, ControlNetModel) or is_compiled and isinstance(self.controlnet._orig_mod, ControlNetModel) ): self.check_image(image, prompt) elif ( isinstance(self.controlnet, ControlNetUnionModel) or is_compiled and isinstance(self.controlnet._orig_mod, ControlNetUnionModel) ): self.check_image(image, prompt) else: assert False # Check `controlnet_conditioning_scale` if ( isinstance(self.controlnet, ControlNetUnionModel) or is_compiled and isinstance(self.controlnet._orig_mod, ControlNetUnionModel) ) or ( isinstance(self.controlnet, MultiControlNetModel) or is_compiled and isinstance(self.controlnet._orig_mod, MultiControlNetModel) ): if not isinstance(controlnet_conditioning_scale, float): raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.") elif ( isinstance(self.controlnet, MultiControlNetModel) or is_compiled and isinstance(self.controlnet._orig_mod, MultiControlNetModel) ): if isinstance(controlnet_conditioning_scale, list): if any(isinstance(i, list) for i in controlnet_conditioning_scale): raise ValueError("A single batch of multiple conditionings are supported at the moment.") elif isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len( self.controlnet.nets ): raise ValueError( "For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have" " the same length as the number of controlnets" ) else: assert False if not isinstance(control_guidance_start, (tuple, list)): control_guidance_start = [control_guidance_start] if not isinstance(control_guidance_end, (tuple, list)): control_guidance_end = [control_guidance_end] if len(control_guidance_start) != len(control_guidance_end): raise ValueError( f"`control_guidance_start` has {len(control_guidance_start)} elements, but 
`control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list." ) for start, end in zip(control_guidance_start, control_guidance_end): if start >= end: raise ValueError( f"control guidance start: {start} cannot be larger or equal to control guidance end: {end}." ) if start < 0.0: raise ValueError(f"control guidance start: {start} can't be smaller than 0.") if end > 1.0: raise ValueError(f"control guidance end: {end} can't be larger than 1.0.") # Copied from diffusers.pipelines.controlnet.pipeline_controlnet_sd_xl.StableDiffusionXLControlNetPipeline.check_image def check_image(self, image, prompt): image_is_pil = isinstance(image, Image.Image) image_is_tensor = isinstance(image, torch.Tensor) image_is_np = isinstance(image, np.ndarray) image_is_pil_list = isinstance(image, list) and isinstance(image[0], Image.Image) image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor) image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray) if ( not image_is_pil and not image_is_tensor and not image_is_np and not image_is_pil_list and not image_is_tensor_list and not image_is_np_list ): raise TypeError( f"image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}" ) if image_is_pil: image_batch_size = 1 else: image_batch_size = len(image) if prompt is not None and isinstance(prompt, str): prompt_batch_size = 1 elif prompt is not None and isinstance(prompt, list): prompt_batch_size = len(prompt) if image_batch_size != 1 and image_batch_size != prompt_batch_size: raise ValueError( f"If image batch size is not 1, image batch size must be same as prompt batch size. 
image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}" ) # Copied from diffusers.pipelines.controlnet.pipeline_controlnet_sd_xl.StableDiffusionXLControlNetPipeline.prepare_image def prepare_control_image( self, image, width, height, batch_size, num_images_per_prompt, device, dtype, do_classifier_free_guidance=False, guess_mode=False, ): image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32) image_batch_size = image.shape[0] if image_batch_size == 1: repeat_by = batch_size else: # image batch size is the same as prompt batch size repeat_by = num_images_per_prompt image = image.repeat_interleave(repeat_by, dim=0) image = image.to(device=device, dtype=dtype) if do_classifier_free_guidance and not guess_mode: image = torch.cat([image] * 2) return image # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.get_timesteps def get_timesteps(self, num_inference_steps, strength): # get the original timestep using init_timestep init_timestep = min(int(num_inference_steps * strength), num_inference_steps) t_start = max(num_inference_steps - init_timestep, 0) timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :] if hasattr(self.scheduler, "set_begin_index"): self.scheduler.set_begin_index(t_start * self.scheduler.order) return timesteps, num_inference_steps - t_start # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_img2img.StableDiffusionXLImg2ImgPipeline.prepare_latents def prepare_latents( self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None, add_noise=True ): if not isinstance(image, (torch.Tensor, Image.Image, list)): raise ValueError( f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" ) latents_mean = latents_std = None if hasattr(self.vae.config, "latents_mean") and self.vae.config.latents_mean is not None: 
latents_mean = torch.tensor(self.vae.config.latents_mean).view(1, 4, 1, 1) if hasattr(self.vae.config, "latents_std") and self.vae.config.latents_std is not None: latents_std = torch.tensor(self.vae.config.latents_std).view(1, 4, 1, 1) # Offload text encoder if `enable_model_cpu_offload` was enabled if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: self.text_encoder_2.to("cpu") torch.cuda.empty_cache() image = image.to(device=device, dtype=dtype) batch_size = batch_size * num_images_per_prompt if image.shape[1] == 4: init_latents = image else: # make sure the VAE is in float32 mode, as it overflows in float16 if self.vae.config.force_upcast: image = image.float() self.vae.to(dtype=torch.float32) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
) elif isinstance(generator, list): if image.shape[0] < batch_size and batch_size % image.shape[0] == 0: image = torch.cat([image] * (batch_size // image.shape[0]), dim=0) elif image.shape[0] < batch_size and batch_size % image.shape[0] != 0: raise ValueError( f"Cannot duplicate `image` of batch size {image.shape[0]} to effective batch_size {batch_size} " ) init_latents = [ retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i]) for i in range(batch_size) ] init_latents = torch.cat(init_latents, dim=0) else: init_latents = retrieve_latents(self.vae.encode(image), generator=generator) if self.vae.config.force_upcast: self.vae.to(dtype) init_latents = init_latents.to(dtype) if latents_mean is not None and latents_std is not None: latents_mean = latents_mean.to(device=device, dtype=dtype) latents_std = latents_std.to(device=device, dtype=dtype) init_latents = (init_latents - latents_mean) * self.vae.config.scaling_factor / latents_std else: init_latents = self.vae.config.scaling_factor * init_latents if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0: # expand init_latents for batch_size additional_image_per_prompt = batch_size // init_latents.shape[0] init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0) elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0: raise ValueError( f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts." 
            )
        else:
            init_latents = torch.cat([init_latents], dim=0)

        if add_noise:
            shape = init_latents.shape
            noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
            # get latents
            init_latents = self.scheduler.add_noise(init_latents, noise, timestep)

        latents = init_latents

        return latents

    # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_img2img.StableDiffusionXLImg2ImgPipeline._get_add_time_ids
    def _get_add_time_ids(
        self,
        original_size,
        crops_coords_top_left,
        target_size,
        aesthetic_score,
        negative_aesthetic_score,
        negative_original_size,
        negative_crops_coords_top_left,
        negative_target_size,
        dtype,
        text_encoder_projection_dim=None,
    ):
        # Builds the SDXL micro-conditioning "add time ids" tensors (positive and negative) and
        # validates that their flattened length matches the UNet's `add_embedding` input width.
        if self.config.requires_aesthetics_score:
            add_time_ids = list(original_size + crops_coords_top_left + (aesthetic_score,))
            add_neg_time_ids = list(
                negative_original_size + negative_crops_coords_top_left + (negative_aesthetic_score,)
            )
        else:
            add_time_ids = list(original_size + crops_coords_top_left + target_size)
            # NOTE(review): the negative ids reuse the positive `crops_coords_top_left` here
            # (not `negative_crops_coords_top_left`). This mirrors the upstream "Copied from"
            # source, so it must not be changed independently — but confirm it is intentional.
            add_neg_time_ids = list(negative_original_size + crops_coords_top_left + negative_target_size)

        passed_add_embed_dim = (
            self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim
        )
        expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features

        if (
            expected_add_embed_dim > passed_add_embed_dim
            and (expected_add_embed_dim - passed_add_embed_dim) == self.unet.config.addition_time_embed_dim
        ):
            raise ValueError(
                f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. Please make sure to enable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=True)` to make sure `aesthetic_score` {aesthetic_score} and `negative_aesthetic_score` {negative_aesthetic_score} is correctly used by the model."
            )
        elif (
            expected_add_embed_dim < passed_add_embed_dim
            and (passed_add_embed_dim - expected_add_embed_dim) == self.unet.config.addition_time_embed_dim
        ):
            raise ValueError(
                f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. Please make sure to disable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=False)` to make sure `target_size` {target_size} is correctly used by the model."
            )
        elif expected_add_embed_dim != passed_add_embed_dim:
            raise ValueError(
                f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`."
            )

        add_time_ids = torch.tensor([add_time_ids], dtype=dtype)
        add_neg_time_ids = torch.tensor([add_neg_time_ids], dtype=dtype)

        return add_time_ids, add_neg_time_ids

    def _generate_cosine_weights(self, tile_width, tile_height, nbatches, device, dtype):
        """
        Generates cosine weights as a PyTorch tensor for blending tiles.

        Args:
            tile_width (int): Width of the tile in pixels.
            tile_height (int): Height of the tile in pixels.
            nbatches (int): Number of batches.
            device (torch.device): Device where the tensor will be allocated (e.g., 'cuda' or 'cpu').
            dtype (torch.dtype): Data type of the tensor (e.g., torch.float32).

        Returns:
            torch.Tensor: A tensor containing cosine weights for blending tiles, expanded to match batch and channel
                dimensions.
""" # Convert tile dimensions to latent space latent_width = tile_width // 8 latent_height = tile_height // 8 # Generate x and y coordinates in latent space x = np.arange(0, latent_width) y = np.arange(0, latent_height) # Calculate midpoints midpoint_x = (latent_width - 1) / 2 midpoint_y = (latent_height - 1) / 2 # Compute cosine probabilities for x and y x_probs = np.cos(np.pi * (x - midpoint_x) / latent_width) y_probs = np.cos(np.pi * (y - midpoint_y) / latent_height) # Create a 2D weight matrix using the outer product weights_np = np.outer(y_probs, x_probs) # Convert to a PyTorch tensor with the correct device and dtype weights_torch = torch.tensor(weights_np, device=device, dtype=dtype) # Expand for batch and channel dimensions tile_weights_expanded = torch.tile(weights_torch, (nbatches, self.unet.config.in_channels, 1, 1)) return tile_weights_expanded def _generate_gaussian_weights(self, tile_width, tile_height, nbatches, device, dtype, sigma=0.05): """ Generates Gaussian weights as a PyTorch tensor for blending tiles in latent space. Args: tile_width (int): Width of the tile in pixels. tile_height (int): Height of the tile in pixels. nbatches (int): Number of batches. device (torch.device): Device where the tensor will be allocated (e.g., 'cuda' or 'cpu'). dtype (torch.dtype): Data type of the tensor (e.g., torch.float32). sigma (float, optional): Standard deviation of the Gaussian distribution. Controls the smoothness of the weights. Defaults to 0.05. Returns: torch.Tensor: A tensor containing Gaussian weights for blending tiles, expanded to match batch and channel dimensions. 
""" # Convert tile dimensions to latent space latent_width = tile_width // 8 latent_height = tile_height // 8 # Generate Gaussian weights in latent space x = np.linspace(-1, 1, latent_width) y = np.linspace(-1, 1, latent_height) xx, yy = np.meshgrid(x, y) gaussian_weight = np.exp(-(xx**2 + yy**2) / (2 * sigma**2)) # Convert to a PyTorch tensor with the correct device and dtype weights_torch = torch.tensor(gaussian_weight, device=device, dtype=dtype) # Expand for batch and channel dimensions weights_expanded = weights_torch.unsqueeze(0).unsqueeze(0) # Add batch and channel dimensions weights_expanded = weights_expanded.expand(nbatches, -1, -1, -1) # Expand to the number of batches return weights_expanded def _get_num_tiles(self, height, width, tile_height, tile_width, normal_tile_overlap, border_tile_overlap): """ Calculates the number of tiles needed to cover an image, choosing the appropriate formula based on the ratio between the image size and the tile size. This function automatically selects between two formulas: 1. A universal formula for typical cases (image-to-tile ratio <= 6:1). 2. A specialized formula with border tile overlap for larger or atypical cases (image-to-tile ratio > 6:1). Args: height (int): Height of the image in pixels. width (int): Width of the image in pixels. tile_height (int): Height of each tile in pixels. tile_width (int): Width of each tile in pixels. normal_tile_overlap (int): Overlap between tiles in pixels for normal (non-border) tiles. border_tile_overlap (int): Overlap between tiles in pixels for border tiles. Returns: tuple: A tuple containing: - grid_rows (int): Number of rows in the tile grid. - grid_cols (int): Number of columns in the tile grid. Notes: - The function uses the universal formula (without border_tile_overlap) for typical cases where the image-to-tile ratio is 6:1 or smaller. 
        - For larger or atypical cases (image-to-tile ratio > 6:1), it uses a specialized formula that includes
          border_tile_overlap to ensure complete coverage of the image, especially at the edges.
        """
        # Calculate the ratio between the image size and the tile size
        height_ratio = height / tile_height
        width_ratio = width / tile_width

        # If the ratio is greater than 6:1, use the formula with border_tile_overlap
        if height_ratio > 6 or width_ratio > 6:
            # "+ 1" adds an extra row/column of tiles so elongated images stay fully covered at the far edge.
            grid_rows = int(np.ceil((height - border_tile_overlap) / (tile_height - normal_tile_overlap))) + 1
            grid_cols = int(np.ceil((width - border_tile_overlap) / (tile_width - normal_tile_overlap))) + 1
        else:
            # Otherwise, use the universal formula
            grid_rows = int(np.ceil((height - normal_tile_overlap) / (tile_height - normal_tile_overlap)))
            grid_cols = int(np.ceil((width - normal_tile_overlap) / (tile_width - normal_tile_overlap)))

        return grid_rows, grid_cols

    def prepare_tiles(
        self,
        grid_rows,
        grid_cols,
        tile_weighting_method,
        tile_width,
        tile_height,
        normal_tile_overlap,
        border_tile_overlap,
        width,
        height,
        tile_sigma,
        batch_size,
        device,
        dtype,
    ):
        """
        Processes image tiles by dynamically adjusting overlap and calculating Gaussian or cosine weights.

        Args:
            grid_rows (int): Number of rows in the tile grid.
            grid_cols (int): Number of columns in the tile grid.
            tile_weighting_method (str): Method for weighting tiles. Options: "Gaussian" or "Cosine".
            tile_width (int): Width of each tile in pixels.
            tile_height (int): Height of each tile in pixels.
            normal_tile_overlap (int): Overlap between tiles in pixels for normal tiles.
            border_tile_overlap (int): Overlap between tiles in pixels for border tiles.
            width (int): Width of the image in pixels.
            height (int): Height of the image in pixels.
            tile_sigma (float): Sigma parameter for Gaussian weighting.
            batch_size (int): Batch size for weight tiles.
            device (torch.device): Device where tensors will be allocated (e.g., 'cuda' or 'cpu').
            dtype (torch.dtype): Data type of the tensors (e.g., torch.float32).

        Returns:
            tuple: A tuple containing:
                - tile_weights (np.ndarray): Array of weights for each tile.
                - tile_row_overlaps (np.ndarray): Array of row overlaps for each tile.
                - tile_col_overlaps (np.ndarray): Array of column overlaps for each tile.
        """
        # Create arrays to store dynamic overlaps and weights
        tile_row_overlaps = np.full((grid_rows, grid_cols), normal_tile_overlap)
        tile_col_overlaps = np.full((grid_rows, grid_cols), normal_tile_overlap)
        # dtype=object because each cell holds a per-tile weight tensor, not a scalar.
        tile_weights = np.empty((grid_rows, grid_cols), dtype=object)  # Stores Gaussian or cosine weights

        # Iterate over tiles to adjust overlap and calculate weights
        for row in range(grid_rows):
            for col in range(grid_cols):
                # Calculate the size of the current tile
                px_row_init, px_row_end, px_col_init, px_col_end = _tile2pixel_indices(
                    row, col, tile_width, tile_height, normal_tile_overlap, normal_tile_overlap, width, height
                )
                current_tile_width = px_col_end - px_col_init
                current_tile_height = px_row_end - px_row_init
                sigma = tile_sigma

                # Adjust overlap for smaller tiles
                # A clipped (border) tile is recomputed with the larger border overlap, and its
                # Gaussian is widened (sigma * 1.2) so blending stays smooth at the image edges.
                if current_tile_width < tile_width:
                    px_row_init, px_row_end, px_col_init, px_col_end = _tile2pixel_indices(
                        row, col, tile_width, tile_height, border_tile_overlap, border_tile_overlap, width, height
                    )
                    current_tile_width = px_col_end - px_col_init
                    tile_col_overlaps[row, col] = border_tile_overlap
                    sigma = tile_sigma * 1.2
                if current_tile_height < tile_height:
                    px_row_init, px_row_end, px_col_init, px_col_end = _tile2pixel_indices(
                        row, col, tile_width, tile_height, border_tile_overlap, border_tile_overlap, width, height
                    )
                    current_tile_height = px_row_end - px_row_init
                    tile_row_overlaps[row, col] = border_tile_overlap
                    sigma = tile_sigma * 1.2

                # Calculate weights for the current tile
                if tile_weighting_method == self.TileWeightingMethod.COSINE.value:
                    # NOTE(review): this branch hardcodes dtype=torch.float32 and ignores the
                    # method's `dtype` argument, unlike the Gaussian branch — confirm intentional.
                    tile_weights[row, col] = self._generate_cosine_weights(
                        tile_width=current_tile_width,
                        tile_height=current_tile_height,
                        nbatches=batch_size,
                        device=device,
                        dtype=torch.float32,
                    )
                else:
                    tile_weights[row, col] = self._generate_gaussian_weights(
                        tile_width=current_tile_width,
                        tile_height=current_tile_height,
                        nbatches=batch_size,
                        device=device,
                        dtype=dtype,
                        sigma=sigma,
                    )
        return tile_weights, tile_row_overlaps, tile_col_overlaps

    def upcast_vae(self):
        # Kept only for backward compatibility; callers should cast the VAE directly.
        deprecate("upcast_vae", "1.0.0", "`upcast_vae` is deprecated. Please use `pipe.vae.to(torch.float32)`")
        self.vae.to(dtype=torch.float32)

    @property
    def guidance_scale(self):
        # Classifier-free guidance weight `w`, set at the start of `__call__`.
        return self._guidance_scale

    @property
    def clip_skip(self):
        # Number of CLIP layers skipped when encoding prompts (None = use the final layer).
        return self._clip_skip

    # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
    # of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1`
    # corresponds to doing no classifier free guidance.
    @property
    def do_classifier_free_guidance(self):
        return self._guidance_scale > 1

    @property
    def cross_attention_kwargs(self):
        # Extra kwargs forwarded to the attention processors (e.g. a LoRA `scale`).
        return self._cross_attention_kwargs

    @property
    def num_timesteps(self):
        # Length of the timestep schedule set during `__call__`.
        return self._num_timesteps

    @property
    def interrupt(self):
        # Cooperative cancellation flag checked inside the denoising loop.
        return self._interrupt

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        prompt: Union[str, List[str]] = None,
        image: PipelineImageInput = None,
        control_image: PipelineImageInput = None,
        height: Optional[int] = None,
        width: Optional[int] = None,
        strength: float = 0.9999,
        num_inference_steps: int = 50,
        guidance_scale: float = 5.0,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.Tensor] = None,
        output_type: str | None = "pil",
        return_dict: bool = True,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        controlnet_conditioning_scale: Union[float, List[float]] = 1.0,
        guess_mode: bool = False,
        control_guidance_start: Union[float, List[float]] = 0.0,
        control_guidance_end: Union[float, List[float]] = 1.0,
        control_mode: Optional[Union[int, List[int]]] = None,
        original_size: Tuple[int, int] = None,
crops_coords_top_left: Tuple[int, int] = (0, 0), target_size: Tuple[int, int] = None, negative_original_size: Optional[Tuple[int, int]] = None, negative_crops_coords_top_left: Tuple[int, int] = (0, 0), negative_target_size: Optional[Tuple[int, int]] = None, aesthetic_score: float = 6.0, negative_aesthetic_score: float = 2.5, clip_skip: Optional[int] = None, normal_tile_overlap: int = 64, border_tile_overlap: int = 128, max_tile_size: int = 1024, tile_gaussian_sigma: float = 0.05, tile_weighting_method: str = "Cosine", **kwargs, ): r""" Function invoked when calling the pipeline for generation. Args: prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`, *optional*): The initial image to be used as the starting point for the image generation process. Can also accept image latents as `image`, if passing latents directly, they will not be encoded again. control_image (`PipelineImageInput`, *optional*): The ControlNet input condition. ControlNet uses this input condition to generate guidance for Unet. If the type is specified as `torch.Tensor`, it is passed to ControlNet as is. `PIL.Image.Image` can also be accepted as an image. The dimensions of the output image default to `image`'s dimensions. If height and/or width are passed, `image` is resized accordingly. If multiple ControlNets are specified in init, images must be passed as a list such that each element of the list can be correctly batched for input to a single ControlNet. height (`int`, *optional*): The height in pixels of the generated image. If not provided, defaults to the height of `control_image`. width (`int`, *optional*): The width in pixels of the generated image. If not provided, defaults to the width of `control_image`. 
strength (`float`, *optional*, defaults to 0.9999): Indicates the extent to transform the reference `image`. Must be between 0 and 1. `image` is used as a starting point, and more noise is added the higher the `strength`. The number of denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise is maximum, and the denoising process runs for the full number of iterations specified in `num_inference_steps`. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. guidance_scale (`float`, *optional*, defaults to 5.0): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages generating images closely linked to the text `prompt`, usually at the expense of lower image quality. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) in the DDIM paper: https://huggingface.co/papers/2010.02502. Only applies to [`schedulers.DDIMScheduler`], will be ignored for others. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. 
latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generated image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a plain tuple. cross_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0): The outputs of the ControlNet are multiplied by `controlnet_conditioning_scale` before they are added to the residual in the original UNet. If multiple ControlNets are specified in init, you can set the corresponding scale as a list. guess_mode (`bool`, *optional*, defaults to `False`): In this mode, the ControlNet encoder will try to recognize the content of the input image even if you remove all prompts. The `guidance_scale` between 3.0 and 5.0 is recommended. control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0): The percentage of total steps at which the ControlNet starts applying. control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0): The percentage of total steps at which the ControlNet stops applying. control_mode (`int` or `List[int]`, *optional*): The mode of ControlNet guidance. Can be used to specify different behaviors for multiple ControlNets. 
original_size (`Tuple[int, int]`, *optional*): If `original_size` is not the same as `target_size`, the image will appear to be down- or upsampled. `original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning. crops_coords_top_left (`Tuple[int, int]`, *optional*, defaults to (0, 0)): `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning. target_size (`Tuple[int, int]`, *optional*): For most cases, `target_size` should be set to the desired height and width of the generated image. If not specified, it will default to `(height, width)`. Part of SDXL's micro-conditioning. negative_original_size (`Tuple[int, int]`, *optional*): To negatively condition the generation process based on a specific image resolution. Part of SDXL's micro-conditioning. negative_crops_coords_top_left (`Tuple[int, int]`, *optional*, defaults to (0, 0)): To negatively condition the generation process based on a specific crop coordinates. Part of SDXL's micro-conditioning. negative_target_size (`Tuple[int, int]`, *optional*): To negatively condition the generation process based on a target image resolution. It should be the same as the `target_size` for most cases. Part of SDXL's micro-conditioning. aesthetic_score (`float`, *optional*, defaults to 6.0): Used to simulate an aesthetic score of the generated image by influencing the positive text condition. Part of SDXL's micro-conditioning. negative_aesthetic_score (`float`, *optional*, defaults to 2.5): Used to simulate an aesthetic score of the generated image by influencing the negative text condition. Part of SDXL's micro-conditioning. clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. 
                A value of 1 means that the output of the pre-final layer will be used for computing the prompt
                embeddings.
            normal_tile_overlap (`int`, *optional*, defaults to 64):
                Number of overlapping pixels between tiles in consecutive rows.
            border_tile_overlap (`int`, *optional*, defaults to 128):
                Number of overlapping pixels between tiles at the borders.
            max_tile_size (`int`, *optional*, defaults to 1024):
                Maximum size of a tile in pixels.
            tile_gaussian_sigma (`float`, *optional*, defaults to 0.05):
                Sigma parameter for Gaussian weighting of tiles.
            tile_weighting_method (`str`, *optional*, defaults to "Cosine"):
                Method for weighting tiles. Options: "Cosine" or "Gaussian".

        Examples:

        Returns:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
                [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a
                `tuple` containing the output images.
        """
        # Unwrap a torch.compile-wrapped ControlNet so its real module/config is reachable.
        controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet

        # align format for control guidance
        if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list):
            control_guidance_start = len(control_guidance_end) * [control_guidance_start]
        elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list):
            control_guidance_end = len(control_guidance_start) * [control_guidance_end]

        if not isinstance(control_image, list):
            control_image = [control_image]
        else:
            # Shallow copy so the caller's list is not mutated downstream.
            control_image = control_image.copy()

        # `and` binds tighter than `or`: rejects both None and an explicitly empty list.
        if control_mode is None or isinstance(control_mode, list) and len(control_mode) == 0:
            raise ValueError("The value for `control_mode` is expected!")
        if not isinstance(control_mode, list):
            control_mode = [control_mode]

        if len(control_image) != len(control_mode):
            raise ValueError("Expected len(control_image) == len(control_mode)")

        num_control_type = controlnet.config.num_control_type

        # 0.
Set internal use parameters height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor original_size = original_size or (height, width) target_size = target_size or (height, width) negative_original_size = negative_original_size or original_size negative_target_size = negative_target_size or target_size control_type = [0 for _ in range(num_control_type)] control_type = torch.Tensor(control_type) self._guidance_scale = guidance_scale self._clip_skip = clip_skip self._cross_attention_kwargs = cross_attention_kwargs self._interrupt = False batch_size = 1 device = self._execution_device global_pool_conditions = controlnet.config.global_pool_conditions guess_mode = guess_mode or global_pool_conditions # 1. Check inputs for _image, control_idx in zip(control_image, control_mode): control_type[control_idx] = 1 self.check_inputs( prompt, height, width, _image, strength, num_inference_steps, normal_tile_overlap, border_tile_overlap, max_tile_size, tile_gaussian_sigma, tile_weighting_method, controlnet_conditioning_scale, control_guidance_start, control_guidance_end, ) # 2 Get tile width and tile height size tile_width, tile_height = _adaptive_tile_size((width, height), max_tile_size=max_tile_size) # 2.1 Calculate the number of tiles needed grid_rows, grid_cols = self._get_num_tiles( height, width, tile_height, tile_width, normal_tile_overlap, border_tile_overlap ) # 2.2 Expand prompt to number of tiles if not isinstance(prompt, list): prompt = [[prompt] * grid_cols] * grid_rows # 2.3 Update height and width tile size by tile size and tile overlap size width = (grid_cols - 1) * (tile_width - normal_tile_overlap) + min( tile_width, width - (grid_cols - 1) * (tile_width - normal_tile_overlap) ) height = (grid_rows - 1) * (tile_height - normal_tile_overlap) + min( tile_height, height - (grid_rows - 1) * (tile_height - normal_tile_overlap) ) # 3. 
Encode input prompt text_encoder_lora_scale = ( self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None ) text_embeddings = [ [ self.encode_prompt( prompt=col, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=self.do_classifier_free_guidance, negative_prompt=negative_prompt, prompt_embeds=None, negative_prompt_embeds=None, pooled_prompt_embeds=None, negative_pooled_prompt_embeds=None, lora_scale=text_encoder_lora_scale, clip_skip=self.clip_skip, ) for col in row ] for row in prompt ] # 4. Prepare latent image image_tensor = self.image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32) # 4.1 Prepare controlnet_conditioning_image control_image = self.prepare_control_image( image=image, width=width, height=height, batch_size=batch_size * num_images_per_prompt, num_images_per_prompt=num_images_per_prompt, device=device, dtype=controlnet.dtype, do_classifier_free_guidance=self.do_classifier_free_guidance, guess_mode=guess_mode, ) control_type = ( control_type.reshape(1, -1) .to(device, dtype=controlnet.dtype) .repeat(batch_size * num_images_per_prompt * 2, 1) ) # 5. Prepare timesteps accepts_offset = "offset" in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys()) extra_set_kwargs = {} if accepts_offset: extra_set_kwargs["offset"] = 1 self.scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength) latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) self._num_timesteps = len(timesteps) # 6. 
Prepare latent variables dtype = text_embeddings[0][0][0].dtype if latents is None: latents = self.prepare_latents( image_tensor, latent_timestep, batch_size, num_images_per_prompt, dtype, device, generator, True, ) # if we use LMSDiscreteScheduler, let's make sure latents are multiplied by sigmas if isinstance(self.scheduler, LMSDiscreteScheduler): latents = latents * self.scheduler.sigmas[0] # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # 8. Create tensor stating which controlnets to keep controlnet_keep = [] for i in range(len(timesteps)): controlnet_keep.append( 1.0 - float(i / len(timesteps) < control_guidance_start or (i + 1) / len(timesteps) > control_guidance_end) ) # 8.1 Prepare added time ids & embeddings # text_embeddings order: prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds embeddings_and_added_time = [] crops_coords_top_left = negative_crops_coords_top_left = (tile_width, tile_height) for row in range(grid_rows): addition_embed_type_row = [] for col in range(grid_cols): # extract generated values prompt_embeds = text_embeddings[row][col][0] negative_prompt_embeds = text_embeddings[row][col][1] pooled_prompt_embeds = text_embeddings[row][col][2] negative_pooled_prompt_embeds = text_embeddings[row][col][3] if negative_original_size is None: negative_original_size = original_size if negative_target_size is None: negative_target_size = target_size add_text_embeds = pooled_prompt_embeds if self.text_encoder_2 is None: text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1]) else: text_encoder_projection_dim = self.text_encoder_2.config.projection_dim add_time_ids, add_neg_time_ids = self._get_add_time_ids( original_size, crops_coords_top_left, target_size, aesthetic_score, negative_aesthetic_score, negative_original_size, negative_crops_coords_top_left, negative_target_size, 
dtype=prompt_embeds.dtype, text_encoder_projection_dim=text_encoder_projection_dim, ) add_time_ids = add_time_ids.repeat(batch_size * num_images_per_prompt, 1) if self.do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0) add_neg_time_ids = add_neg_time_ids.repeat(batch_size * num_images_per_prompt, 1) add_time_ids = torch.cat([add_neg_time_ids, add_time_ids], dim=0) prompt_embeds = prompt_embeds.to(device) add_text_embeds = add_text_embeds.to(device) add_time_ids = add_time_ids.to(device) addition_embed_type_row.append((prompt_embeds, add_text_embeds, add_time_ids)) embeddings_and_added_time.append(addition_embed_type_row) # 9. Prepare tiles weights and latent overlaps size to denoising process tile_weights, tile_row_overlaps, tile_col_overlaps = self.prepare_tiles( grid_rows, grid_cols, tile_weighting_method, tile_width, tile_height, normal_tile_overlap, border_tile_overlap, width, height, tile_gaussian_sigma, batch_size, device, dtype, ) # 10. Denoising loop num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): # Diffuse each tile noise_preds = [] for row in range(grid_rows): noise_preds_row = [] for col in range(grid_cols): if self.interrupt: continue tile_row_overlap = tile_row_overlaps[row, col] tile_col_overlap = tile_col_overlaps[row, col] px_row_init, px_row_end, px_col_init, px_col_end = _tile2latent_indices( row, col, tile_width, tile_height, tile_row_overlap, tile_col_overlap, width, height ) tile_latents = latents[:, :, px_row_init:px_row_end, px_col_init:px_col_end] # expand the latents if we are doing classifier free guidance latent_model_input = ( torch.cat([tile_latents] * 2) if self.do_classifier_free_guidance else tile_latents # 1, 4, ... 
) latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) # predict the noise residual added_cond_kwargs = { "text_embeds": embeddings_and_added_time[row][col][1], "time_ids": embeddings_and_added_time[row][col][2], } # controlnet(s) inference if guess_mode and self.do_classifier_free_guidance: # Infer ControlNet only for the conditional batch. control_model_input = tile_latents control_model_input = self.scheduler.scale_model_input(control_model_input, t) controlnet_prompt_embeds = embeddings_and_added_time[row][col][0].chunk(2)[1] controlnet_added_cond_kwargs = { "text_embeds": embeddings_and_added_time[row][col][1].chunk(2)[1], "time_ids": embeddings_and_added_time[row][col][2].chunk(2)[1], } else: control_model_input = latent_model_input controlnet_prompt_embeds = embeddings_and_added_time[row][col][0] controlnet_added_cond_kwargs = added_cond_kwargs if isinstance(controlnet_keep[i], list): cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])] else: controlnet_cond_scale = controlnet_conditioning_scale if isinstance(controlnet_cond_scale, list): controlnet_cond_scale = controlnet_cond_scale[0] cond_scale = controlnet_cond_scale * controlnet_keep[i] px_row_init_pixel, px_row_end_pixel, px_col_init_pixel, px_col_end_pixel = _tile2pixel_indices( row, col, tile_width, tile_height, tile_row_overlap, tile_col_overlap, width, height ) tile_control_image = control_image[ :, :, px_row_init_pixel:px_row_end_pixel, px_col_init_pixel:px_col_end_pixel ] down_block_res_samples, mid_block_res_sample = self.controlnet( control_model_input, t, encoder_hidden_states=controlnet_prompt_embeds, controlnet_cond=[tile_control_image], control_type=control_type, control_type_idx=control_mode, conditioning_scale=cond_scale, guess_mode=guess_mode, added_cond_kwargs=controlnet_added_cond_kwargs, return_dict=False, ) if guess_mode and self.do_classifier_free_guidance: # Inferred ControlNet only for the conditional batch. 
# To apply the output of ControlNet to both the unconditional and conditional batches, # add 0 to the unconditional batch to keep it unchanged. down_block_res_samples = [ torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples ] mid_block_res_sample = torch.cat( [torch.zeros_like(mid_block_res_sample), mid_block_res_sample] ) # predict the noise residual with torch.amp.autocast(device.type, dtype=dtype, enabled=dtype != self.unet.dtype): noise_pred = self.unet( latent_model_input, t, encoder_hidden_states=embeddings_and_added_time[row][col][0], cross_attention_kwargs=self.cross_attention_kwargs, down_block_additional_residuals=down_block_res_samples, mid_block_additional_residual=mid_block_res_sample, added_cond_kwargs=added_cond_kwargs, return_dict=False, )[0] # perform guidance if self.do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred_tile = noise_pred_uncond + guidance_scale * ( noise_pred_text - noise_pred_uncond ) noise_preds_row.append(noise_pred_tile) noise_preds.append(noise_preds_row) # Stitch noise predictions for all tiles noise_pred = torch.zeros(latents.shape, device=device) contributors = torch.zeros(latents.shape, device=device) # Add each tile contribution to overall latents for row in range(grid_rows): for col in range(grid_cols): tile_row_overlap = tile_row_overlaps[row, col] tile_col_overlap = tile_col_overlaps[row, col] px_row_init, px_row_end, px_col_init, px_col_end = _tile2latent_indices( row, col, tile_width, tile_height, tile_row_overlap, tile_col_overlap, width, height ) tile_weights_resized = tile_weights[row, col] noise_pred[:, :, px_row_init:px_row_end, px_col_init:px_col_end] += ( noise_preds[row][col] * tile_weights_resized ) contributors[:, :, px_row_init:px_row_end, px_col_init:px_col_end] += tile_weights_resized # Average overlapping areas with more than 1 contributor noise_pred /= contributors noise_pred = noise_pred.to(dtype) # compute the previous noisy sample x_t -> 
x_t-1 latents_dtype = latents.dtype latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] if latents.dtype != latents_dtype: if torch.backends.mps.is_available(): # some platforms (eg. apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272 latents = latents.to(latents_dtype) # update progress bar if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if XLA_AVAILABLE: xm.mark_step() # If we do sequential model offloading, let's offload unet and controlnet # manually for max memory savings if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: self.unet.to("cpu") self.controlnet.to("cpu") torch.cuda.empty_cache() if not output_type == "latent": # make sure the VAE is in float32 mode, as it overflows in float16 needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast if needs_upcasting: self.upcast_vae() latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) # unscale/denormalize the latents # denormalize with the mean and std if available and not None has_latents_mean = hasattr(self.vae.config, "latents_mean") and self.vae.config.latents_mean is not None has_latents_std = hasattr(self.vae.config, "latents_std") and self.vae.config.latents_std is not None if has_latents_mean and has_latents_std: latents_mean = ( torch.tensor(self.vae.config.latents_mean).view(1, 4, 1, 1).to(latents.device, latents.dtype) ) latents_std = ( torch.tensor(self.vae.config.latents_std).view(1, 4, 1, 1).to(latents.device, latents.dtype) ) latents = latents * latents_std / self.vae.config.scaling_factor + latents_mean else: latents = latents / self.vae.config.scaling_factor image = self.vae.decode(latents, return_dict=False)[0] # cast back to fp16 if needed if needs_upcasting: self.vae.to(dtype=torch.float16) # apply watermark if available if self.watermark is not None: image = 
self.watermark.apply_watermark(image) image = self.image_processor.postprocess(image, output_type=output_type) else: image = latents # Offload all models self.maybe_free_model_hooks() result = StableDiffusionXLPipelineOutput(images=image) if not return_dict: return (image,) return result
{ "repo_id": "huggingface/diffusers", "file_path": "examples/community/mod_controlnet_tile_sr_sdxl.py", "license": "Apache License 2.0", "lines": 1602, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:tests/lora/test_lora_layers_wan.py
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys
import unittest

import torch
from transformers import AutoTokenizer, T5EncoderModel

from diffusers import AutoencoderKLWan, FlowMatchEulerDiscreteScheduler, WanPipeline, WanTransformer3DModel

from ..testing_utils import floats_tensor, require_peft_backend, skip_mps


sys.path.append(".")

# Imported after the sys.path tweak above so the shared LoRA test mixin resolves.
from .utils import PeftLoraLoaderMixinTests  # noqa: E402


@require_peft_backend
@skip_mps
class WanLoRATests(unittest.TestCase, PeftLoraLoaderMixinTests):
    """LoRA load/fuse/unfuse tests for `WanPipeline`, driven by `PeftLoraLoaderMixinTests`.

    The mixin supplies the actual test methods; this class only provides tiny model
    configurations and dummy inputs so the shared tests run quickly.
    """

    pipeline_class = WanPipeline
    scheduler_cls = FlowMatchEulerDiscreteScheduler
    scheduler_kwargs = {}

    # Minimal WanTransformer3DModel configuration (tiny dims keep the tests fast).
    transformer_kwargs = {
        "patch_size": (1, 2, 2),
        "num_attention_heads": 2,
        "attention_head_dim": 12,
        "in_channels": 16,
        "out_channels": 16,
        "text_dim": 32,
        "freq_dim": 256,
        "ffn_dim": 32,
        "num_layers": 2,
        "cross_attn_norm": True,
        "qk_norm": "rms_norm_across_heads",
        "rope_max_seq_len": 32,
    }
    transformer_cls = WanTransformer3DModel
    # Minimal AutoencoderKLWan configuration.
    vae_kwargs = {
        "base_dim": 3,
        "z_dim": 16,
        "dim_mult": [1, 1, 1, 1],
        "num_res_blocks": 1,
        "temperal_downsample": [False, True, True],
    }
    vae_cls = AutoencoderKLWan
    # NOTE(review): Wan uses a single T5 text encoder; presumably this flag only drives
    # the mixin's plumbing — confirm it is required.
    has_two_text_encoders = True
    tokenizer_cls, tokenizer_id = AutoTokenizer, "hf-internal-testing/tiny-random-t5"
    text_encoder_cls, text_encoder_id = T5EncoderModel, "hf-internal-testing/tiny-random-t5"

    text_encoder_target_modules = ["q", "k", "v", "o"]
    # Text-encoder LoRAs are not supported for Wan; the mixin skips those code paths.
    supports_text_encoder_loras = False

    @property
    def output_shape(self):
        # Shape of the decoded "np" output: (batch, frames, height, width, channels).
        return (1, 9, 32, 32, 3)

    def get_dummy_inputs(self, with_generator=True):
        """Build deterministic dummy latents, token ids, and pipeline kwargs for a 1-step run."""
        batch_size = 1
        sequence_length = 16
        num_channels = 4
        num_frames = 9
        num_latent_frames = 3  # (num_frames - 1) // temporal_compression_ratio + 1
        sizes = (4, 4)

        generator = torch.manual_seed(0)
        noise = floats_tensor((batch_size, num_latent_frames, num_channels) + sizes)
        input_ids = torch.randint(1, sequence_length, size=(batch_size, sequence_length), generator=generator)

        pipeline_inputs = {
            "prompt": "",
            "num_frames": num_frames,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "height": 32,
            "width": 32,
            "max_sequence_length": sequence_length,
            "output_type": "np",
        }
        if with_generator:
            pipeline_inputs.update({"generator": generator})

        return noise, input_ids, pipeline_inputs

    def test_simple_inference_with_text_lora_denoiser_fused_multi(self):
        # Override only to pass a custom tolerance to the mixin's test.
        super().test_simple_inference_with_text_lora_denoiser_fused_multi(expected_atol=9e-3)

    def test_simple_inference_with_text_denoiser_lora_unfused(self):
        # Override only to pass a custom tolerance to the mixin's test.
        super().test_simple_inference_with_text_denoiser_lora_unfused(expected_atol=9e-3)

    @unittest.skip("Not supported in Wan.")
    def test_simple_inference_with_text_denoiser_block_scale(self):
        pass

    @unittest.skip("Not supported in Wan.")
    def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self):
        pass

    @unittest.skip("Not supported in Wan.")
    def test_modify_padding_mode(self):
        pass
{ "repo_id": "huggingface/diffusers", "file_path": "tests/lora/test_lora_layers_wan.py", "license": "Apache License 2.0", "lines": 94, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/diffusers:src/diffusers/models/autoencoders/autoencoder_kl_magvit.py
# Copyright 2025 The EasyAnimate team and The HuggingFace Team. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math import torch import torch.nn as nn import torch.nn.functional as F from ...configuration_utils import ConfigMixin, register_to_config from ...utils import logging from ...utils.accelerate_utils import apply_forward_hook from ..activations import get_activation from ..modeling_outputs import AutoencoderKLOutput from ..modeling_utils import ModelMixin from .vae import AutoencoderMixin, DecoderOutput, DiagonalGaussianDistribution logger = logging.get_logger(__name__) # pylint: disable=invalid-name class EasyAnimateCausalConv3d(nn.Conv3d): def __init__( self, in_channels: int, out_channels: int, kernel_size: int | tuple[int, ...] = 3, stride: int | tuple[int, ...] = 1, padding: int | tuple[int, ...] = 1, dilation: int | tuple[int, ...] = 1, groups: int = 1, bias: bool = True, padding_mode: str = "zeros", ): # Ensure kernel_size, stride, and dilation are tuples of length 3 kernel_size = kernel_size if isinstance(kernel_size, tuple) else (kernel_size,) * 3 assert len(kernel_size) == 3, f"Kernel size must be a 3-tuple, got {kernel_size} instead." stride = stride if isinstance(stride, tuple) else (stride,) * 3 assert len(stride) == 3, f"Stride must be a 3-tuple, got {stride} instead." dilation = dilation if isinstance(dilation, tuple) else (dilation,) * 3 assert len(dilation) == 3, f"Dilation must be a 3-tuple, got {dilation} instead." 
# Unpack kernel size, stride, and dilation for temporal, height, and width dimensions t_ks, h_ks, w_ks = kernel_size self.t_stride, h_stride, w_stride = stride t_dilation, h_dilation, w_dilation = dilation # Calculate padding for temporal dimension to maintain causality t_pad = (t_ks - 1) * t_dilation # Calculate padding for height and width dimensions based on the padding parameter if padding is None: h_pad = math.ceil(((h_ks - 1) * h_dilation + (1 - h_stride)) / 2) w_pad = math.ceil(((w_ks - 1) * w_dilation + (1 - w_stride)) / 2) elif isinstance(padding, int): h_pad = w_pad = padding else: assert NotImplementedError # Store temporal padding and initialize flags and previous features cache self.temporal_padding = t_pad self.temporal_padding_origin = math.ceil(((t_ks - 1) * w_dilation + (1 - w_stride)) / 2) self.prev_features = None # Initialize the parent class with modified padding super().__init__( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, dilation=dilation, padding=(0, h_pad, w_pad), groups=groups, bias=bias, padding_mode=padding_mode, ) def _clear_conv_cache(self): del self.prev_features self.prev_features = None def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: # Ensure input tensor is of the correct type dtype = hidden_states.dtype if self.prev_features is None: # Pad the input tensor in the temporal dimension to maintain causality hidden_states = F.pad( hidden_states, pad=(0, 0, 0, 0, self.temporal_padding, 0), mode="replicate", # TODO: check if this is necessary ) hidden_states = hidden_states.to(dtype=dtype) # Clear cache before processing and store previous features for causality self._clear_conv_cache() self.prev_features = hidden_states[:, :, -self.temporal_padding :].clone() # Process the input tensor in chunks along the temporal dimension num_frames = hidden_states.size(2) outputs = [] i = 0 while i + self.temporal_padding + 1 <= num_frames: out = super().forward(hidden_states[:, :, i : 
i + self.temporal_padding + 1]) i += self.t_stride outputs.append(out) return torch.concat(outputs, 2) else: # Concatenate previous features with the input tensor for continuous temporal processing if self.t_stride == 2: hidden_states = torch.concat( [self.prev_features[:, :, -(self.temporal_padding - 1) :], hidden_states], dim=2 ) else: hidden_states = torch.concat([self.prev_features, hidden_states], dim=2) hidden_states = hidden_states.to(dtype=dtype) # Clear cache and update previous features self._clear_conv_cache() self.prev_features = hidden_states[:, :, -self.temporal_padding :].clone() # Process the concatenated tensor in chunks along the temporal dimension num_frames = hidden_states.size(2) outputs = [] i = 0 while i + self.temporal_padding + 1 <= num_frames: out = super().forward(hidden_states[:, :, i : i + self.temporal_padding + 1]) i += self.t_stride outputs.append(out) return torch.concat(outputs, 2) class EasyAnimateResidualBlock3D(nn.Module): def __init__( self, in_channels: int, out_channels: int, non_linearity: str = "silu", norm_num_groups: int = 32, norm_eps: float = 1e-6, spatial_group_norm: bool = True, dropout: float = 0.0, output_scale_factor: float = 1.0, ): super().__init__() self.output_scale_factor = output_scale_factor # Group normalization for input tensor self.norm1 = nn.GroupNorm( num_groups=norm_num_groups, num_channels=in_channels, eps=norm_eps, affine=True, ) self.nonlinearity = get_activation(non_linearity) self.conv1 = EasyAnimateCausalConv3d(in_channels, out_channels, kernel_size=3) self.norm2 = nn.GroupNorm(num_groups=norm_num_groups, num_channels=out_channels, eps=norm_eps, affine=True) self.dropout = nn.Dropout(dropout) self.conv2 = EasyAnimateCausalConv3d(out_channels, out_channels, kernel_size=3) if in_channels != out_channels: self.shortcut = nn.Conv3d(in_channels, out_channels, kernel_size=1) else: self.shortcut = nn.Identity() self.spatial_group_norm = spatial_group_norm def forward(self, hidden_states: torch.Tensor) 
-> torch.Tensor: shortcut = self.shortcut(hidden_states) if self.spatial_group_norm: batch_size = hidden_states.size(0) hidden_states = hidden_states.permute(0, 2, 1, 3, 4).flatten(0, 1) # [B, C, T, H, W] -> [B * T, C, H, W] hidden_states = self.norm1(hidden_states) hidden_states = hidden_states.unflatten(0, (batch_size, -1)).permute( 0, 2, 1, 3, 4 ) # [B * T, C, H, W] -> [B, C, T, H, W] else: hidden_states = self.norm1(hidden_states) hidden_states = self.nonlinearity(hidden_states) hidden_states = self.conv1(hidden_states) if self.spatial_group_norm: batch_size = hidden_states.size(0) hidden_states = hidden_states.permute(0, 2, 1, 3, 4).flatten(0, 1) # [B, C, T, H, W] -> [B * T, C, H, W] hidden_states = self.norm2(hidden_states) hidden_states = hidden_states.unflatten(0, (batch_size, -1)).permute( 0, 2, 1, 3, 4 ) # [B * T, C, H, W] -> [B, C, T, H, W] else: hidden_states = self.norm2(hidden_states) hidden_states = self.nonlinearity(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.conv2(hidden_states) return (hidden_states + shortcut) / self.output_scale_factor class EasyAnimateDownsampler3D(nn.Module): def __init__(self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: tuple = (2, 2, 2)): super().__init__() self.conv = EasyAnimateCausalConv3d( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=0 ) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = F.pad(hidden_states, (0, 1, 0, 1)) hidden_states = self.conv(hidden_states) return hidden_states class EasyAnimateUpsampler3D(nn.Module): def __init__( self, in_channels: int, out_channels: int, kernel_size: int = 3, temporal_upsample: bool = False, spatial_group_norm: bool = True, ): super().__init__() out_channels = out_channels or in_channels self.temporal_upsample = temporal_upsample self.spatial_group_norm = spatial_group_norm self.conv = EasyAnimateCausalConv3d( in_channels=in_channels, 
out_channels=out_channels, kernel_size=kernel_size ) self.prev_features = None def _clear_conv_cache(self): del self.prev_features self.prev_features = None def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = F.interpolate(hidden_states, scale_factor=(1, 2, 2), mode="nearest") hidden_states = self.conv(hidden_states) if self.temporal_upsample: if self.prev_features is None: self.prev_features = hidden_states else: hidden_states = F.interpolate( hidden_states, scale_factor=(2, 1, 1), mode="trilinear" if not self.spatial_group_norm else "nearest", ) return hidden_states class EasyAnimateDownBlock3D(nn.Module): def __init__( self, in_channels: int, out_channels: int, num_layers: int = 1, act_fn: str = "silu", norm_num_groups: int = 32, norm_eps: float = 1e-6, spatial_group_norm: bool = True, dropout: float = 0.0, output_scale_factor: float = 1.0, add_downsample: bool = True, add_temporal_downsample: bool = True, ): super().__init__() self.convs = nn.ModuleList([]) for i in range(num_layers): in_channels = in_channels if i == 0 else out_channels self.convs.append( EasyAnimateResidualBlock3D( in_channels=in_channels, out_channels=out_channels, non_linearity=act_fn, norm_num_groups=norm_num_groups, norm_eps=norm_eps, spatial_group_norm=spatial_group_norm, dropout=dropout, output_scale_factor=output_scale_factor, ) ) if add_downsample and add_temporal_downsample: self.downsampler = EasyAnimateDownsampler3D(out_channels, out_channels, kernel_size=3, stride=(2, 2, 2)) self.spatial_downsample_factor = 2 self.temporal_downsample_factor = 2 elif add_downsample and not add_temporal_downsample: self.downsampler = EasyAnimateDownsampler3D(out_channels, out_channels, kernel_size=3, stride=(1, 2, 2)) self.spatial_downsample_factor = 2 self.temporal_downsample_factor = 1 else: self.downsampler = None self.spatial_downsample_factor = 1 self.temporal_downsample_factor = 1 def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: for conv in 
self.convs: hidden_states = conv(hidden_states) if self.downsampler is not None: hidden_states = self.downsampler(hidden_states) return hidden_states class EasyAnimateUpBlock3d(nn.Module): def __init__( self, in_channels: int, out_channels: int, num_layers: int = 1, act_fn: str = "silu", norm_num_groups: int = 32, norm_eps: float = 1e-6, spatial_group_norm: bool = False, dropout: float = 0.0, output_scale_factor: float = 1.0, add_upsample: bool = True, add_temporal_upsample: bool = True, ): super().__init__() self.convs = nn.ModuleList([]) for i in range(num_layers): in_channels = in_channels if i == 0 else out_channels self.convs.append( EasyAnimateResidualBlock3D( in_channels=in_channels, out_channels=out_channels, non_linearity=act_fn, norm_num_groups=norm_num_groups, norm_eps=norm_eps, spatial_group_norm=spatial_group_norm, dropout=dropout, output_scale_factor=output_scale_factor, ) ) if add_upsample: self.upsampler = EasyAnimateUpsampler3D( in_channels, in_channels, temporal_upsample=add_temporal_upsample, spatial_group_norm=spatial_group_norm, ) else: self.upsampler = None def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: for conv in self.convs: hidden_states = conv(hidden_states) if self.upsampler is not None: hidden_states = self.upsampler(hidden_states) return hidden_states class EasyAnimateMidBlock3d(nn.Module): def __init__( self, in_channels: int, num_layers: int = 1, act_fn: str = "silu", norm_num_groups: int = 32, norm_eps: float = 1e-6, spatial_group_norm: bool = True, dropout: float = 0.0, output_scale_factor: float = 1.0, ): super().__init__() norm_num_groups = norm_num_groups if norm_num_groups is not None else min(in_channels // 4, 32) self.convs = nn.ModuleList( [ EasyAnimateResidualBlock3D( in_channels=in_channels, out_channels=in_channels, non_linearity=act_fn, norm_num_groups=norm_num_groups, norm_eps=norm_eps, spatial_group_norm=spatial_group_norm, dropout=dropout, output_scale_factor=output_scale_factor, ) ] ) for _ in 
range(num_layers - 1): self.convs.append( EasyAnimateResidualBlock3D( in_channels=in_channels, out_channels=in_channels, non_linearity=act_fn, norm_num_groups=norm_num_groups, norm_eps=norm_eps, spatial_group_norm=spatial_group_norm, dropout=dropout, output_scale_factor=output_scale_factor, ) ) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.convs[0](hidden_states) for resnet in self.convs[1:]: hidden_states = resnet(hidden_states) return hidden_states class EasyAnimateEncoder(nn.Module): r""" Causal encoder for 3D video-like data used in [EasyAnimate](https://huggingface.co/papers/2405.18991). """ _supports_gradient_checkpointing = True def __init__( self, in_channels: int = 3, out_channels: int = 8, down_block_types: tuple[str, ...] = ( "SpatialDownBlock3D", "SpatialTemporalDownBlock3D", "SpatialTemporalDownBlock3D", "SpatialTemporalDownBlock3D", ), block_out_channels: tuple[int, ...] = [128, 256, 512, 512], layers_per_block: int = 2, norm_num_groups: int = 32, act_fn: str = "silu", double_z: bool = True, spatial_group_norm: bool = False, ): super().__init__() # 1. Input convolution self.conv_in = EasyAnimateCausalConv3d(in_channels, block_out_channels[0], kernel_size=3) # 2. 
Down blocks self.down_blocks = nn.ModuleList([]) output_channels = block_out_channels[0] for i, down_block_type in enumerate(down_block_types): input_channels = output_channels output_channels = block_out_channels[i] is_final_block = i == len(block_out_channels) - 1 if down_block_type == "SpatialDownBlock3D": down_block = EasyAnimateDownBlock3D( in_channels=input_channels, out_channels=output_channels, num_layers=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, norm_eps=1e-6, spatial_group_norm=spatial_group_norm, add_downsample=not is_final_block, add_temporal_downsample=False, ) elif down_block_type == "SpatialTemporalDownBlock3D": down_block = EasyAnimateDownBlock3D( in_channels=input_channels, out_channels=output_channels, num_layers=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, norm_eps=1e-6, spatial_group_norm=spatial_group_norm, add_downsample=not is_final_block, add_temporal_downsample=True, ) else: raise ValueError(f"Unknown up block type: {down_block_type}") self.down_blocks.append(down_block) # 3. Middle block self.mid_block = EasyAnimateMidBlock3d( in_channels=block_out_channels[-1], num_layers=layers_per_block, act_fn=act_fn, spatial_group_norm=spatial_group_norm, norm_num_groups=norm_num_groups, norm_eps=1e-6, dropout=0, output_scale_factor=1, ) # 4. 
Output normalization & convolution self.spatial_group_norm = spatial_group_norm self.conv_norm_out = nn.GroupNorm( num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1e-6, ) self.conv_act = get_activation(act_fn) # Initialize the output convolution layer conv_out_channels = 2 * out_channels if double_z else out_channels self.conv_out = EasyAnimateCausalConv3d(block_out_channels[-1], conv_out_channels, kernel_size=3) self.gradient_checkpointing = False def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: # hidden_states: (B, C, T, H, W) hidden_states = self.conv_in(hidden_states) for down_block in self.down_blocks: if torch.is_grad_enabled() and self.gradient_checkpointing: hidden_states = self._gradient_checkpointing_func(down_block, hidden_states) else: hidden_states = down_block(hidden_states) hidden_states = self.mid_block(hidden_states) if self.spatial_group_norm: batch_size = hidden_states.size(0) hidden_states = hidden_states.permute(0, 2, 1, 3, 4).flatten(0, 1) hidden_states = self.conv_norm_out(hidden_states) hidden_states = hidden_states.unflatten(0, (batch_size, -1)).permute(0, 2, 1, 3, 4) else: hidden_states = self.conv_norm_out(hidden_states) hidden_states = self.conv_act(hidden_states) hidden_states = self.conv_out(hidden_states) return hidden_states class EasyAnimateDecoder(nn.Module): r""" Causal decoder for 3D video-like data used in [EasyAnimate](https://huggingface.co/papers/2405.18991). """ _supports_gradient_checkpointing = True def __init__( self, in_channels: int = 8, out_channels: int = 3, up_block_types: tuple[str, ...] = ( "SpatialUpBlock3D", "SpatialTemporalUpBlock3D", "SpatialTemporalUpBlock3D", "SpatialTemporalUpBlock3D", ), block_out_channels: tuple[int, ...] = [128, 256, 512, 512], layers_per_block: int = 2, norm_num_groups: int = 32, act_fn: str = "silu", spatial_group_norm: bool = False, ): super().__init__() # 1. 
Input convolution self.conv_in = EasyAnimateCausalConv3d(in_channels, block_out_channels[-1], kernel_size=3) # 2. Middle block self.mid_block = EasyAnimateMidBlock3d( in_channels=block_out_channels[-1], num_layers=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, norm_eps=1e-6, dropout=0, output_scale_factor=1, ) # 3. Up blocks self.up_blocks = nn.ModuleList([]) reversed_block_out_channels = list(reversed(block_out_channels)) output_channels = reversed_block_out_channels[0] for i, up_block_type in enumerate(up_block_types): input_channels = output_channels output_channels = reversed_block_out_channels[i] is_final_block = i == len(block_out_channels) - 1 # Create and append up block to up_blocks if up_block_type == "SpatialUpBlock3D": up_block = EasyAnimateUpBlock3d( in_channels=input_channels, out_channels=output_channels, num_layers=layers_per_block + 1, act_fn=act_fn, norm_num_groups=norm_num_groups, norm_eps=1e-6, spatial_group_norm=spatial_group_norm, add_upsample=not is_final_block, add_temporal_upsample=False, ) elif up_block_type == "SpatialTemporalUpBlock3D": up_block = EasyAnimateUpBlock3d( in_channels=input_channels, out_channels=output_channels, num_layers=layers_per_block + 1, act_fn=act_fn, norm_num_groups=norm_num_groups, norm_eps=1e-6, spatial_group_norm=spatial_group_norm, add_upsample=not is_final_block, add_temporal_upsample=True, ) else: raise ValueError(f"Unknown up block type: {up_block_type}") self.up_blocks.append(up_block) # Output normalization and activation self.spatial_group_norm = spatial_group_norm self.conv_norm_out = nn.GroupNorm( num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6, ) self.conv_act = get_activation(act_fn) # Output convolution layer self.conv_out = EasyAnimateCausalConv3d(block_out_channels[0], out_channels, kernel_size=3) self.gradient_checkpointing = False def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: # hidden_states: (B, C, T, H, W) hidden_states = 
self.conv_in(hidden_states) if torch.is_grad_enabled() and self.gradient_checkpointing: hidden_states = self._gradient_checkpointing_func(self.mid_block, hidden_states) else: hidden_states = self.mid_block(hidden_states) for up_block in self.up_blocks: if torch.is_grad_enabled() and self.gradient_checkpointing: hidden_states = self._gradient_checkpointing_func(up_block, hidden_states) else: hidden_states = up_block(hidden_states) if self.spatial_group_norm: batch_size = hidden_states.size(0) hidden_states = hidden_states.permute(0, 2, 1, 3, 4).flatten(0, 1) # [B, C, T, H, W] -> [B * T, C, H, W] hidden_states = self.conv_norm_out(hidden_states) hidden_states = hidden_states.unflatten(0, (batch_size, -1)).permute( 0, 2, 1, 3, 4 ) # [B * T, C, H, W] -> [B, C, T, H, W] else: hidden_states = self.conv_norm_out(hidden_states) hidden_states = self.conv_act(hidden_states) hidden_states = self.conv_out(hidden_states) return hidden_states class AutoencoderKLMagvit(ModelMixin, AutoencoderMixin, ConfigMixin): r""" A VAE model with KL loss for encoding images into latents and decoding latent representations into images. This model is used in [EasyAnimate](https://huggingface.co/papers/2405.18991). This model inherits from [`ModelMixin`]. Check the superclass documentation for it's generic methods implemented for all models (such as downloading or saving). """ _supports_gradient_checkpointing = True @register_to_config def __init__( self, in_channels: int = 3, latent_channels: int = 16, out_channels: int = 3, block_out_channels: tuple[int, ...] = [128, 256, 512, 512], down_block_types: tuple[str, ...] = [ "SpatialDownBlock3D", "SpatialTemporalDownBlock3D", "SpatialTemporalDownBlock3D", "SpatialTemporalDownBlock3D", ], up_block_types: tuple[str, ...] 
= [ "SpatialUpBlock3D", "SpatialTemporalUpBlock3D", "SpatialTemporalUpBlock3D", "SpatialTemporalUpBlock3D", ], layers_per_block: int = 2, act_fn: str = "silu", norm_num_groups: int = 32, scaling_factor: float = 0.7125, spatial_group_norm: bool = True, ): super().__init__() # Initialize the encoder self.encoder = EasyAnimateEncoder( in_channels=in_channels, out_channels=latent_channels, down_block_types=down_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, norm_num_groups=norm_num_groups, act_fn=act_fn, double_z=True, spatial_group_norm=spatial_group_norm, ) # Initialize the decoder self.decoder = EasyAnimateDecoder( in_channels=latent_channels, out_channels=out_channels, up_block_types=up_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, norm_num_groups=norm_num_groups, act_fn=act_fn, spatial_group_norm=spatial_group_norm, ) # Initialize convolution layers for quantization and post-quantization self.quant_conv = nn.Conv3d(2 * latent_channels, 2 * latent_channels, kernel_size=1) self.post_quant_conv = nn.Conv3d(latent_channels, latent_channels, kernel_size=1) self.spatial_compression_ratio = 2 ** (len(block_out_channels) - 1) self.temporal_compression_ratio = 2 ** (len(block_out_channels) - 2) # When decoding a batch of video latents at a time, one can save memory by slicing across the batch dimension # to perform decoding of a single video latent at a time. self.use_slicing = False # When decoding spatially large video latents, the memory requirement is very high. By breaking the video latent # frames spatially into smaller tiles and performing multiple forward passes for decoding, and then blending the # intermediate tiles together, the memory requirement can be lowered. self.use_tiling = False # When decoding temporally long video latents, the memory requirement is very high. 
By decoding latent frames # at a fixed frame batch size (based on `self.num_latent_frames_batch_size`), the memory requirement can be lowered. self.use_framewise_encoding = False self.use_framewise_decoding = False # Assign mini-batch sizes for encoder and decoder self.num_sample_frames_batch_size = 4 self.num_latent_frames_batch_size = 1 # The minimal tile height and width for spatial tiling to be used self.tile_sample_min_height = 512 self.tile_sample_min_width = 512 self.tile_sample_min_num_frames = 4 # The minimal distance between two spatial tiles self.tile_sample_stride_height = 448 self.tile_sample_stride_width = 448 self.tile_sample_stride_num_frames = 8 def _clear_conv_cache(self): # Clear cache for convolutional layers if needed for name, module in self.named_modules(): if isinstance(module, EasyAnimateCausalConv3d): module._clear_conv_cache() if isinstance(module, EasyAnimateUpsampler3D): module._clear_conv_cache() def enable_tiling( self, tile_sample_min_height: int | None = None, tile_sample_min_width: int | None = None, tile_sample_min_num_frames: int | None = None, tile_sample_stride_height: float | None = None, tile_sample_stride_width: float | None = None, tile_sample_stride_num_frames: float | None = None, ) -> None: r""" Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow processing larger images. Args: tile_sample_min_height (`int`, *optional*): The minimum height required for a sample to be separated into tiles across the height dimension. tile_sample_min_width (`int`, *optional*): The minimum width required for a sample to be separated into tiles across the width dimension. tile_sample_stride_height (`int`, *optional*): The minimum amount of overlap between two consecutive vertical tiles. This is to ensure that there are no tiling artifacts produced across the height dimension. 
tile_sample_stride_width (`int`, *optional*): The stride between two consecutive horizontal tiles. This is to ensure that there are no tiling artifacts produced across the width dimension. """ self.use_tiling = True self.use_framewise_decoding = True self.use_framewise_encoding = True self.tile_sample_min_height = tile_sample_min_height or self.tile_sample_min_height self.tile_sample_min_width = tile_sample_min_width or self.tile_sample_min_width self.tile_sample_min_num_frames = tile_sample_min_num_frames or self.tile_sample_min_num_frames self.tile_sample_stride_height = tile_sample_stride_height or self.tile_sample_stride_height self.tile_sample_stride_width = tile_sample_stride_width or self.tile_sample_stride_width self.tile_sample_stride_num_frames = tile_sample_stride_num_frames or self.tile_sample_stride_num_frames @apply_forward_hook def _encode( self, x: torch.Tensor, return_dict: bool = True ) -> AutoencoderKLOutput | tuple[DiagonalGaussianDistribution]: """ Encode a batch of images into latents. Args: x (`torch.Tensor`): Input batch of images. return_dict (`bool`, *optional*, defaults to `True`): Whether to return a [`~models.autoencoder_kl.AutoencoderKLOutput`] instead of a plain tuple. Returns: The latent representations of the encoded images. If `return_dict` is True, a [`~models.autoencoder_kl.AutoencoderKLOutput`] is returned, otherwise a plain `tuple` is returned. 
""" if self.use_tiling and (x.shape[-1] > self.tile_sample_min_height or x.shape[-2] > self.tile_sample_min_width): return self.tiled_encode(x, return_dict=return_dict) first_frames = self.encoder(x[:, :, :1, :, :]) h = [first_frames] for i in range(1, x.shape[2], self.num_sample_frames_batch_size): next_frames = self.encoder(x[:, :, i : i + self.num_sample_frames_batch_size, :, :]) h.append(next_frames) h = torch.cat(h, dim=2) moments = self.quant_conv(h) self._clear_conv_cache() return moments @apply_forward_hook def encode( self, x: torch.Tensor, return_dict: bool = True ) -> AutoencoderKLOutput | tuple[DiagonalGaussianDistribution]: """ Encode a batch of images into latents. Args: x (`torch.Tensor`): Input batch of images. return_dict (`bool`, *optional*, defaults to `True`): Whether to return a [`~models.autoencoder_kl.AutoencoderKLOutput`] instead of a plain tuple. Returns: The latent representations of the encoded videos. If `return_dict` is True, a [`~models.autoencoder_kl.AutoencoderKLOutput`] is returned, otherwise a plain `tuple` is returned. 
""" if self.use_slicing and x.shape[0] > 1: encoded_slices = [self._encode(x_slice) for x_slice in x.split(1)] h = torch.cat(encoded_slices) else: h = self._encode(x) posterior = DiagonalGaussianDistribution(h) if not return_dict: return (posterior,) return AutoencoderKLOutput(latent_dist=posterior) def _decode(self, z: torch.Tensor, return_dict: bool = True) -> DecoderOutput | torch.Tensor: batch_size, num_channels, num_frames, height, width = z.shape tile_latent_min_height = self.tile_sample_min_height // self.spatial_compression_ratio tile_latent_min_width = self.tile_sample_min_width // self.spatial_compression_ratio if self.use_tiling and (z.shape[-1] > tile_latent_min_height or z.shape[-2] > tile_latent_min_width): return self.tiled_decode(z, return_dict=return_dict) z = self.post_quant_conv(z) # Process the first frame and save the result first_frames = self.decoder(z[:, :, :1, :, :]) # Initialize the list to store the processed frames, starting with the first frame dec = [first_frames] # Process the remaining frames, with the number of frames processed at a time determined by mini_batch_decoder for i in range(1, z.shape[2], self.num_latent_frames_batch_size): next_frames = self.decoder(z[:, :, i : i + self.num_latent_frames_batch_size, :, :]) dec.append(next_frames) # Concatenate all processed frames along the channel dimension dec = torch.cat(dec, dim=2) if not return_dict: return (dec,) return DecoderOutput(sample=dec) @apply_forward_hook def decode(self, z: torch.Tensor, return_dict: bool = True) -> DecoderOutput | torch.Tensor: """ Decode a batch of images. Args: z (`torch.Tensor`): Input batch of latent vectors. return_dict (`bool`, *optional*, defaults to `True`): Whether to return a [`~models.vae.DecoderOutput`] instead of a plain tuple. Returns: [`~models.vae.DecoderOutput`] or `tuple`: If return_dict is True, a [`~models.vae.DecoderOutput`] is returned, otherwise a plain `tuple` is returned. 
""" if self.use_slicing and z.shape[0] > 1: decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)] decoded = torch.cat(decoded_slices) else: decoded = self._decode(z).sample self._clear_conv_cache() if not return_dict: return (decoded,) return DecoderOutput(sample=decoded) def blend_v(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor: blend_extent = min(a.shape[3], b.shape[3], blend_extent) for y in range(blend_extent): b[:, :, :, y, :] = a[:, :, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, :, y, :] * ( y / blend_extent ) return b def blend_h(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor: blend_extent = min(a.shape[4], b.shape[4], blend_extent) for x in range(blend_extent): b[:, :, :, :, x] = a[:, :, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, :, x] * ( x / blend_extent ) return b def tiled_encode(self, x: torch.Tensor, return_dict: bool = True) -> AutoencoderKLOutput: batch_size, num_channels, num_frames, height, width = x.shape latent_height = height // self.spatial_compression_ratio latent_width = width // self.spatial_compression_ratio tile_latent_min_height = self.tile_sample_min_height // self.spatial_compression_ratio tile_latent_min_width = self.tile_sample_min_width // self.spatial_compression_ratio tile_latent_stride_height = self.tile_sample_stride_height // self.spatial_compression_ratio tile_latent_stride_width = self.tile_sample_stride_width // self.spatial_compression_ratio blend_height = tile_latent_min_height - tile_latent_stride_height blend_width = tile_latent_min_width - tile_latent_stride_width # Split the image into 512x512 tiles and encode them separately. 
rows = [] for i in range(0, height, self.tile_sample_stride_height): row = [] for j in range(0, width, self.tile_sample_stride_width): tile = x[ :, :, :, i : i + self.tile_sample_min_height, j : j + self.tile_sample_min_width, ] first_frames = self.encoder(tile[:, :, 0:1, :, :]) tile_h = [first_frames] for k in range(1, num_frames, self.num_sample_frames_batch_size): next_frames = self.encoder(tile[:, :, k : k + self.num_sample_frames_batch_size, :, :]) tile_h.append(next_frames) tile = torch.cat(tile_h, dim=2) tile = self.quant_conv(tile) self._clear_conv_cache() row.append(tile) rows.append(row) result_rows = [] for i, row in enumerate(rows): result_row = [] for j, tile in enumerate(row): # blend the above tile and the left tile # to the current tile and add the current tile to the result row if i > 0: tile = self.blend_v(rows[i - 1][j], tile, blend_height) if j > 0: tile = self.blend_h(row[j - 1], tile, blend_width) result_row.append(tile[:, :, :, :latent_height, :latent_width]) result_rows.append(torch.cat(result_row, dim=4)) moments = torch.cat(result_rows, dim=3)[:, :, :, :latent_height, :latent_width] return moments def tiled_decode(self, z: torch.Tensor, return_dict: bool = True) -> DecoderOutput | torch.Tensor: batch_size, num_channels, num_frames, height, width = z.shape sample_height = height * self.spatial_compression_ratio sample_width = width * self.spatial_compression_ratio tile_latent_min_height = self.tile_sample_min_height // self.spatial_compression_ratio tile_latent_min_width = self.tile_sample_min_width // self.spatial_compression_ratio tile_latent_stride_height = self.tile_sample_stride_height // self.spatial_compression_ratio tile_latent_stride_width = self.tile_sample_stride_width // self.spatial_compression_ratio blend_height = self.tile_sample_min_height - self.tile_sample_stride_height blend_width = self.tile_sample_min_width - self.tile_sample_stride_width # Split z into overlapping 64x64 tiles and decode them separately. 
# The tiles have an overlap to avoid seams between tiles. rows = [] for i in range(0, height, tile_latent_stride_height): row = [] for j in range(0, width, tile_latent_stride_width): tile = z[ :, :, :, i : i + tile_latent_min_height, j : j + tile_latent_min_width, ] tile = self.post_quant_conv(tile) # Process the first frame and save the result first_frames = self.decoder(tile[:, :, :1, :, :]) # Initialize the list to store the processed frames, starting with the first frame tile_dec = [first_frames] # Process the remaining frames, with the number of frames processed at a time determined by mini_batch_decoder for k in range(1, num_frames, self.num_latent_frames_batch_size): next_frames = self.decoder(tile[:, :, k : k + self.num_latent_frames_batch_size, :, :]) tile_dec.append(next_frames) # Concatenate all processed frames along the channel dimension decoded = torch.cat(tile_dec, dim=2) self._clear_conv_cache() row.append(decoded) rows.append(row) result_rows = [] for i, row in enumerate(rows): result_row = [] for j, tile in enumerate(row): # blend the above tile and the left tile # to the current tile and add the current tile to the result row if i > 0: tile = self.blend_v(rows[i - 1][j], tile, blend_height) if j > 0: tile = self.blend_h(row[j - 1], tile, blend_width) result_row.append(tile[:, :, :, : self.tile_sample_stride_height, : self.tile_sample_stride_width]) result_rows.append(torch.cat(result_row, dim=4)) dec = torch.cat(result_rows, dim=3)[:, :, :, :sample_height, :sample_width] if not return_dict: return (dec,) return DecoderOutput(sample=dec) def forward( self, sample: torch.Tensor, sample_posterior: bool = False, return_dict: bool = True, generator: torch.Generator | None = None, ) -> DecoderOutput | torch.Tensor: r""" Args: sample (`torch.Tensor`): Input sample. sample_posterior (`bool`, *optional*, defaults to `False`): Whether to sample from the posterior. 
return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`DecoderOutput`] instead of a plain tuple. """ x = sample posterior = self.encode(x).latent_dist if sample_posterior: z = posterior.sample(generator=generator) else: z = posterior.mode() dec = self.decode(z).sample if not return_dict: return (dec,) return DecoderOutput(sample=dec)
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/models/autoencoders/autoencoder_kl_magvit.py", "license": "Apache License 2.0", "lines": 923, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:src/diffusers/models/transformers/transformer_easyanimate.py
# Copyright 2025 The EasyAnimate team and The HuggingFace Team. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch import torch.nn.functional as F from torch import nn from ...configuration_utils import ConfigMixin, register_to_config from ...utils import logging from ...utils.torch_utils import maybe_allow_in_graph from ..attention import Attention, FeedForward from ..embeddings import TimestepEmbedding, Timesteps, get_3d_rotary_pos_embed from ..modeling_outputs import Transformer2DModelOutput from ..modeling_utils import ModelMixin from ..normalization import AdaLayerNorm, FP32LayerNorm, RMSNorm logger = logging.get_logger(__name__) # pylint: disable=invalid-name class EasyAnimateLayerNormZero(nn.Module): def __init__( self, conditioning_dim: int, embedding_dim: int, elementwise_affine: bool = True, eps: float = 1e-5, bias: bool = True, norm_type: str = "fp32_layer_norm", ) -> None: super().__init__() self.silu = nn.SiLU() self.linear = nn.Linear(conditioning_dim, 6 * embedding_dim, bias=bias) if norm_type == "layer_norm": self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=elementwise_affine, eps=eps) elif norm_type == "fp32_layer_norm": self.norm = FP32LayerNorm(embedding_dim, elementwise_affine=elementwise_affine, eps=eps) else: raise ValueError( f"Unsupported `norm_type` ({norm_type}) provided. Supported ones are: 'layer_norm', 'fp32_layer_norm'." 
) def forward( self, hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor, temb: torch.Tensor ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: shift, scale, gate, enc_shift, enc_scale, enc_gate = self.linear(self.silu(temb)).chunk(6, dim=1) hidden_states = self.norm(hidden_states) * (1 + scale.unsqueeze(1)) + shift.unsqueeze(1) encoder_hidden_states = self.norm(encoder_hidden_states) * (1 + enc_scale.unsqueeze(1)) + enc_shift.unsqueeze( 1 ) return hidden_states, encoder_hidden_states, gate, enc_gate class EasyAnimateRotaryPosEmbed(nn.Module): def __init__(self, patch_size: int, rope_dim: list[int]) -> None: super().__init__() self.patch_size = patch_size self.rope_dim = rope_dim def get_resize_crop_region_for_grid(self, src, tgt_width, tgt_height): tw = tgt_width th = tgt_height h, w = src r = h / w if r > (th / tw): resize_height = th resize_width = int(round(th / h * w)) else: resize_width = tw resize_height = int(round(tw / w * h)) crop_top = int(round((th - resize_height) / 2.0)) crop_left = int(round((tw - resize_width) / 2.0)) return (crop_top, crop_left), (crop_top + resize_height, crop_left + resize_width) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: bs, c, num_frames, grid_height, grid_width = hidden_states.size() grid_height = grid_height // self.patch_size grid_width = grid_width // self.patch_size base_size_width = 90 // self.patch_size base_size_height = 60 // self.patch_size grid_crops_coords = self.get_resize_crop_region_for_grid( (grid_height, grid_width), base_size_width, base_size_height ) image_rotary_emb = get_3d_rotary_pos_embed( self.rope_dim, grid_crops_coords, grid_size=(grid_height, grid_width), temporal_size=hidden_states.size(2), use_real=True, ) return image_rotary_emb class EasyAnimateAttnProcessor2_0: r""" Processor for implementing scaled dot-product attention (enabled by default if you're using PyTorch 2.0). This is used in the EasyAnimateTransformer3DModel model. 
""" def __init__(self): if not hasattr(F, "scaled_dot_product_attention"): raise ImportError( "EasyAnimateAttnProcessor2_0 requires PyTorch 2.0 or above. To use it, please install PyTorch 2.0." ) def __call__( self, attn: Attention, hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor, attention_mask: torch.Tensor | None = None, image_rotary_emb: torch.Tensor | None = None, ) -> torch.Tensor: if attn.add_q_proj is None and encoder_hidden_states is not None: hidden_states = torch.cat([encoder_hidden_states, hidden_states], dim=1) # 1. QKV projections query = attn.to_q(hidden_states) key = attn.to_k(hidden_states) value = attn.to_v(hidden_states) query = query.unflatten(2, (attn.heads, -1)).transpose(1, 2) key = key.unflatten(2, (attn.heads, -1)).transpose(1, 2) value = value.unflatten(2, (attn.heads, -1)).transpose(1, 2) # 2. QK normalization if attn.norm_q is not None: query = attn.norm_q(query) if attn.norm_k is not None: key = attn.norm_k(key) # 3. Encoder condition QKV projection and normalization if attn.add_q_proj is not None and encoder_hidden_states is not None: encoder_query = attn.add_q_proj(encoder_hidden_states) encoder_key = attn.add_k_proj(encoder_hidden_states) encoder_value = attn.add_v_proj(encoder_hidden_states) encoder_query = encoder_query.unflatten(2, (attn.heads, -1)).transpose(1, 2) encoder_key = encoder_key.unflatten(2, (attn.heads, -1)).transpose(1, 2) encoder_value = encoder_value.unflatten(2, (attn.heads, -1)).transpose(1, 2) if attn.norm_added_q is not None: encoder_query = attn.norm_added_q(encoder_query) if attn.norm_added_k is not None: encoder_key = attn.norm_added_k(encoder_key) query = torch.cat([encoder_query, query], dim=2) key = torch.cat([encoder_key, key], dim=2) value = torch.cat([encoder_value, value], dim=2) if image_rotary_emb is not None: from ..embeddings import apply_rotary_emb query[:, :, encoder_hidden_states.shape[1] :] = apply_rotary_emb( query[:, :, encoder_hidden_states.shape[1] :], image_rotary_emb ) 
if not attn.is_cross_attention: key[:, :, encoder_hidden_states.shape[1] :] = apply_rotary_emb( key[:, :, encoder_hidden_states.shape[1] :], image_rotary_emb ) # 5. Attention hidden_states = F.scaled_dot_product_attention( query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False ) hidden_states = hidden_states.transpose(1, 2).flatten(2, 3) hidden_states = hidden_states.to(query.dtype) # 6. Output projection if encoder_hidden_states is not None: encoder_hidden_states, hidden_states = ( hidden_states[:, : encoder_hidden_states.shape[1]], hidden_states[:, encoder_hidden_states.shape[1] :], ) if getattr(attn, "to_out", None) is not None: hidden_states = attn.to_out[0](hidden_states) hidden_states = attn.to_out[1](hidden_states) if getattr(attn, "to_add_out", None) is not None: encoder_hidden_states = attn.to_add_out(encoder_hidden_states) else: if getattr(attn, "to_out", None) is not None: hidden_states = attn.to_out[0](hidden_states) hidden_states = attn.to_out[1](hidden_states) return hidden_states, encoder_hidden_states @maybe_allow_in_graph class EasyAnimateTransformerBlock(nn.Module): def __init__( self, dim: int, num_attention_heads: int, attention_head_dim: int, time_embed_dim: int, dropout: float = 0.0, activation_fn: str = "gelu-approximate", norm_elementwise_affine: bool = True, norm_eps: float = 1e-6, final_dropout: bool = True, ff_inner_dim: int | None = None, ff_bias: bool = True, qk_norm: bool = True, after_norm: bool = False, norm_type: str = "fp32_layer_norm", is_mmdit_block: bool = True, ): super().__init__() # Attention Part self.norm1 = EasyAnimateLayerNormZero( time_embed_dim, dim, norm_elementwise_affine, norm_eps, norm_type=norm_type, bias=True ) self.attn1 = Attention( query_dim=dim, dim_head=attention_head_dim, heads=num_attention_heads, qk_norm="layer_norm" if qk_norm else None, eps=1e-6, bias=True, added_proj_bias=True, added_kv_proj_dim=dim if is_mmdit_block else None, context_pre_only=False if is_mmdit_block else None, 
processor=EasyAnimateAttnProcessor2_0(), ) # FFN Part self.norm2 = EasyAnimateLayerNormZero( time_embed_dim, dim, norm_elementwise_affine, norm_eps, norm_type=norm_type, bias=True ) self.ff = FeedForward( dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout, inner_dim=ff_inner_dim, bias=ff_bias, ) self.txt_ff = None if is_mmdit_block: self.txt_ff = FeedForward( dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout, inner_dim=ff_inner_dim, bias=ff_bias, ) self.norm3 = None if after_norm: self.norm3 = FP32LayerNorm(dim, elementwise_affine=norm_elementwise_affine, eps=norm_eps) def forward( self, hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor, temb: torch.Tensor, image_rotary_emb: tuple[torch.Tensor, torch.Tensor] | None = None, ) -> tuple[torch.Tensor, torch.Tensor]: # 1. Attention norm_hidden_states, norm_encoder_hidden_states, gate_msa, enc_gate_msa = self.norm1( hidden_states, encoder_hidden_states, temb ) attn_hidden_states, attn_encoder_hidden_states = self.attn1( hidden_states=norm_hidden_states, encoder_hidden_states=norm_encoder_hidden_states, image_rotary_emb=image_rotary_emb, ) hidden_states = hidden_states + gate_msa.unsqueeze(1) * attn_hidden_states encoder_hidden_states = encoder_hidden_states + enc_gate_msa.unsqueeze(1) * attn_encoder_hidden_states # 2. 
Feed-forward norm_hidden_states, norm_encoder_hidden_states, gate_ff, enc_gate_ff = self.norm2( hidden_states, encoder_hidden_states, temb ) if self.norm3 is not None: norm_hidden_states = self.norm3(self.ff(norm_hidden_states)) if self.txt_ff is not None: norm_encoder_hidden_states = self.norm3(self.txt_ff(norm_encoder_hidden_states)) else: norm_encoder_hidden_states = self.norm3(self.ff(norm_encoder_hidden_states)) else: norm_hidden_states = self.ff(norm_hidden_states) if self.txt_ff is not None: norm_encoder_hidden_states = self.txt_ff(norm_encoder_hidden_states) else: norm_encoder_hidden_states = self.ff(norm_encoder_hidden_states) hidden_states = hidden_states + gate_ff.unsqueeze(1) * norm_hidden_states encoder_hidden_states = encoder_hidden_states + enc_gate_ff.unsqueeze(1) * norm_encoder_hidden_states return hidden_states, encoder_hidden_states class EasyAnimateTransformer3DModel(ModelMixin, ConfigMixin): """ A Transformer model for video-like data in [EasyAnimate](https://github.com/aigc-apps/EasyAnimate). Parameters: num_attention_heads (`int`, defaults to `48`): The number of heads to use for multi-head attention. attention_head_dim (`int`, defaults to `64`): The number of channels in each head. in_channels (`int`, defaults to `16`): The number of channels in the input. out_channels (`int`, *optional*, defaults to `16`): The number of channels in the output. patch_size (`int`, defaults to `2`): The size of the patches to use in the patch embedding layer. sample_width (`int`, defaults to `90`): The width of the input latents. sample_height (`int`, defaults to `60`): The height of the input latents. activation_fn (`str`, defaults to `"gelu-approximate"`): Activation function to use in feed-forward. timestep_activation_fn (`str`, defaults to `"silu"`): Activation function to use when generating the timestep embeddings. num_layers (`int`, defaults to `30`): The number of layers of Transformer blocks to use. 
mmdit_layers (`int`, defaults to `1000`): The number of layers of Multi Modal Transformer blocks to use. dropout (`float`, defaults to `0.0`): The dropout probability to use. time_embed_dim (`int`, defaults to `512`): Output dimension of timestep embeddings. text_embed_dim (`int`, defaults to `4096`): Input dimension of text embeddings from the text encoder. norm_eps (`float`, defaults to `1e-5`): The epsilon value to use in normalization layers. norm_elementwise_affine (`bool`, defaults to `True`): Whether to use elementwise affine in normalization layers. flip_sin_to_cos (`bool`, defaults to `True`): Whether to flip the sin to cos in the time embedding. time_position_encoding_type (`str`, defaults to `3d_rope`): Type of time position encoding. after_norm (`bool`, defaults to `False`): Flag to apply normalization after. resize_inpaint_mask_directly (`bool`, defaults to `True`): Flag to resize inpaint mask directly. enable_text_attention_mask (`bool`, defaults to `True`): Flag to enable text attention mask. add_noise_in_inpaint_model (`bool`, defaults to `False`): Flag to add noise in inpaint model. 
""" _supports_gradient_checkpointing = True _no_split_modules = ["EasyAnimateTransformerBlock"] _skip_layerwise_casting_patterns = ["^proj$", "norm", "^proj_out$"] @register_to_config def __init__( self, num_attention_heads: int = 48, attention_head_dim: int = 64, in_channels: int | None = None, out_channels: int | None = None, patch_size: int | None = None, sample_width: int = 90, sample_height: int = 60, activation_fn: str = "gelu-approximate", timestep_activation_fn: str = "silu", freq_shift: int = 0, num_layers: int = 48, mmdit_layers: int = 48, dropout: float = 0.0, time_embed_dim: int = 512, add_norm_text_encoder: bool = False, text_embed_dim: int = 3584, text_embed_dim_t5: int = None, norm_eps: float = 1e-5, norm_elementwise_affine: bool = True, flip_sin_to_cos: bool = True, time_position_encoding_type: str = "3d_rope", after_norm=False, resize_inpaint_mask_directly: bool = True, enable_text_attention_mask: bool = True, add_noise_in_inpaint_model: bool = True, ): super().__init__() inner_dim = num_attention_heads * attention_head_dim # 1. Timestep embedding self.time_proj = Timesteps(inner_dim, flip_sin_to_cos, freq_shift) self.time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, timestep_activation_fn) self.rope_embedding = EasyAnimateRotaryPosEmbed(patch_size, attention_head_dim) # 2. Patch embedding self.proj = nn.Conv2d( in_channels, inner_dim, kernel_size=(patch_size, patch_size), stride=patch_size, bias=True ) # 3. 
Text refined embedding self.text_proj = None self.text_proj_t5 = None if not add_norm_text_encoder: self.text_proj = nn.Linear(text_embed_dim, inner_dim) if text_embed_dim_t5 is not None: self.text_proj_t5 = nn.Linear(text_embed_dim_t5, inner_dim) else: self.text_proj = nn.Sequential( RMSNorm(text_embed_dim, 1e-6, elementwise_affine=True), nn.Linear(text_embed_dim, inner_dim) ) if text_embed_dim_t5 is not None: self.text_proj_t5 = nn.Sequential( RMSNorm(text_embed_dim, 1e-6, elementwise_affine=True), nn.Linear(text_embed_dim_t5, inner_dim) ) # 4. Transformer blocks self.transformer_blocks = nn.ModuleList( [ EasyAnimateTransformerBlock( dim=inner_dim, num_attention_heads=num_attention_heads, attention_head_dim=attention_head_dim, time_embed_dim=time_embed_dim, dropout=dropout, activation_fn=activation_fn, norm_elementwise_affine=norm_elementwise_affine, norm_eps=norm_eps, after_norm=after_norm, is_mmdit_block=True if _ < mmdit_layers else False, ) for _ in range(num_layers) ] ) self.norm_final = nn.LayerNorm(inner_dim, norm_eps, norm_elementwise_affine) # 5. Output norm & projection self.norm_out = AdaLayerNorm( embedding_dim=time_embed_dim, output_dim=2 * inner_dim, norm_elementwise_affine=norm_elementwise_affine, norm_eps=norm_eps, chunk_dim=1, ) self.proj_out = nn.Linear(inner_dim, patch_size * patch_size * out_channels) self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, timestep: torch.Tensor, timestep_cond: torch.Tensor | None = None, encoder_hidden_states: torch.Tensor | None = None, encoder_hidden_states_t5: torch.Tensor | None = None, inpaint_latents: torch.Tensor | None = None, control_latents: torch.Tensor | None = None, return_dict: bool = True, ) -> tuple[torch.Tensor] | Transformer2DModelOutput: batch_size, channels, video_length, height, width = hidden_states.size() p = self.config.patch_size post_patch_height = height // p post_patch_width = width // p # 1. 
Time embedding temb = self.time_proj(timestep).to(dtype=hidden_states.dtype) temb = self.time_embedding(temb, timestep_cond) image_rotary_emb = self.rope_embedding(hidden_states) # 2. Patch embedding if inpaint_latents is not None: hidden_states = torch.concat([hidden_states, inpaint_latents], 1) if control_latents is not None: hidden_states = torch.concat([hidden_states, control_latents], 1) hidden_states = hidden_states.permute(0, 2, 1, 3, 4).flatten(0, 1) # [B, C, F, H, W] -> [BF, C, H, W] hidden_states = self.proj(hidden_states) hidden_states = hidden_states.unflatten(0, (batch_size, -1)).permute( 0, 2, 1, 3, 4 ) # [BF, C, H, W] -> [B, F, C, H, W] hidden_states = hidden_states.flatten(2, 4).transpose(1, 2) # [B, F, C, H, W] -> [B, FHW, C] # 3. Text embedding encoder_hidden_states = self.text_proj(encoder_hidden_states) if encoder_hidden_states_t5 is not None: encoder_hidden_states_t5 = self.text_proj_t5(encoder_hidden_states_t5) encoder_hidden_states = torch.cat([encoder_hidden_states, encoder_hidden_states_t5], dim=1).contiguous() # 4. Transformer blocks for block in self.transformer_blocks: if torch.is_grad_enabled() and self.gradient_checkpointing: hidden_states, encoder_hidden_states = self._gradient_checkpointing_func( block, hidden_states, encoder_hidden_states, temb, image_rotary_emb ) else: hidden_states, encoder_hidden_states = block( hidden_states, encoder_hidden_states, temb, image_rotary_emb ) hidden_states = self.norm_final(hidden_states) # 5. Output norm & projection hidden_states = self.norm_out(hidden_states, temb=temb) hidden_states = self.proj_out(hidden_states) # 6. Unpatchify p = self.config.patch_size output = hidden_states.reshape(batch_size, video_length, post_patch_height, post_patch_width, channels, p, p) output = output.permute(0, 4, 1, 2, 5, 3, 6).flatten(5, 6).flatten(3, 4) if not return_dict: return (output,) return Transformer2DModelOutput(sample=output)
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/models/transformers/transformer_easyanimate.py", "license": "Apache License 2.0", "lines": 460, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:src/diffusers/pipelines/easyanimate/pipeline_easyanimate.py
# Copyright 2025 The EasyAnimate team and The HuggingFace Team. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect from typing import Callable import torch from transformers import ( BertModel, BertTokenizer, Qwen2Tokenizer, Qwen2VLForConditionalGeneration, ) from ...callbacks import MultiPipelineCallbacks, PipelineCallback from ...models import AutoencoderKLMagvit, EasyAnimateTransformer3DModel from ...pipelines.pipeline_utils import DiffusionPipeline from ...schedulers import FlowMatchEulerDiscreteScheduler from ...utils import is_torch_xla_available, logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ...video_processor import VideoProcessor from .pipeline_output import EasyAnimatePipelineOutput if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```python >>> import torch >>> from diffusers import EasyAnimatePipeline >>> from diffusers.utils import export_to_video >>> # Models: "alibaba-pai/EasyAnimateV5.1-12b-zh" >>> pipe = EasyAnimatePipeline.from_pretrained( ... "alibaba-pai/EasyAnimateV5.1-7b-zh-diffusers", torch_dtype=torch.float16 ... ).to("cuda") >>> prompt = ( ... "A panda, dressed in a small, red jacket and a tiny hat, sits on a wooden stool in a serene bamboo forest. " ... 
"The panda's fluffy paws strum a miniature acoustic guitar, producing soft, melodic tunes. Nearby, a few other " ... "pandas gather, watching curiously and some clapping in rhythm. Sunlight filters through the tall bamboo, " ... "casting a gentle glow on the scene. The panda's face is expressive, showing concentration and joy as it plays. " ... "The background includes a small, flowing stream and vibrant green foliage, enhancing the peaceful and magical " ... "atmosphere of this unique musical performance." ... ) >>> sample_size = (512, 512) >>> video = pipe( ... prompt=prompt, ... guidance_scale=6, ... negative_prompt="bad detailed", ... height=sample_size[0], ... width=sample_size[1], ... num_inference_steps=50, ... ).frames[0] >>> export_to_video(video, "output.mp4", fps=8) ``` """ # Similar to diffusers.pipelines.hunyuandit.pipeline_hunyuandit.get_resize_crop_region_for_grid def get_resize_crop_region_for_grid(src, tgt_width, tgt_height): tw = tgt_width th = tgt_height h, w = src r = h / w if r > (th / tw): resize_height = th resize_width = int(round(th / h * w)) else: resize_width = tw resize_height = int(round(tw / w * h)) crop_top = int(round((th - resize_height) / 2.0)) crop_left = int(round((tw - resize_width) / 2.0)) return (crop_top, crop_left), (crop_top + resize_height, crop_left + resize_width) # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): r""" Rescales `noise_cfg` tensor based on `guidance_rescale` to improve image quality and fix overexposure. Based on Section 3.4 from [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://huggingface.co/papers/2305.08891). Args: noise_cfg (`torch.Tensor`): The predicted noise tensor for the guided diffusion process. noise_pred_text (`torch.Tensor`): The predicted noise tensor for the text-guided diffusion process. 
guidance_rescale (`float`, *optional*, defaults to 0.0): A rescale factor applied to the noise predictions. Returns: noise_cfg (`torch.Tensor`): The rescaled noise prediction tensor. """ std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) # rescale the results from guidance (fixes overexposure) noise_pred_rescaled = noise_cfg * (std_text / std_cfg) # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg return noise_cfg # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps def retrieve_timesteps( scheduler, num_inference_steps: int | None = None, device: str | torch.device | None = None, timesteps: list[int] | None = None, sigmas: list[float] | None = None, **kwargs, ): r""" Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. Args: scheduler (`SchedulerMixin`): The scheduler to get timesteps from. num_inference_steps (`int`): The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` must be `None`. device (`str` or `torch.device`, *optional*): The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. timesteps (`list[int]`, *optional*): Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, `num_inference_steps` and `sigmas` must be `None`. sigmas (`list[float]`, *optional*): Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, `num_inference_steps` and `timesteps` must be `None`. 
Returns: `tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the second element is the number of inference steps. """ if timesteps is not None and sigmas is not None: raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") if timesteps is not None: accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" timestep schedules. Please check whether you are using the correct scheduler." ) scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif sigmas is not None: accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accept_sigmas: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" sigmas schedules. Please check whether you are using the correct scheduler." ) scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return timesteps, num_inference_steps class EasyAnimatePipeline(DiffusionPipeline): r""" Pipeline for text-to-video generation using EasyAnimate. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) EasyAnimate uses one text encoder [qwen2 vl](https://huggingface.co/Qwen/Qwen2-VL-7B-Instruct) in V5.1. 
Args: vae ([`AutoencoderKLMagvit`]): Variational Auto-Encoder (VAE) Model to encode and decode video to and from latent representations. text_encoder (`~transformers.Qwen2VLForConditionalGeneration`, `~transformers.BertModel` | None): EasyAnimate uses [qwen2 vl](https://huggingface.co/Qwen/Qwen2-VL-7B-Instruct) in V5.1. tokenizer (`~transformers.Qwen2Tokenizer`, `~transformers.BertTokenizer` | None): A `Qwen2Tokenizer` or `BertTokenizer` to tokenize text. transformer ([`EasyAnimateTransformer3DModel`]): The EasyAnimate model designed by EasyAnimate Team. scheduler ([`FlowMatchEulerDiscreteScheduler`]): A scheduler to be used in combination with EasyAnimate to denoise the encoded image latents. """ model_cpu_offload_seq = "text_encoder->transformer->vae" _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"] def __init__( self, vae: AutoencoderKLMagvit, text_encoder: Qwen2VLForConditionalGeneration | BertModel, tokenizer: Qwen2Tokenizer | BertTokenizer, transformer: EasyAnimateTransformer3DModel, scheduler: FlowMatchEulerDiscreteScheduler, ): super().__init__() self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, transformer=transformer, scheduler=scheduler, ) self.enable_text_attention_mask = ( self.transformer.config.enable_text_attention_mask if getattr(self, "transformer", None) is not None else True ) self.vae_spatial_compression_ratio = ( self.vae.spatial_compression_ratio if getattr(self, "vae", None) is not None else 8 ) self.vae_temporal_compression_ratio = ( self.vae.temporal_compression_ratio if getattr(self, "vae", None) is not None else 4 ) self.video_processor = VideoProcessor(vae_scale_factor=self.vae_spatial_compression_ratio) def encode_prompt( self, prompt: str | list[str], num_images_per_prompt: int = 1, do_classifier_free_guidance: bool = True, negative_prompt: str | list[str] | None = None, prompt_embeds: torch.Tensor | None = None, negative_prompt_embeds: torch.Tensor | None = None, 
prompt_attention_mask: torch.Tensor | None = None, negative_prompt_attention_mask: torch.Tensor | None = None, device: torch.device | None = None, dtype: torch.dtype | None = None, max_sequence_length: int = 256, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `list[str]`, *optional*): prompt to be encoded device: (`torch.device`): torch device dtype (`torch.dtype`): torch dtype num_images_per_prompt (`int`): number of images that should be generated per prompt do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not negative_prompt (`str` or `list[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. prompt_attention_mask (`torch.Tensor`, *optional*): Attention mask for the prompt. Required when `prompt_embeds` is passed directly. negative_prompt_attention_mask (`torch.Tensor`, *optional*): Attention mask for the negative prompt. Required when `negative_prompt_embeds` is passed directly. max_sequence_length (`int`, *optional*): maximum sequence length to use for the prompt. 
""" dtype = dtype or self.text_encoder.dtype device = device or self.text_encoder.device if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: if isinstance(prompt, str): messages = [ { "role": "user", "content": [{"type": "text", "text": prompt}], } ] else: messages = [ { "role": "user", "content": [{"type": "text", "text": _prompt}], } for _prompt in prompt ] text = [ self.tokenizer.apply_chat_template([m], tokenize=False, add_generation_prompt=True) for m in messages ] text_inputs = self.tokenizer( text=text, padding="max_length", max_length=max_sequence_length, truncation=True, return_attention_mask=True, padding_side="right", return_tensors="pt", ) text_inputs = text_inputs.to(self.text_encoder.device) text_input_ids = text_inputs.input_ids prompt_attention_mask = text_inputs.attention_mask if self.enable_text_attention_mask: # Inference: Generation of the output prompt_embeds = self.text_encoder( input_ids=text_input_ids, attention_mask=prompt_attention_mask, output_hidden_states=True ).hidden_states[-2] else: raise ValueError("LLM needs attention_mask") prompt_attention_mask = prompt_attention_mask.repeat(num_images_per_prompt, 1) prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) bs_embed, seq_len, _ = prompt_embeds.shape # duplicate text embeddings for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) prompt_attention_mask = prompt_attention_mask.to(device=device) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance and negative_prompt_embeds is None: if negative_prompt is not None and isinstance(negative_prompt, str): messages = [ { "role": "user", "content": [{"type": "text", "text": negative_prompt}], 
} ] else: messages = [ { "role": "user", "content": [{"type": "text", "text": _negative_prompt}], } for _negative_prompt in negative_prompt ] text = [ self.tokenizer.apply_chat_template([m], tokenize=False, add_generation_prompt=True) for m in messages ] text_inputs = self.tokenizer( text=text, padding="max_length", max_length=max_sequence_length, truncation=True, return_attention_mask=True, padding_side="right", return_tensors="pt", ) text_inputs = text_inputs.to(self.text_encoder.device) text_input_ids = text_inputs.input_ids negative_prompt_attention_mask = text_inputs.attention_mask if self.enable_text_attention_mask: # Inference: Generation of the output negative_prompt_embeds = self.text_encoder( input_ids=text_input_ids, attention_mask=negative_prompt_attention_mask, output_hidden_states=True, ).hidden_states[-2] else: raise ValueError("LLM needs attention_mask") negative_prompt_attention_mask = negative_prompt_attention_mask.repeat(num_images_per_prompt, 1) if do_classifier_free_guidance: # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) negative_prompt_attention_mask = negative_prompt_attention_mask.to(device=device) return prompt_embeds, negative_prompt_embeds, prompt_attention_mask, negative_prompt_attention_mask # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://huggingface.co/papers/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs def check_inputs( self, prompt, height, width, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, prompt_attention_mask=None, negative_prompt_attention_mask=None, callback_on_step_end_tensor_inputs=None, ): if height % 16 != 0 or width % 16 != 0: raise ValueError(f"`height` and `width` have to be divisible by 16 but are {height} and {width}.") if callback_on_step_end_tensor_inputs is not None and not all( k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs ): raise ValueError( f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" ) if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." 
) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if prompt_embeds is not None and prompt_attention_mask is None: raise ValueError("Must provide `prompt_attention_mask` when specifying `prompt_embeds`.") if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) if negative_prompt_embeds is not None and negative_prompt_attention_mask is None: raise ValueError("Must provide `negative_prompt_attention_mask` when specifying `negative_prompt_embeds`.") if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." ) def prepare_latents( self, batch_size, num_channels_latents, num_frames, height, width, dtype, device, generator, latents=None ): if latents is not None: return latents.to(device=device, dtype=dtype) shape = ( batch_size, num_channels_latents, (num_frames - 1) // self.vae_temporal_compression_ratio + 1, height // self.vae_spatial_compression_ratio, width // self.vae_spatial_compression_ratio, ) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
) latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) # scale the initial noise by the standard deviation required by the scheduler if hasattr(self.scheduler, "init_noise_sigma"): latents = latents * self.scheduler.init_noise_sigma return latents @property def guidance_scale(self): return self._guidance_scale @property def guidance_rescale(self): return self._guidance_rescale # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1` # corresponds to doing no classifier free guidance. @property def do_classifier_free_guidance(self): return self._guidance_scale > 1 @property def num_timesteps(self): return self._num_timesteps @property def interrupt(self): return self._interrupt @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, prompt: str | list[str] = None, num_frames: int | None = 49, height: int | None = 512, width: int | None = 512, num_inference_steps: int | None = 50, guidance_scale: float | None = 5.0, negative_prompt: str | list[str] | None = None, num_images_per_prompt: int | None = 1, eta: float | None = 0.0, generator: torch.Generator | list[torch.Generator] | None = None, latents: torch.Tensor | None = None, prompt_embeds: torch.Tensor | None = None, timesteps: list[int] | None = None, negative_prompt_embeds: torch.Tensor | None = None, prompt_attention_mask: torch.Tensor | None = None, negative_prompt_attention_mask: torch.Tensor | None = None, output_type: str | None = "pil", return_dict: bool = True, callback_on_step_end: Callable[[int, int], None] | PipelineCallback | MultiPipelineCallbacks | None = None, callback_on_step_end_tensor_inputs: list[str] = ["latents"], guidance_rescale: float = 0.0, ): r""" Generates images or video using the EasyAnimate pipeline based on the provided prompts. 
Examples: prompt (`str` or `list[str]`, *optional*): Text prompts to guide the image or video generation. If not provided, use `prompt_embeds` instead. num_frames (`int`, *optional*): Length of the generated video (in frames). height (`int`, *optional*): Height of the generated image in pixels. width (`int`, *optional*): Width of the generated image in pixels. num_inference_steps (`int`, *optional*, defaults to 50): Number of denoising steps during generation. More steps generally yield higher quality images but slow down inference. guidance_scale (`float`, *optional*, defaults to 5.0): Encourages the model to align outputs with prompts. A higher value may decrease image quality. negative_prompt (`str` or `list[str]`, *optional*): Prompts indicating what to exclude in generation. If not specified, use `negative_prompt_embeds`. num_images_per_prompt (`int`, *optional*, defaults to 1): Number of images to generate for each prompt. eta (`float`, *optional*, defaults to 0.0): Applies to DDIM scheduling. Controlled by the eta parameter from the related literature. generator (`torch.Generator` or `list[torch.Generator]`, *optional*): A generator to ensure reproducibility in image generation. latents (`torch.Tensor`, *optional*): Predefined latent tensors to condition generation. prompt_embeds (`torch.Tensor`, *optional*): Text embeddings for the prompts. Overrides prompt string inputs for more flexibility. negative_prompt_embeds (`torch.Tensor`, *optional*): Embeddings for negative prompts. Overrides string inputs if defined. prompt_attention_mask (`torch.Tensor`, *optional*): Attention mask for the primary prompt embeddings. negative_prompt_attention_mask (`torch.Tensor`, *optional*): Attention mask for negative prompt embeddings. output_type (`str`, *optional*, defaults to "latent"): Format of the generated output, either as a PIL image or as a NumPy array. return_dict (`bool`, *optional*, defaults to `True`): If `True`, returns a structured output. 
Otherwise returns a simple tuple. callback_on_step_end (`Callable`, *optional*): Functions called at the end of each denoising step. callback_on_step_end_tensor_inputs (`list[str]`, *optional*): Tensor names to be included in callback function calls. guidance_rescale (`float`, *optional*, defaults to 0.0): Adjusts noise levels based on guidance scale. original_size (`tuple[int, int]`, *optional*, defaults to `(1024, 1024)`): Original dimensions of the output. target_size (`tuple[int, int]`, *optional*): Desired output dimensions for calculations. crops_coords_top_left (`tuple[int, int]`, *optional*, defaults to `(0, 0)`): Coordinates for cropping. Returns: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated images and the second element is a list of `bool`s indicating whether the corresponding generated image contains "not-safe-for-work" (nsfw) content. """ if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs # 0. default height and width height = int((height // 16) * 16) width = int((width // 16) * 16) # 1. Check inputs. Raise error if not correct self.check_inputs( prompt, height, width, negative_prompt, prompt_embeds, negative_prompt_embeds, prompt_attention_mask, negative_prompt_attention_mask, callback_on_step_end_tensor_inputs, ) self._guidance_scale = guidance_scale self._guidance_rescale = guidance_rescale self._interrupt = False # 2. 
Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device if self.text_encoder is not None: dtype = self.text_encoder.dtype else: dtype = self.transformer.dtype # 3. Encode input prompt ( prompt_embeds, negative_prompt_embeds, prompt_attention_mask, negative_prompt_attention_mask, ) = self.encode_prompt( prompt=prompt, device=device, dtype=dtype, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=self.do_classifier_free_guidance, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, prompt_attention_mask=prompt_attention_mask, negative_prompt_attention_mask=negative_prompt_attention_mask, ) # 4. Prepare timesteps if XLA_AVAILABLE: timestep_device = "cpu" else: timestep_device = device if isinstance(self.scheduler, FlowMatchEulerDiscreteScheduler): timesteps, num_inference_steps = retrieve_timesteps( self.scheduler, num_inference_steps, timestep_device, timesteps, mu=1 ) else: timesteps, num_inference_steps = retrieve_timesteps( self.scheduler, num_inference_steps, timestep_device, timesteps ) # 5. Prepare latent variables num_channels_latents = self.transformer.config.in_channels latents = self.prepare_latents( batch_size * num_images_per_prompt, num_channels_latents, num_frames, height, width, dtype, device, generator, latents, ) # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) if self.do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) prompt_attention_mask = torch.cat([negative_prompt_attention_mask, prompt_attention_mask]) prompt_embeds = prompt_embeds.to(device=device) prompt_attention_mask = prompt_attention_mask.to(device=device) # 7. 
Denoising loop num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order self._num_timesteps = len(timesteps) with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): if self.interrupt: continue # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents if hasattr(self.scheduler, "scale_model_input"): latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) # expand scalar t to 1-D tensor to match the 1st dim of latent_model_input t_expand = torch.tensor([t] * latent_model_input.shape[0], device=device).to( dtype=latent_model_input.dtype ) # predict the noise residual noise_pred = self.transformer( latent_model_input, t_expand, encoder_hidden_states=prompt_embeds, return_dict=False, )[0] if noise_pred.size()[1] != self.vae.config.latent_channels: noise_pred, _ = noise_pred.chunk(2, dim=1) # perform guidance if self.do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) if self.do_classifier_free_guidance and guidance_rescale > 0.0: # Based on 3.4. 
in https://huggingface.co/papers/2305.08891 noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop("latents", latents) prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if XLA_AVAILABLE: xm.mark_step() if not output_type == "latent": latents = 1 / self.vae.config.scaling_factor * latents video = self.vae.decode(latents, return_dict=False)[0] video = self.video_processor.postprocess_video(video=video, output_type=output_type) else: video = latents # Offload all models self.maybe_free_model_hooks() if not return_dict: return (video,) return EasyAnimatePipelineOutput(frames=video)
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/pipelines/easyanimate/pipeline_easyanimate.py", "license": "Apache License 2.0", "lines": 680, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:src/diffusers/pipelines/easyanimate/pipeline_easyanimate_control.py
# Copyright 2025 The EasyAnimate team and The HuggingFace Team. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect from typing import Callable import numpy as np import torch import torch.nn.functional as F from PIL import Image from transformers import ( BertModel, BertTokenizer, Qwen2Tokenizer, Qwen2VLForConditionalGeneration, ) from ...callbacks import MultiPipelineCallbacks, PipelineCallback from ...image_processor import VaeImageProcessor from ...models import AutoencoderKLMagvit, EasyAnimateTransformer3DModel from ...pipelines.pipeline_utils import DiffusionPipeline from ...schedulers import FlowMatchEulerDiscreteScheduler from ...utils import is_torch_xla_available, logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ...video_processor import VideoProcessor from .pipeline_output import EasyAnimatePipelineOutput if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```python >>> import torch >>> from diffusers import EasyAnimateControlPipeline >>> from diffusers.pipelines.easyanimate.pipeline_easyanimate_control import get_video_to_video_latent >>> from diffusers.utils import export_to_video, load_video >>> pipe = EasyAnimateControlPipeline.from_pretrained( ... 
"alibaba-pai/EasyAnimateV5.1-12b-zh-Control-diffusers", torch_dtype=torch.bfloat16 ... ) >>> pipe.to("cuda") >>> control_video = load_video( ... "https://huggingface.co/alibaba-pai/EasyAnimateV5.1-12b-zh-Control/blob/main/asset/pose.mp4" ... ) >>> prompt = ( ... "In this sunlit outdoor garden, a beautiful woman is dressed in a knee-length, sleeveless white dress. " ... "The hem of her dress gently sways with her graceful dance, much like a butterfly fluttering in the breeze. " ... "Sunlight filters through the leaves, casting dappled shadows that highlight her soft features and clear eyes, " ... "making her appear exceptionally elegant. It seems as if every movement she makes speaks of youth and vitality. " ... "As she twirls on the grass, her dress flutters, as if the entire garden is rejoicing in her dance. " ... "The colorful flowers around her sway in the gentle breeze, with roses, chrysanthemums, and lilies each " ... "releasing their fragrances, creating a relaxed and joyful atmosphere." ... ) >>> sample_size = (672, 384) >>> num_frames = 49 >>> input_video, _, _ = get_video_to_video_latent(control_video, num_frames, sample_size) >>> video = pipe( ... prompt, ... num_frames=num_frames, ... negative_prompt="Twisted body, limb deformities, text subtitles, comics, stillness, ugliness, errors, garbled text.", ... height=sample_size[0], ... width=sample_size[1], ... control_video=input_video, ... ).frames[0] >>> export_to_video(video, "output.mp4", fps=8) ``` """ def preprocess_image(image, sample_size): """ Preprocess a single image (PIL.Image, numpy.ndarray, or torch.Tensor) to a resized tensor. 
""" if isinstance(image, torch.Tensor): # If input is a tensor, assume it's in CHW format and resize using interpolation image = torch.nn.functional.interpolate( image.unsqueeze(0), size=sample_size, mode="bilinear", align_corners=False ).squeeze(0) elif isinstance(image, Image.Image): # If input is a PIL image, resize and convert to numpy array image = image.resize((sample_size[1], sample_size[0])) image = np.array(image) elif isinstance(image, np.ndarray): # If input is a numpy array, resize using PIL image = Image.fromarray(image).resize((sample_size[1], sample_size[0])) image = np.array(image) else: raise ValueError("Unsupported input type. Expected PIL.Image, numpy.ndarray, or torch.Tensor.") # Convert to tensor if not already if not isinstance(image, torch.Tensor): image = torch.from_numpy(image).permute(2, 0, 1).float() / 255.0 # HWC -> CHW, normalize to [0, 1] return image def get_video_to_video_latent(input_video, num_frames, sample_size, validation_video_mask=None, ref_image=None): if input_video is not None: # Convert each frame in the list to tensor input_video = [preprocess_image(frame, sample_size=sample_size) for frame in input_video] # Stack all frames into a single tensor (F, C, H, W) input_video = torch.stack(input_video)[:num_frames] # Add batch dimension (B, F, C, H, W) input_video = input_video.permute(1, 0, 2, 3).unsqueeze(0) if validation_video_mask is not None: # Handle mask input validation_video_mask = preprocess_image(validation_video_mask, size=sample_size) input_video_mask = torch.where(validation_video_mask < 240 / 255.0, 0.0, 255) # Adjust mask dimensions to match video input_video_mask = input_video_mask.unsqueeze(0).unsqueeze(-1).permute([3, 0, 1, 2]).unsqueeze(0) input_video_mask = torch.tile(input_video_mask, [1, 1, input_video.size()[2], 1, 1]) input_video_mask = input_video_mask.to(input_video.device, input_video.dtype) else: input_video_mask = torch.zeros_like(input_video[:, :1]) input_video_mask[:, :, :] = 255 else: 
input_video, input_video_mask = None, None if ref_image is not None: # Convert reference image to tensor ref_image = preprocess_image(ref_image, size=sample_size) ref_image = ref_image.permute(1, 0, 2, 3).unsqueeze(0) # Add batch dimension (B, C, H, W) else: ref_image = None return input_video, input_video_mask, ref_image # Similar to diffusers.pipelines.hunyuandit.pipeline_hunyuandit.get_resize_crop_region_for_grid def get_resize_crop_region_for_grid(src, tgt_width, tgt_height): tw = tgt_width th = tgt_height h, w = src r = h / w if r > (th / tw): resize_height = th resize_width = int(round(th / h * w)) else: resize_width = tw resize_height = int(round(tw / w * h)) crop_top = int(round((th - resize_height) / 2.0)) crop_left = int(round((tw - resize_width) / 2.0)) return (crop_top, crop_left), (crop_top + resize_height, crop_left + resize_width) # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): r""" Rescales `noise_cfg` tensor based on `guidance_rescale` to improve image quality and fix overexposure. Based on Section 3.4 from [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://huggingface.co/papers/2305.08891). Args: noise_cfg (`torch.Tensor`): The predicted noise tensor for the guided diffusion process. noise_pred_text (`torch.Tensor`): The predicted noise tensor for the text-guided diffusion process. guidance_rescale (`float`, *optional*, defaults to 0.0): A rescale factor applied to the noise predictions. Returns: noise_cfg (`torch.Tensor`): The rescaled noise prediction tensor. 
""" std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) # rescale the results from guidance (fixes overexposure) noise_pred_rescaled = noise_cfg * (std_text / std_cfg) # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg return noise_cfg # Resize mask information in magvit def resize_mask(mask, latent, process_first_frame_only=True): latent_size = latent.size() if process_first_frame_only: target_size = list(latent_size[2:]) target_size[0] = 1 first_frame_resized = F.interpolate( mask[:, :, 0:1, :, :], size=target_size, mode="trilinear", align_corners=False ) target_size = list(latent_size[2:]) target_size[0] = target_size[0] - 1 if target_size[0] != 0: remaining_frames_resized = F.interpolate( mask[:, :, 1:, :, :], size=target_size, mode="trilinear", align_corners=False ) resized_mask = torch.cat([first_frame_resized, remaining_frames_resized], dim=2) else: resized_mask = first_frame_resized else: target_size = list(latent_size[2:]) resized_mask = F.interpolate(mask, size=target_size, mode="trilinear", align_corners=False) return resized_mask # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps def retrieve_timesteps( scheduler, num_inference_steps: int | None = None, device: str | torch.device | None = None, timesteps: list[int] | None = None, sigmas: list[float] | None = None, **kwargs, ): r""" Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. Args: scheduler (`SchedulerMixin`): The scheduler to get timesteps from. num_inference_steps (`int`): The number of diffusion steps used when generating samples with a pre-trained model. 
If used, `timesteps` must be `None`. device (`str` or `torch.device`, *optional*): The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. timesteps (`list[int]`, *optional*): Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, `num_inference_steps` and `sigmas` must be `None`. sigmas (`list[float]`, *optional*): Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, `num_inference_steps` and `timesteps` must be `None`. Returns: `tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the second element is the number of inference steps. """ if timesteps is not None and sigmas is not None: raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") if timesteps is not None: accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" timestep schedules. Please check whether you are using the correct scheduler." ) scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif sigmas is not None: accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accept_sigmas: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" sigmas schedules. Please check whether you are using the correct scheduler." 
) scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return timesteps, num_inference_steps class EasyAnimateControlPipeline(DiffusionPipeline): r""" Pipeline for text-to-video generation using EasyAnimate. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) EasyAnimate uses one text encoder [qwen2 vl](https://huggingface.co/Qwen/Qwen2-VL-7B-Instruct) in V5.1. Args: vae ([`AutoencoderKLMagvit`]): Variational Auto-Encoder (VAE) Model to encode and decode video to and from latent representations. text_encoder (`~transformers.Qwen2VLForConditionalGeneration`, `~transformers.BertModel` | None): EasyAnimate uses [qwen2 vl](https://huggingface.co/Qwen/Qwen2-VL-7B-Instruct) in V5.1. tokenizer (`~transformers.Qwen2Tokenizer`, `~transformers.BertTokenizer` | None): A `Qwen2Tokenizer` or `BertTokenizer` to tokenize text. transformer ([`EasyAnimateTransformer3DModel`]): The EasyAnimate model designed by EasyAnimate Team. scheduler ([`FlowMatchEulerDiscreteScheduler`]): A scheduler to be used in combination with EasyAnimate to denoise the encoded image latents. 
""" model_cpu_offload_seq = "text_encoder->transformer->vae" _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"] def __init__( self, vae: AutoencoderKLMagvit, text_encoder: Qwen2VLForConditionalGeneration | BertModel, tokenizer: Qwen2Tokenizer | BertTokenizer, transformer: EasyAnimateTransformer3DModel, scheduler: FlowMatchEulerDiscreteScheduler, ): super().__init__() self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, transformer=transformer, scheduler=scheduler, ) self.enable_text_attention_mask = ( self.transformer.config.enable_text_attention_mask if getattr(self, "transformer", None) is not None else True ) self.vae_spatial_compression_ratio = ( self.vae.spatial_compression_ratio if getattr(self, "vae", None) is not None else 8 ) self.vae_temporal_compression_ratio = ( self.vae.temporal_compression_ratio if getattr(self, "vae", None) is not None else 4 ) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_spatial_compression_ratio) self.mask_processor = VaeImageProcessor( vae_scale_factor=self.vae_spatial_compression_ratio, do_normalize=False, do_binarize=True, do_convert_grayscale=True, ) self.video_processor = VideoProcessor(vae_scale_factor=self.vae_spatial_compression_ratio) # Copied from diffusers.pipelines.easyanimate.pipeline_easyanimate.EasyAnimatePipeline.encode_prompt def encode_prompt( self, prompt: str | list[str], num_images_per_prompt: int = 1, do_classifier_free_guidance: bool = True, negative_prompt: str | list[str] | None = None, prompt_embeds: torch.Tensor | None = None, negative_prompt_embeds: torch.Tensor | None = None, prompt_attention_mask: torch.Tensor | None = None, negative_prompt_attention_mask: torch.Tensor | None = None, device: torch.device | None = None, dtype: torch.dtype | None = None, max_sequence_length: int = 256, ): r""" Encodes the prompt into text encoder hidden states. 
Args: prompt (`str` or `list[str]`, *optional*): prompt to be encoded device: (`torch.device`): torch device dtype (`torch.dtype`): torch dtype num_images_per_prompt (`int`): number of images that should be generated per prompt do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not negative_prompt (`str` or `list[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. prompt_attention_mask (`torch.Tensor`, *optional*): Attention mask for the prompt. Required when `prompt_embeds` is passed directly. negative_prompt_attention_mask (`torch.Tensor`, *optional*): Attention mask for the negative prompt. Required when `negative_prompt_embeds` is passed directly. max_sequence_length (`int`, *optional*): maximum sequence length to use for the prompt. 
""" dtype = dtype or self.text_encoder.dtype device = device or self.text_encoder.device if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: if isinstance(prompt, str): messages = [ { "role": "user", "content": [{"type": "text", "text": prompt}], } ] else: messages = [ { "role": "user", "content": [{"type": "text", "text": _prompt}], } for _prompt in prompt ] text = [ self.tokenizer.apply_chat_template([m], tokenize=False, add_generation_prompt=True) for m in messages ] text_inputs = self.tokenizer( text=text, padding="max_length", max_length=max_sequence_length, truncation=True, return_attention_mask=True, padding_side="right", return_tensors="pt", ) text_inputs = text_inputs.to(self.text_encoder.device) text_input_ids = text_inputs.input_ids prompt_attention_mask = text_inputs.attention_mask if self.enable_text_attention_mask: # Inference: Generation of the output prompt_embeds = self.text_encoder( input_ids=text_input_ids, attention_mask=prompt_attention_mask, output_hidden_states=True ).hidden_states[-2] else: raise ValueError("LLM needs attention_mask") prompt_attention_mask = prompt_attention_mask.repeat(num_images_per_prompt, 1) prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) bs_embed, seq_len, _ = prompt_embeds.shape # duplicate text embeddings for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) prompt_attention_mask = prompt_attention_mask.to(device=device) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance and negative_prompt_embeds is None: if negative_prompt is not None and isinstance(negative_prompt, str): messages = [ { "role": "user", "content": [{"type": "text", "text": negative_prompt}], 
} ] else: messages = [ { "role": "user", "content": [{"type": "text", "text": _negative_prompt}], } for _negative_prompt in negative_prompt ] text = [ self.tokenizer.apply_chat_template([m], tokenize=False, add_generation_prompt=True) for m in messages ] text_inputs = self.tokenizer( text=text, padding="max_length", max_length=max_sequence_length, truncation=True, return_attention_mask=True, padding_side="right", return_tensors="pt", ) text_inputs = text_inputs.to(self.text_encoder.device) text_input_ids = text_inputs.input_ids negative_prompt_attention_mask = text_inputs.attention_mask if self.enable_text_attention_mask: # Inference: Generation of the output negative_prompt_embeds = self.text_encoder( input_ids=text_input_ids, attention_mask=negative_prompt_attention_mask, output_hidden_states=True, ).hidden_states[-2] else: raise ValueError("LLM needs attention_mask") negative_prompt_attention_mask = negative_prompt_attention_mask.repeat(num_images_per_prompt, 1) if do_classifier_free_guidance: # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) negative_prompt_attention_mask = negative_prompt_attention_mask.to(device=device) return prompt_embeds, negative_prompt_embeds, prompt_attention_mask, negative_prompt_attention_mask # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
        # eta corresponds to η in DDIM paper: https://huggingface.co/papers/2010.02502
        # and should be between [0, 1]

        # only forward `eta` if this scheduler's `step()` signature actually accepts it
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        # check if the scheduler accepts generator
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs["generator"] = generator
        return extra_step_kwargs

    def check_inputs(
        self,
        prompt,
        height,
        width,
        negative_prompt=None,
        prompt_embeds=None,
        negative_prompt_embeds=None,
        prompt_attention_mask=None,
        negative_prompt_attention_mask=None,
        callback_on_step_end_tensor_inputs=None,
    ):
        """Validate the argument combination passed to `__call__`, raising `ValueError` on misuse.

        Checks: spatial divisibility of `height`/`width`, that requested callback tensor
        names are exposed by this pipeline, mutual exclusivity of `prompt`/`prompt_embeds`
        (and their negative counterparts), that attention masks accompany directly-passed
        embeddings, and that positive/negative embedding shapes match.
        """
        # height/width must map cleanly onto the latent grid (divisible by 16)
        if height % 16 != 0 or width % 16 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 16 but are {height} and {width}.")

        # every requested callback tensor must be one the pipeline actually exposes
        if callback_on_step_end_tensor_inputs is not None and not all(
            k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
        ):
            raise ValueError(
                f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
            )

        # `prompt` and `prompt_embeds` are mutually exclusive, but exactly one is required
        if prompt is not None and prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
                " only forward one of the two."
            )
        elif prompt is None and prompt_embeds is None:
            raise ValueError(
                "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
            )
        elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        # embeddings passed directly must come with their attention masks
        if prompt_embeds is not None and prompt_attention_mask is None:
            raise ValueError("Must provide `prompt_attention_mask` when specifying `prompt_embeds`.")

        # `negative_prompt` and `negative_prompt_embeds` are mutually exclusive (both optional)
        if negative_prompt is not None and negative_prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
                f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
            )

        if negative_prompt_embeds is not None and negative_prompt_attention_mask is None:
            raise ValueError("Must provide `negative_prompt_attention_mask` when specifying `negative_prompt_embeds`.")

        # positive/negative embeddings are concatenated for CFG, so their shapes must match
        if prompt_embeds is not None and negative_prompt_embeds is not None:
            if prompt_embeds.shape != negative_prompt_embeds.shape:
                raise ValueError(
                    "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
                    f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
                    f" {negative_prompt_embeds.shape}."
                )

    def prepare_latents(
        self, batch_size, num_channels_latents, num_frames, height, width, dtype, device, generator, latents=None
    ):
        """Create (or pass through) the initial noise latents for the video.

        Returns a tensor of shape (batch, channels, latent_frames, latent_height, latent_width),
        where spatial/temporal sizes are the pixel sizes divided by the VAE compression ratios.
        If `latents` is given it is only moved to the requested device/dtype.
        """
        if latents is not None:
            return latents.to(device=device, dtype=dtype)

        shape = (
            batch_size,
            num_channels_latents,
            # first frame is kept as-is; remaining frames are temporally compressed by the VAE
            (num_frames - 1) // self.vae_temporal_compression_ratio + 1,
            height // self.vae_spatial_compression_ratio,
            width // self.vae_spatial_compression_ratio,
        )

        # a per-sample generator list must match the effective batch size
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # scale the initial noise by the standard deviation required by the scheduler
        # (only schedulers exposing `init_noise_sigma`; flow-matching schedulers don't)
        if hasattr(self.scheduler, "init_noise_sigma"):
            latents = latents * self.scheduler.init_noise_sigma
        return latents

    def prepare_control_latents(
        self, control, control_image, batch_size, height, width, dtype, device, generator, do_classifier_free_guidance
    ):
        """Encode control tensors into VAE latent space.

        Returns `(control, control_image_latents)`; each element stays `None` when the
        corresponding input is `None`. Encoding is done sample-by-sample to bound peak
        VAE memory. NOTE(review): `batch_size`, `generator` and
        `do_classifier_free_guidance` are accepted but unused in this body — presumably
        kept for signature parity with sibling pipelines; confirm before removing.
        """
        # resize the control to latents shape as we concatenate the control to the latents
        # we do that before converting to dtype to avoid breaking in case we're using cpu_offload
        # and half precision
        if control is not None:
            control = control.to(device=device, dtype=dtype)
            bs = 1
            new_control = []
            # encode one sample at a time to keep VAE memory usage low
            for i in range(0, control.shape[0], bs):
                control_bs = control[i : i + bs]
                control_bs = self.vae.encode(control_bs)[0]
                # take the distribution mode (deterministic) rather than sampling
                control_bs = control_bs.mode()
                new_control.append(control_bs)
            control = torch.cat(new_control, dim=0)
            control = control * self.vae.config.scaling_factor

        if control_image is not None:
            control_image = control_image.to(device=device, dtype=dtype)
            bs = 1
            new_control_pixel_values = []
            for i in range(0, control_image.shape[0], bs):
                control_pixel_values_bs = control_image[i : i + bs]
                control_pixel_values_bs = self.vae.encode(control_pixel_values_bs)[0]
                control_pixel_values_bs = control_pixel_values_bs.mode()
                new_control_pixel_values.append(control_pixel_values_bs)
            control_image_latents = torch.cat(new_control_pixel_values, dim=0)
            control_image_latents = control_image_latents * self.vae.config.scaling_factor
        else:
            control_image_latents = None

        return control, control_image_latents

    @property
    def guidance_scale(self):
        # CFG weight set for the current `__call__` invocation
        return self._guidance_scale

    @property
    def guidance_rescale(self):
        # rescale factor from https://huggingface.co/papers/2305.08891 (0.0 disables it)
        return self._guidance_rescale

    # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
    # of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1`
    # corresponds to doing no classifier free guidance.
    @property
    def do_classifier_free_guidance(self):
        # CFG is active only when the guidance weight exceeds 1
        return self._guidance_scale > 1

    @property
    def num_timesteps(self):
        # number of timesteps in the schedule of the current run
        return self._num_timesteps

    @property
    def interrupt(self):
        # set via step-end callbacks to abort the denoising loop early
        return self._interrupt

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        prompt: str | list[str] = None,
        num_frames: int | None = 49,
        height: int | None = 512,
        width: int | None = 512,
        control_video: torch.FloatTensor = None,
        control_camera_video: torch.FloatTensor = None,
        ref_image: torch.FloatTensor = None,
        num_inference_steps: int | None = 50,
        guidance_scale: float | None = 5.0,
        negative_prompt: str | list[str] | None = None,
        num_images_per_prompt: int | None = 1,
        eta: float | None = 0.0,
        generator: torch.Generator | list[torch.Generator] | None = None,
        latents: torch.Tensor | None = None,
        prompt_embeds: torch.Tensor | None = None,
        negative_prompt_embeds: torch.Tensor | None = None,
        prompt_attention_mask: torch.Tensor | None = None,
        negative_prompt_attention_mask: torch.Tensor | None = None,
        output_type: str | None = "pil",
        return_dict: bool = True,
        callback_on_step_end: Callable[[int, int], None] | PipelineCallback | MultiPipelineCallbacks | None = None,
        callback_on_step_end_tensor_inputs: list[str] = ["latents"],
        guidance_rescale: float = 0.0,
        timesteps: list[int] | None = None,
    ):
        r"""
        Generates images or video using the EasyAnimate pipeline based on the provided prompts.

        Examples:

        Args:
            prompt (`str` or `list[str]`, *optional*):
                Text prompts to guide the image or video generation. If not provided, use `prompt_embeds` instead.
            num_frames (`int`, *optional*):
                Length of the generated video (in frames).
            height (`int`, *optional*):
                Height of the generated image in pixels.
            width (`int`, *optional*):
                Width of the generated image in pixels.
            control_video (`torch.FloatTensor`, *optional*):
                Control video of shape (batch, channels, frames, height, width) that conditions the generation.
            control_camera_video (`torch.FloatTensor`, *optional*):
                Camera-control video; takes precedence over `control_video` when both are given.
            ref_image (`torch.FloatTensor`, *optional*):
                Reference image conditioning, injected into the first latent frame.
            num_inference_steps (`int`, *optional*, defaults to 50):
                Number of denoising steps during generation. More steps generally yield higher quality images but
                slow down inference.
            guidance_scale (`float`, *optional*, defaults to 5.0):
                Encourages the model to align outputs with prompts. A higher value may decrease image quality.
            negative_prompt (`str` or `list[str]`, *optional*):
                Prompts indicating what to exclude in generation. If not specified, use `negative_prompt_embeds`.
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                Number of images to generate for each prompt.
            eta (`float`, *optional*, defaults to 0.0):
                Applies to DDIM scheduling. Controlled by the eta parameter from the related literature.
            generator (`torch.Generator` or `list[torch.Generator]`, *optional*):
                A generator to ensure reproducibility in image generation.
            latents (`torch.Tensor`, *optional*):
                Predefined latent tensors to condition generation.
            prompt_embeds (`torch.Tensor`, *optional*):
                Text embeddings for the prompts. Overrides prompt string inputs for more flexibility.
            negative_prompt_embeds (`torch.Tensor`, *optional*):
                Embeddings for negative prompts. Overrides string inputs if defined.
            prompt_attention_mask (`torch.Tensor`, *optional*):
                Attention mask for the primary prompt embeddings.
            negative_prompt_attention_mask (`torch.Tensor`, *optional*):
                Attention mask for negative prompt embeddings.
            output_type (`str`, *optional*, defaults to `"pil"`):
                Format of the generated output, either as a PIL image or as a NumPy array.
            return_dict (`bool`, *optional*, defaults to `True`):
                If `True`, returns a structured output. Otherwise returns a simple tuple.
            callback_on_step_end (`Callable`, *optional*):
                Functions called at the end of each denoising step.
            callback_on_step_end_tensor_inputs (`list[str]`, *optional*):
                Tensor names to be included in callback function calls.
            guidance_rescale (`float`, *optional*, defaults to 0.0):
                Adjusts noise levels based on guidance scale.
            timesteps (`list[int]`, *optional*):
                Custom timesteps overriding the scheduler's default spacing.
        Returns:
            [`~pipelines.easyanimate.pipeline_output.EasyAnimatePipelineOutput`] or `tuple`:
                If `return_dict` is `True`, an [`EasyAnimatePipelineOutput`] carrying the generated video frames is
                returned; otherwise a `tuple` whose first element is the generated frames (or raw latents when
                `output_type="latent"`).
        """

        # a structured callback object declares which tensors it wants to receive
        if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)):
            callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs

        # 0. default height and width — snap down to multiples of 16
        height = int((height // 16) * 16)
        width = int((width // 16) * 16)

        # 1. Check inputs. Raise error if not correct
        self.check_inputs(
            prompt,
            height,
            width,
            negative_prompt,
            prompt_embeds,
            negative_prompt_embeds,
            prompt_attention_mask,
            negative_prompt_attention_mask,
            callback_on_step_end_tensor_inputs,
        )
        self._guidance_scale = guidance_scale
        self._guidance_rescale = guidance_rescale
        self._interrupt = False

        # 2. Define call parameters
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        device = self._execution_device
        if self.text_encoder is not None:
            dtype = self.text_encoder.dtype
        else:
            dtype = self.transformer.dtype

        # 3. Encode input prompt
        (
            prompt_embeds,
            negative_prompt_embeds,
            prompt_attention_mask,
            negative_prompt_attention_mask,
        ) = self.encode_prompt(
            prompt=prompt,
            device=device,
            dtype=dtype,
            num_images_per_prompt=num_images_per_prompt,
            do_classifier_free_guidance=self.do_classifier_free_guidance,
            negative_prompt=negative_prompt,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            prompt_attention_mask=prompt_attention_mask,
            negative_prompt_attention_mask=negative_prompt_attention_mask,
            text_encoder_index=0,
        )

        # 4. Prepare timesteps
        # NOTE(review): timesteps are built on CPU under XLA — presumably to avoid device
        # graph recompilation per step; confirm before changing.
        if XLA_AVAILABLE:
            timestep_device = "cpu"
        else:
            timestep_device = device
        # flow-matching schedulers additionally take a `mu` shift parameter
        if isinstance(self.scheduler, FlowMatchEulerDiscreteScheduler):
            timesteps, num_inference_steps = retrieve_timesteps(
                self.scheduler, num_inference_steps, timestep_device, timesteps, mu=1
            )
        else:
            timesteps, num_inference_steps = retrieve_timesteps(
                self.scheduler, num_inference_steps, timestep_device, timesteps
            )
        timesteps = self.scheduler.timesteps

        # 5. Prepare latent variables
        num_channels_latents = self.vae.config.latent_channels
        latents = self.prepare_latents(
            batch_size * num_images_per_prompt,
            num_channels_latents,
            num_frames,
            height,
            width,
            dtype,
            device,
            generator,
            latents,
        )

        # Build the control conditioning: camera-control video takes precedence, then a
        # regular control video, else an all-zero placeholder of the latent shape.
        if control_camera_video is not None:
            control_video_latents = resize_mask(control_camera_video, latents, process_first_frame_only=True)
            # NOTE(review): magic scale 6 applied to camera-control latents — meaning not
            # documented here; verify against the EasyAnimate training code.
            control_video_latents = control_video_latents * 6
            control_latents = (
                torch.cat([control_video_latents] * 2) if self.do_classifier_free_guidance else control_video_latents
            ).to(device, dtype)
        elif control_video is not None:
            batch_size, channels, num_frames, height_video, width_video = control_video.shape
            # flatten frames into the batch dim for 2D preprocessing, then restore
            control_video = self.image_processor.preprocess(
                control_video.permute(0, 2, 1, 3, 4).reshape(
                    batch_size * num_frames, channels, height_video, width_video
                ),
                height=height,
                width=width,
            )
            control_video = control_video.to(dtype=torch.float32)
            control_video = control_video.reshape(batch_size, num_frames, channels, height, width).permute(
                0, 2, 1, 3, 4
            )
            control_video_latents = self.prepare_control_latents(
                None,
                control_video,
                batch_size,
                height,
                width,
                dtype,
                device,
                generator,
                self.do_classifier_free_guidance,
            )[1]
            control_latents = (
                torch.cat([control_video_latents] * 2) if self.do_classifier_free_guidance else control_video_latents
            ).to(device, dtype)
        else:
            control_video_latents = torch.zeros_like(latents).to(device, dtype)
            control_latents = (
                torch.cat([control_video_latents] * 2) if self.do_classifier_free_guidance else control_video_latents
            ).to(device, dtype)

        # Reference-image conditioning: encoded into the first latent frame slot and
        # concatenated onto the control latents along the channel dim.
        if ref_image is not None:
            batch_size, channels, num_frames, height_video, width_video = ref_image.shape
            ref_image = self.image_processor.preprocess(
                ref_image.permute(0, 2, 1, 3, 4).reshape(batch_size * num_frames, channels, height_video, width_video),
                height=height,
                width=width,
            )
            ref_image = ref_image.to(dtype=torch.float32)
            ref_image = ref_image.reshape(batch_size, num_frames, channels, height, width).permute(0, 2, 1, 3, 4)

            ref_image_latents = self.prepare_control_latents(
                None,
                ref_image,
                batch_size,
                height,
                width,
                prompt_embeds.dtype,
                device,
                generator,
                self.do_classifier_free_guidance,
            )[1]

            # write the reference into the first latent frame only (skipped for single-frame latents)
            ref_image_latents_conv_in = torch.zeros_like(latents)
            if latents.size()[2] != 1:
                ref_image_latents_conv_in[:, :, :1] = ref_image_latents
            ref_image_latents_conv_in = (
                torch.cat([ref_image_latents_conv_in] * 2)
                if self.do_classifier_free_guidance
                else ref_image_latents_conv_in
            ).to(device, dtype)
            control_latents = torch.cat([control_latents, ref_image_latents_conv_in], dim=1)
        else:
            # no reference image: concatenate an all-zero placeholder so channel count is constant
            ref_image_latents_conv_in = torch.zeros_like(latents)
            ref_image_latents_conv_in = (
                torch.cat([ref_image_latents_conv_in] * 2)
                if self.do_classifier_free_guidance
                else ref_image_latents_conv_in
            ).to(device, dtype)
            control_latents = torch.cat([control_latents, ref_image_latents_conv_in], dim=1)

        # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
        extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)

        # For CFG the unconditional embeddings are stacked in front of the conditional ones
        if self.do_classifier_free_guidance:
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
            prompt_attention_mask = torch.cat([negative_prompt_attention_mask, prompt_attention_mask])

        # To latents.device
        prompt_embeds = prompt_embeds.to(device=device)
        prompt_attention_mask = prompt_attention_mask.to(device=device)

        # 7. Denoising loop
        num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
        self._num_timesteps = len(timesteps)
        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                if self.interrupt:
                    continue

                # expand the latents if we are doing classifier free guidance
                latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
                if hasattr(self.scheduler, "scale_model_input"):
                    latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

                # expand scalar t to 1-D tensor to match the 1st dim of latent_model_input
                t_expand = torch.tensor([t] * latent_model_input.shape[0], device=device).to(
                    dtype=latent_model_input.dtype
                )

                # predict the noise residual
                noise_pred = self.transformer(
                    latent_model_input,
                    t_expand,
                    encoder_hidden_states=prompt_embeds,
                    control_latents=control_latents,
                    return_dict=False,
                )[0]
                # some transformer variants emit extra channels; keep only the noise half
                if noise_pred.size()[1] != self.vae.config.latent_channels:
                    noise_pred, _ = noise_pred.chunk(2, dim=1)

                # perform guidance
                if self.do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

                if self.do_classifier_free_guidance and guidance_rescale > 0.0:
                    # Based on 3.4. in https://huggingface.co/papers/2305.08891
                    noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale)

                # compute the previous noisy sample x_t -> x_t-1
                latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]

                if callback_on_step_end is not None:
                    callback_kwargs = {}
                    # NOTE(review): tensors are looked up via locals(); only names bound in
                    # this scope (e.g. "latents", "prompt_embeds") can be requested.
                    for k in callback_on_step_end_tensor_inputs:
                        callback_kwargs[k] = locals()[k]
                    callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)

                    # callbacks may replace these tensors mid-run
                    latents = callback_outputs.pop("latents", latents)
                    prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
                    negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)

                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                    progress_bar.update()

                if XLA_AVAILABLE:
                    xm.mark_step()

        # Convert to tensor
        if not output_type == "latent":
            video = self.decode_latents(latents)
            video = self.video_processor.postprocess_video(video=video, output_type=output_type)
        else:
            video = latents

        # Offload all models
        self.maybe_free_model_hooks()

        if not return_dict:
            return (video,)

        return EasyAnimatePipelineOutput(frames=video)
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/pipelines/easyanimate/pipeline_easyanimate_control.py", "license": "Apache License 2.0", "lines": 879, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:src/diffusers/pipelines/easyanimate/pipeline_easyanimate_inpaint.py
# Copyright 2025 The EasyAnimate team and The HuggingFace Team.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import inspect
from typing import Callable

import numpy as np
import torch
import torch.nn.functional as F
from PIL import Image
from transformers import (
    BertModel,
    BertTokenizer,
    Qwen2Tokenizer,
    Qwen2VLForConditionalGeneration,
)

from ...callbacks import MultiPipelineCallbacks, PipelineCallback
from ...image_processor import VaeImageProcessor
from ...models import AutoencoderKLMagvit, EasyAnimateTransformer3DModel
from ...pipelines.pipeline_utils import DiffusionPipeline
from ...schedulers import FlowMatchEulerDiscreteScheduler
from ...utils import is_torch_xla_available, logging, replace_example_docstring
from ...utils.torch_utils import randn_tensor
from ...video_processor import VideoProcessor
from .pipeline_output import EasyAnimatePipelineOutput


if is_torch_xla_available():
    import torch_xla.core.xla_model as xm

    XLA_AVAILABLE = True
else:
    XLA_AVAILABLE = False

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> import torch
        >>> from diffusers import EasyAnimateInpaintPipeline
        >>> from diffusers.pipelines.easyanimate.pipeline_easyanimate_inpaint import get_image_to_video_latent
        >>> from diffusers.utils import export_to_video, load_image

        >>> pipe = EasyAnimateInpaintPipeline.from_pretrained(
        ...     "alibaba-pai/EasyAnimateV5.1-12b-zh-InP-diffusers", torch_dtype=torch.bfloat16
        ... )
        >>> pipe.to("cuda")

        >>> prompt = "An astronaut hatching from an egg, on the surface of the moon, the darkness and depth of space realised in the background. High quality, ultrarealistic detail and breath-taking movie-like camera shot."
        >>> validation_image_start = load_image(
        ...     "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/astronaut.jpg"
        ... )
        >>> validation_image_end = None
        >>> sample_size = (448, 576)
        >>> num_frames = 49
        >>> input_video, input_video_mask = get_image_to_video_latent(
        ...     [validation_image_start], validation_image_end, num_frames, sample_size
        ... )

        >>> video = pipe(
        ...     prompt,
        ...     num_frames=num_frames,
        ...     negative_prompt="Twisted body, limb deformities, text subtitles, comics, stillness, ugliness, errors, garbled text.",
        ...     height=sample_size[0],
        ...     width=sample_size[1],
        ...     video=input_video,
        ...     mask_video=input_video_mask,
        ... )
        >>> export_to_video(video.frames[0], "output.mp4", fps=8)
        ```
"""


def preprocess_image(image, sample_size):
    """
    Preprocess a single image (PIL.Image, numpy.ndarray, or torch.Tensor) to a resized tensor.

    `sample_size` is (height, width). PIL/numpy inputs are returned as float CHW tensors
    normalized to [0, 1]; tensor inputs are only resized — they are assumed to already be
    CHW and in the expected value range (TODO confirm callers honor this).
    """
    if isinstance(image, torch.Tensor):
        # If input is a tensor, assume it's in CHW format and resize using interpolation
        image = torch.nn.functional.interpolate(
            image.unsqueeze(0), size=sample_size, mode="bilinear", align_corners=False
        ).squeeze(0)
    elif isinstance(image, Image.Image):
        # If input is a PIL image, resize and convert to numpy array
        # (PIL resize takes (width, height), hence the reversed sample_size)
        image = image.resize((sample_size[1], sample_size[0]))
        image = np.array(image)
    elif isinstance(image, np.ndarray):
        # If input is a numpy array, resize using PIL
        image = Image.fromarray(image).resize((sample_size[1], sample_size[0]))
        image = np.array(image)
    else:
        raise ValueError("Unsupported input type. Expected PIL.Image, numpy.ndarray, or torch.Tensor.")

    # Convert to tensor if not already
    if not isinstance(image, torch.Tensor):
        image = torch.from_numpy(image).permute(2, 0, 1).float() / 255.0  # HWC -> CHW, normalize to [0, 1]

    return image


def get_image_to_video_latent(validation_image_start, validation_image_end, num_frames, sample_size):
    """
    Generate latent representations for video from start and end images. Inputs can be PIL.Image, numpy.ndarray, or
    torch.Tensor.

    Returns `(input_video, input_video_mask)`: a (1, 3, num_frames, H, W) video tensor and a
    (1, 1, num_frames, H, W) mask where 0 marks known (conditioning) frames and 255 marks
    frames to be generated. Both are `None` when `validation_image_start` is `None` but
    `validation_image_end` is given.
    """
    input_video = None
    input_video_mask = None

    if validation_image_start is not None:
        # Preprocess the starting image(s)
        if isinstance(validation_image_start, list):
            image_start = [preprocess_image(img, sample_size) for img in validation_image_start]
        else:
            image_start = preprocess_image(validation_image_start, sample_size)

        # Create video tensor from the starting image(s): tile the first frame across time,
        # then overwrite the leading frames with the provided images
        if isinstance(image_start, list):
            start_video = torch.cat(
                [img.unsqueeze(1).unsqueeze(0) for img in image_start],
                dim=2,
            )
            input_video = torch.tile(start_video[:, :, :1], [1, 1, num_frames, 1, 1])
            input_video[:, :, : len(image_start)] = start_video
        else:
            input_video = torch.tile(
                image_start.unsqueeze(1).unsqueeze(0),
                [1, 1, num_frames, 1, 1],
            )

        # Normalize input video (already normalized in preprocess_image)

        # Create mask for the input video: 0 = known frame, 255 = frame to generate
        input_video_mask = torch.zeros_like(input_video[:, :1])
        if isinstance(image_start, list):
            input_video_mask[:, :, len(image_start) :] = 255
        else:
            input_video_mask[:, :, 1:] = 255

        # Handle ending image(s) if provided
        if validation_image_end is not None:
            if isinstance(validation_image_end, list):
                image_end = [preprocess_image(img, sample_size) for img in validation_image_end]
                end_video = torch.cat(
                    [img.unsqueeze(1).unsqueeze(0) for img in image_end],
                    dim=2,
                )
                # NOTE(review): `len(end_video)` is the batch dimension (always 1 here),
                # while the mask below uses `len(image_end)` (number of end frames) —
                # presumably both were meant to address the frame axis; verify.
                input_video[:, :, -len(end_video) :] = end_video

                input_video_mask[:, :, -len(image_end) :] = 0
            else:
                image_end = preprocess_image(validation_image_end, sample_size)
                input_video[:, :, -1:] = image_end.unsqueeze(1).unsqueeze(0)
                input_video_mask[:, :, -1:] = 0

    elif validation_image_start is None:
        # If no starting image is provided, initialize empty tensors
        input_video = torch.zeros([1, 3, num_frames, sample_size[0], sample_size[1]])
        input_video_mask = torch.ones([1, 1, num_frames, sample_size[0], sample_size[1]]) * 255

    return input_video, input_video_mask


# Similar to diffusers.pipelines.hunyuandit.pipeline_hunyuandit.get_resize_crop_region_for_grid
def get_resize_crop_region_for_grid(src, tgt_width, tgt_height):
    """Compute the centered crop region obtained when resizing `src` (h, w) to fit (tgt_height, tgt_width)."""
    tw = tgt_width
    th = tgt_height
    h, w = src
    r = h / w

    # resize to fill the target along the tighter dimension, preserving aspect ratio
    if r > (th / tw):
        resize_height = th
        resize_width = int(round(th / h * w))
    else:
        resize_width = tw
        resize_height = int(round(tw / w * h))

    # center the resized region inside the target grid
    crop_top = int(round((th - resize_height) / 2.0))
    crop_left = int(round((tw - resize_width) / 2.0))

    return (crop_top, crop_left), (crop_top + resize_height, crop_left + resize_width)


# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg
def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
    r"""
    Rescales `noise_cfg` tensor based on `guidance_rescale` to improve image quality and fix overexposure. Based on
    Section 3.4 from [Common Diffusion Noise Schedules and Sample Steps are
    Flawed](https://huggingface.co/papers/2305.08891).

    Args:
        noise_cfg (`torch.Tensor`):
            The predicted noise tensor for the guided diffusion process.
        noise_pred_text (`torch.Tensor`):
            The predicted noise tensor for the text-guided diffusion process.
        guidance_rescale (`float`, *optional*, defaults to 0.0):
            A rescale factor applied to the noise predictions.

    Returns:
        noise_cfg (`torch.Tensor`): The rescaled noise prediction tensor.
    """
    # per-sample std over all non-batch dims
    std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
    std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
    # rescale the results from guidance (fixes overexposure)
    noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
    # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images
    noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
    return noise_cfg


# Resize mask information in magvit
def resize_mask(mask, latent, process_first_frame_only=True):
    """Trilinearly resize a 5-D mask to `latent`'s (frames, height, width).

    When `process_first_frame_only` is set, the first mask frame is resized to a single
    latent frame and the remaining frames to the remaining latent frames — mirroring the
    VAE's special handling of the first frame.
    """
    latent_size = latent.size()

    if process_first_frame_only:
        # resize frame 0 to exactly one latent frame
        target_size = list(latent_size[2:])
        target_size[0] = 1
        first_frame_resized = F.interpolate(
            mask[:, :, 0:1, :, :], size=target_size, mode="trilinear", align_corners=False
        )

        # resize the remaining frames to the remaining latent frames (if any)
        target_size = list(latent_size[2:])
        target_size[0] = target_size[0] - 1
        if target_size[0] != 0:
            remaining_frames_resized = F.interpolate(
                mask[:, :, 1:, :, :], size=target_size, mode="trilinear", align_corners=False
            )
            resized_mask = torch.cat([first_frame_resized, remaining_frames_resized], dim=2)
        else:
            resized_mask = first_frame_resized
    else:
        target_size = list(latent_size[2:])
        resized_mask = F.interpolate(mask, size=target_size, mode="trilinear", align_corners=False)
    return resized_mask


## Add noise to reference video
def add_noise_to_reference_video(image, ratio=None, generator=None):
    """Add Gaussian noise to a reference video tensor.

    With `ratio=None` the per-sample noise scale is drawn from a log-normal
    (exp of N(-3, 0.5)); otherwise `ratio` is used as a fixed scale. Pixels equal
    to -1 (padding sentinel in this codebase — TODO confirm) receive no noise.
    """
    if ratio is None:
        sigma = torch.normal(mean=-3.0, std=0.5, size=(image.shape[0],)).to(image.device)
        sigma = torch.exp(sigma).to(image.dtype)
    else:
        sigma = torch.ones((image.shape[0],)).to(image.device, image.dtype) * ratio

    if generator is not None:
        image_noise = (
            torch.randn(image.size(), generator=generator, dtype=image.dtype, device=image.device)
            * sigma[:, None, None, None, None]
        )
    else:
        image_noise = torch.randn_like(image) * sigma[:, None, None, None, None]
    # leave sentinel (-1) pixels untouched
    image_noise = torch.where(image == -1, torch.zeros_like(image), image_noise)
    image = image + image_noise
    return image


# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps
def retrieve_timesteps(
    scheduler,
    num_inference_steps: int | None = None,
    device: str | torch.device | None = None,
    timesteps: list[int] | None = None,
    sigmas: list[float] | None = None,
    **kwargs,
):
    r"""
    Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
    custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.

    Args:
        scheduler (`SchedulerMixin`):
            The scheduler to get timesteps from.
        num_inference_steps (`int`):
            The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps`
            must be `None`.
        device (`str` or `torch.device`, *optional*):
            The device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
        timesteps (`list[int]`, *optional*):
            Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed,
            `num_inference_steps` and `sigmas` must be `None`.
        sigmas (`list[float]`, *optional*):
            Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed,
            `num_inference_steps` and `timesteps` must be `None`.

    Returns:
        `tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
        second element is the number of inference steps.
    """
    if timesteps is not None and sigmas is not None:
        raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values")
    if timesteps is not None:
        # custom timesteps are only valid if the scheduler's signature accepts them
        accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
        if not accepts_timesteps:
            raise ValueError(
                f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
                f" timestep schedules. Please check whether you are using the correct scheduler."
) scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif sigmas is not None: accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accept_sigmas: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" sigmas schedules. Please check whether you are using the correct scheduler." ) scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return timesteps, num_inference_steps class EasyAnimateInpaintPipeline(DiffusionPipeline): r""" Pipeline for text-to-video generation using EasyAnimate. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) EasyAnimate uses one text encoder [qwen2 vl](https://huggingface.co/Qwen/Qwen2-VL-7B-Instruct) in V5.1. Args: vae ([`AutoencoderKLMagvit`]): Variational Auto-Encoder (VAE) Model to encode and decode video to and from latent representations. text_encoder (`~transformers.Qwen2VLForConditionalGeneration`, `~transformers.BertModel` | None): EasyAnimate uses [qwen2 vl](https://huggingface.co/Qwen/Qwen2-VL-7B-Instruct) in V5.1. tokenizer (`~transformers.Qwen2Tokenizer`, `~transformers.BertTokenizer` | None): A `Qwen2Tokenizer` or `BertTokenizer` to tokenize text. transformer ([`EasyAnimateTransformer3DModel`]): The EasyAnimate model designed by EasyAnimate Team. scheduler ([`FlowMatchEulerDiscreteScheduler`]): A scheduler to be used in combination with EasyAnimate to denoise the encoded image latents. 
""" model_cpu_offload_seq = "text_encoder->transformer->vae" _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"] def __init__( self, vae: AutoencoderKLMagvit, text_encoder: Qwen2VLForConditionalGeneration | BertModel, tokenizer: Qwen2Tokenizer | BertTokenizer, transformer: EasyAnimateTransformer3DModel, scheduler: FlowMatchEulerDiscreteScheduler, ): super().__init__() self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, transformer=transformer, scheduler=scheduler, ) self.enable_text_attention_mask = ( self.transformer.config.enable_text_attention_mask if getattr(self, "transformer", None) is not None else True ) self.vae_spatial_compression_ratio = ( self.vae.spatial_compression_ratio if getattr(self, "vae", None) is not None else 8 ) self.vae_temporal_compression_ratio = ( self.vae.temporal_compression_ratio if getattr(self, "vae", None) is not None else 4 ) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_spatial_compression_ratio) self.mask_processor = VaeImageProcessor( vae_scale_factor=self.vae_spatial_compression_ratio, do_normalize=False, do_binarize=True, do_convert_grayscale=True, ) self.video_processor = VideoProcessor(vae_scale_factor=self.vae_spatial_compression_ratio) # Copied from diffusers.pipelines.easyanimate.pipeline_easyanimate.EasyAnimatePipeline.encode_prompt def encode_prompt( self, prompt: str | list[str], num_images_per_prompt: int = 1, do_classifier_free_guidance: bool = True, negative_prompt: str | list[str] | None = None, prompt_embeds: torch.Tensor | None = None, negative_prompt_embeds: torch.Tensor | None = None, prompt_attention_mask: torch.Tensor | None = None, negative_prompt_attention_mask: torch.Tensor | None = None, device: torch.device | None = None, dtype: torch.dtype | None = None, max_sequence_length: int = 256, ): r""" Encodes the prompt into text encoder hidden states. 
Args: prompt (`str` or `list[str]`, *optional*): prompt to be encoded device: (`torch.device`): torch device dtype (`torch.dtype`): torch dtype num_images_per_prompt (`int`): number of images that should be generated per prompt do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not negative_prompt (`str` or `list[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. prompt_attention_mask (`torch.Tensor`, *optional*): Attention mask for the prompt. Required when `prompt_embeds` is passed directly. negative_prompt_attention_mask (`torch.Tensor`, *optional*): Attention mask for the negative prompt. Required when `negative_prompt_embeds` is passed directly. max_sequence_length (`int`, *optional*): maximum sequence length to use for the prompt. 
""" dtype = dtype or self.text_encoder.dtype device = device or self.text_encoder.device if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: if isinstance(prompt, str): messages = [ { "role": "user", "content": [{"type": "text", "text": prompt}], } ] else: messages = [ { "role": "user", "content": [{"type": "text", "text": _prompt}], } for _prompt in prompt ] text = [ self.tokenizer.apply_chat_template([m], tokenize=False, add_generation_prompt=True) for m in messages ] text_inputs = self.tokenizer( text=text, padding="max_length", max_length=max_sequence_length, truncation=True, return_attention_mask=True, padding_side="right", return_tensors="pt", ) text_inputs = text_inputs.to(self.text_encoder.device) text_input_ids = text_inputs.input_ids prompt_attention_mask = text_inputs.attention_mask if self.enable_text_attention_mask: # Inference: Generation of the output prompt_embeds = self.text_encoder( input_ids=text_input_ids, attention_mask=prompt_attention_mask, output_hidden_states=True ).hidden_states[-2] else: raise ValueError("LLM needs attention_mask") prompt_attention_mask = prompt_attention_mask.repeat(num_images_per_prompt, 1) prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) bs_embed, seq_len, _ = prompt_embeds.shape # duplicate text embeddings for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) prompt_attention_mask = prompt_attention_mask.to(device=device) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance and negative_prompt_embeds is None: if negative_prompt is not None and isinstance(negative_prompt, str): messages = [ { "role": "user", "content": [{"type": "text", "text": negative_prompt}], 
} ] else: messages = [ { "role": "user", "content": [{"type": "text", "text": _negative_prompt}], } for _negative_prompt in negative_prompt ] text = [ self.tokenizer.apply_chat_template([m], tokenize=False, add_generation_prompt=True) for m in messages ] text_inputs = self.tokenizer( text=text, padding="max_length", max_length=max_sequence_length, truncation=True, return_attention_mask=True, padding_side="right", return_tensors="pt", ) text_inputs = text_inputs.to(self.text_encoder.device) text_input_ids = text_inputs.input_ids negative_prompt_attention_mask = text_inputs.attention_mask if self.enable_text_attention_mask: # Inference: Generation of the output negative_prompt_embeds = self.text_encoder( input_ids=text_input_ids, attention_mask=negative_prompt_attention_mask, output_hidden_states=True, ).hidden_states[-2] else: raise ValueError("LLM needs attention_mask") negative_prompt_attention_mask = negative_prompt_attention_mask.repeat(num_images_per_prompt, 1) if do_classifier_free_guidance: # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) negative_prompt_attention_mask = negative_prompt_attention_mask.to(device=device) return prompt_embeds, negative_prompt_embeds, prompt_attention_mask, negative_prompt_attention_mask # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
        # eta corresponds to η in DDIM paper: https://huggingface.co/papers/2010.02502
        # and should be between [0, 1]

        # Probe the scheduler's `step` signature: only forward kwargs it actually accepts.
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        # check if the scheduler accepts generator
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs["generator"] = generator
        return extra_step_kwargs

    def check_inputs(
        self,
        prompt,
        height,
        width,
        negative_prompt=None,
        prompt_embeds=None,
        negative_prompt_embeds=None,
        prompt_attention_mask=None,
        negative_prompt_attention_mask=None,
        callback_on_step_end_tensor_inputs=None,
    ):
        """Validate `__call__` arguments; raises `ValueError` on any inconsistent combination."""
        # Output resolution must align with the transformer/VAE patching (16-pixel granularity).
        if height % 16 != 0 or width % 16 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 16 but are {height} and {width}.")

        # Callback tensor names must be ones the pipeline actually tracks.
        if callback_on_step_end_tensor_inputs is not None and not all(
            k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
        ):
            raise ValueError(
                f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
            )

        # Exactly one of `prompt` / `prompt_embeds` must be supplied.
        if prompt is not None and prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
                " only forward one of the two."
            )
        elif prompt is None and prompt_embeds is None:
            raise ValueError(
                "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
            )
        elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        # Pre-computed embeddings must be accompanied by their attention masks.
        if prompt_embeds is not None and prompt_attention_mask is None:
            raise ValueError("Must provide `prompt_attention_mask` when specifying `prompt_embeds`.")

        if negative_prompt is not None and negative_prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
                f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
            )

        if negative_prompt_embeds is not None and negative_prompt_attention_mask is None:
            raise ValueError("Must provide `negative_prompt_attention_mask` when specifying `negative_prompt_embeds`.")

        # Positive/negative embeddings are concatenated for CFG, so their shapes must match.
        if prompt_embeds is not None and negative_prompt_embeds is not None:
            if prompt_embeds.shape != negative_prompt_embeds.shape:
                raise ValueError(
                    "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
                    f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
                    f" {negative_prompt_embeds.shape}."
) # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.get_timesteps def get_timesteps(self, num_inference_steps, strength, device): # get the original timestep using init_timestep init_timestep = min(int(num_inference_steps * strength), num_inference_steps) t_start = max(num_inference_steps - init_timestep, 0) timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :] if hasattr(self.scheduler, "set_begin_index"): self.scheduler.set_begin_index(t_start * self.scheduler.order) return timesteps, num_inference_steps - t_start def prepare_mask_latents( self, mask, masked_image, batch_size, height, width, dtype, device, generator, do_classifier_free_guidance, noise_aug_strength, ): # resize the mask to latents shape as we concatenate the mask to the latents # we do that before converting to dtype to avoid breaking in case we're using cpu_offload # and half precision if mask is not None: mask = mask.to(device=device, dtype=dtype) new_mask = [] bs = 1 for i in range(0, mask.shape[0], bs): mask_bs = mask[i : i + bs] mask_bs = self.vae.encode(mask_bs)[0] mask_bs = mask_bs.mode() new_mask.append(mask_bs) mask = torch.cat(new_mask, dim=0) mask = mask * self.vae.config.scaling_factor if masked_image is not None: masked_image = masked_image.to(device=device, dtype=dtype) if self.transformer.config.add_noise_in_inpaint_model: masked_image = add_noise_to_reference_video( masked_image, ratio=noise_aug_strength, generator=generator ) new_mask_pixel_values = [] bs = 1 for i in range(0, masked_image.shape[0], bs): mask_pixel_values_bs = masked_image[i : i + bs] mask_pixel_values_bs = self.vae.encode(mask_pixel_values_bs)[0] mask_pixel_values_bs = mask_pixel_values_bs.mode() new_mask_pixel_values.append(mask_pixel_values_bs) masked_image_latents = torch.cat(new_mask_pixel_values, dim=0) masked_image_latents = masked_image_latents * self.vae.config.scaling_factor # aligning device to prevent device errors when 
concating it with the latent model input masked_image_latents = masked_image_latents.to(device=device, dtype=dtype) else: masked_image_latents = None return mask, masked_image_latents def prepare_latents( self, batch_size, num_channels_latents, height, width, num_frames, dtype, device, generator, latents=None, video=None, timestep=None, is_strength_max=True, return_noise=False, return_video_latents=False, ): shape = ( batch_size, num_channels_latents, (num_frames - 1) // self.vae_temporal_compression_ratio + 1, height // self.vae_spatial_compression_ratio, width // self.vae_spatial_compression_ratio, ) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) if return_video_latents or (latents is None and not is_strength_max): video = video.to(device=device, dtype=dtype) bs = 1 new_video = [] for i in range(0, video.shape[0], bs): video_bs = video[i : i + bs] video_bs = self.vae.encode(video_bs)[0] video_bs = video_bs.sample() new_video.append(video_bs) video = torch.cat(new_video, dim=0) video = video * self.vae.config.scaling_factor video_latents = video.repeat(batch_size // video.shape[0], 1, 1, 1, 1) video_latents = video_latents.to(device=device, dtype=dtype) if latents is None: noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) # if strength is 1. 
then initialise the latents to noise, else initial to image + noise if isinstance(self.scheduler, FlowMatchEulerDiscreteScheduler): latents = noise if is_strength_max else self.scheduler.scale_noise(video_latents, timestep, noise) else: latents = noise if is_strength_max else self.scheduler.add_noise(video_latents, noise, timestep) # if pure noise then scale the initial latents by the Scheduler's init sigma if hasattr(self.scheduler, "init_noise_sigma"): latents = latents * self.scheduler.init_noise_sigma if is_strength_max else latents else: if hasattr(self.scheduler, "init_noise_sigma"): noise = latents.to(device) latents = noise * self.scheduler.init_noise_sigma else: latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler outputs = (latents,) if return_noise: outputs += (noise,) if return_video_latents: outputs += (video_latents,) return outputs @property def guidance_scale(self): return self._guidance_scale @property def guidance_rescale(self): return self._guidance_rescale # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1` # corresponds to doing no classifier free guidance. 
@property def do_classifier_free_guidance(self): return self._guidance_scale > 1 @property def num_timesteps(self): return self._num_timesteps @property def interrupt(self): return self._interrupt @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, prompt: str | list[str] = None, num_frames: int | None = 49, video: torch.FloatTensor = None, mask_video: torch.FloatTensor = None, masked_video_latents: torch.FloatTensor = None, height: int | None = 512, width: int | None = 512, num_inference_steps: int | None = 50, guidance_scale: float | None = 5.0, negative_prompt: str | list[str] | None = None, num_images_per_prompt: int | None = 1, eta: float | None = 0.0, generator: torch.Generator | list[torch.Generator] | None = None, latents: torch.Tensor | None = None, prompt_embeds: torch.Tensor | None = None, negative_prompt_embeds: torch.Tensor | None = None, prompt_attention_mask: torch.Tensor | None = None, negative_prompt_attention_mask: torch.Tensor | None = None, output_type: str | None = "pil", return_dict: bool = True, callback_on_step_end: Callable[[int, int], None] | PipelineCallback | MultiPipelineCallbacks | None = None, callback_on_step_end_tensor_inputs: list[str] = ["latents"], guidance_rescale: float = 0.0, strength: float = 1.0, noise_aug_strength: float = 0.0563, timesteps: list[int] | None = None, ): r""" The call function to the pipeline for generation with HunyuanDiT. Examples: prompt (`str` or `list[str]`, *optional*): The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. num_frames (`int`, *optional*): Length of the video to be generated in seconds. This parameter influences the number of frames and continuity of generated content. video (`torch.FloatTensor`, *optional*): A tensor representing an input video, which can be modified depending on the prompts provided. 
mask_video (`torch.FloatTensor`, *optional*): A tensor to specify areas of the video to be masked (omitted from generation). masked_video_latents (`torch.FloatTensor`, *optional*): Latents from masked portions of the video, utilized during image generation. height (`int`, *optional*): The height in pixels of the generated image or video frames. width (`int`, *optional*): The width in pixels of the generated image or video frames. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image but slower inference time. This parameter is modulated by `strength`. guidance_scale (`float`, *optional*, defaults to 5.0): A higher guidance scale value encourages the model to generate images closely linked to the text `prompt` at the expense of lower image quality. Guidance scale is effective when `guidance_scale > 1`. negative_prompt (`str` or `list[str]`, *optional*): The prompt or prompts to guide what to exclude in image generation. If not defined, you need to provide `negative_prompt_embeds`. This parameter is ignored when not using guidance (`guidance_scale < 1`). num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. eta (`float`, *optional*, defaults to 0.0): A parameter defined in the [DDIM](https://huggingface.co/papers/2010.02502) paper. Only applies to the [`~schedulers.DDIMScheduler`] and is ignored in other schedulers. It adjusts noise level during the inference process. generator (`torch.Generator` or `list[torch.Generator]`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) for setting random seeds which helps in making generation deterministic. latents (`torch.Tensor`, *optional*): A pre-computed latent representation which can be used to guide the generation process. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. 
Can be used to easily tweak text inputs (prompt weighting). If not provided, embeddings are generated from the `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings, aiding in fine-tuning what should not be represented in the outputs. If not provided, embeddings are generated from the `negative_prompt` argument. prompt_attention_mask (`torch.Tensor`, *optional*): Attention mask guiding the focus of the model on specific parts of the prompt text. Required when using `prompt_embeds`. negative_prompt_attention_mask (`torch.Tensor`, *optional*): Attention mask for the negative prompt, needed when `negative_prompt_embeds` are used. output_type (`str`, *optional*, defaults to `"latent"`): The output format of the generated image. Choose between `PIL.Image` and `np.array` to define how you want the results to be formatted. return_dict (`bool`, *optional*, defaults to `True`): If set to `True`, a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] will be returned; otherwise, a tuple containing the generated images and safety flags will be returned. callback_on_step_end (`Callable[[int, int], None]`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*): A callback function (or a list of them) that will be executed at the end of each denoising step, allowing for custom processing during generation. callback_on_step_end_tensor_inputs (`list[str]`, *optional*): Specifies which tensor inputs should be included in the callback function. If not defined, all tensor inputs will be passed, facilitating enhanced logging or monitoring of the generation process. guidance_rescale (`float`, *optional*, defaults to 0.0): Rescale parameter for adjusting noise configuration based on guidance rescale. Based on findings from [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://huggingface.co/papers/2305.08891). 
strength (`float`, *optional*, defaults to 1.0): Affects the overall styling or quality of the generated output. Values closer to 1 usually provide direct adherence to prompts. Examples: # Example usage of the function for generating images based on prompts. Returns: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: Returns either a structured output containing generated images and their metadata when `return_dict` is `True`, or a simpler tuple, where the first element is a list of generated images and the second element indicates if any of them contain "not-safe-for-work" (NSFW) content. """ if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs # 0. default height and width height = int(height // 16 * 16) width = int(width // 16 * 16) # 1. Check inputs. Raise error if not correct self.check_inputs( prompt, height, width, negative_prompt, prompt_embeds, negative_prompt_embeds, prompt_attention_mask, negative_prompt_attention_mask, callback_on_step_end_tensor_inputs, ) self._guidance_scale = guidance_scale self._guidance_rescale = guidance_rescale self._interrupt = False # 2. Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device if self.text_encoder is not None: dtype = self.text_encoder.dtype else: dtype = self.transformer.dtype # 3. 
Encode input prompt ( prompt_embeds, negative_prompt_embeds, prompt_attention_mask, negative_prompt_attention_mask, ) = self.encode_prompt( prompt=prompt, device=device, dtype=dtype, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=self.do_classifier_free_guidance, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, prompt_attention_mask=prompt_attention_mask, negative_prompt_attention_mask=negative_prompt_attention_mask, ) # 4. set timesteps if XLA_AVAILABLE: timestep_device = "cpu" else: timestep_device = device if isinstance(self.scheduler, FlowMatchEulerDiscreteScheduler): timesteps, num_inference_steps = retrieve_timesteps( self.scheduler, num_inference_steps, timestep_device, timesteps, mu=1 ) else: timesteps, num_inference_steps = retrieve_timesteps( self.scheduler, num_inference_steps, timestep_device, timesteps ) timesteps, num_inference_steps = self.get_timesteps( num_inference_steps=num_inference_steps, strength=strength, device=device ) # at which timestep to set the initial noise (n.b. 50% if strength is 0.5) latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) # create a boolean to check if the strength is set to 1. 
if so then initialise the latents with pure noise is_strength_max = strength == 1.0 if video is not None: batch_size, channels, num_frames, height_video, width_video = video.shape init_video = self.image_processor.preprocess( video.permute(0, 2, 1, 3, 4).reshape(batch_size * num_frames, channels, height_video, width_video), height=height, width=width, ) init_video = init_video.to(dtype=torch.float32) init_video = init_video.reshape(batch_size, num_frames, channels, height, width).permute(0, 2, 1, 3, 4) else: init_video = None # Prepare latent variables num_channels_latents = self.vae.config.latent_channels num_channels_transformer = self.transformer.config.in_channels return_image_latents = num_channels_transformer == num_channels_latents # 5. Prepare latents. latents_outputs = self.prepare_latents( batch_size * num_images_per_prompt, num_channels_latents, height, width, num_frames, dtype, device, generator, latents, video=init_video, timestep=latent_timestep, is_strength_max=is_strength_max, return_noise=True, return_video_latents=return_image_latents, ) if return_image_latents: latents, noise, image_latents = latents_outputs else: latents, noise = latents_outputs # 6. Prepare inpaint latents if it needs. if mask_video is not None: if (mask_video == 255).all(): mask = torch.zeros_like(latents).to(device, dtype) # Use zero latents if we want to t2v. 
if self.transformer.config.resize_inpaint_mask_directly: mask_latents = torch.zeros_like(latents)[:, :1].to(device, dtype) else: mask_latents = torch.zeros_like(latents).to(device, dtype) masked_video_latents = torch.zeros_like(latents).to(device, dtype) mask_input = torch.cat([mask_latents] * 2) if self.do_classifier_free_guidance else mask_latents masked_video_latents_input = ( torch.cat([masked_video_latents] * 2) if self.do_classifier_free_guidance else masked_video_latents ) inpaint_latents = torch.cat([mask_input, masked_video_latents_input], dim=1).to(dtype) else: # Prepare mask latent variables batch_size, channels, num_frames, height_video, width_video = mask_video.shape mask_condition = self.mask_processor.preprocess( mask_video.permute(0, 2, 1, 3, 4).reshape( batch_size * num_frames, channels, height_video, width_video ), height=height, width=width, ) mask_condition = mask_condition.to(dtype=torch.float32) mask_condition = mask_condition.reshape(batch_size, num_frames, channels, height, width).permute( 0, 2, 1, 3, 4 ) if num_channels_transformer != num_channels_latents: mask_condition_tile = torch.tile(mask_condition, [1, 3, 1, 1, 1]) if masked_video_latents is None: masked_video = ( init_video * (mask_condition_tile < 0.5) + torch.ones_like(init_video) * (mask_condition_tile > 0.5) * -1 ) else: masked_video = masked_video_latents if self.transformer.config.resize_inpaint_mask_directly: _, masked_video_latents = self.prepare_mask_latents( None, masked_video, batch_size, height, width, dtype, device, generator, self.do_classifier_free_guidance, noise_aug_strength=noise_aug_strength, ) mask_latents = resize_mask( 1 - mask_condition, masked_video_latents, self.vae.config.cache_mag_vae ) mask_latents = mask_latents.to(device, dtype) * self.vae.config.scaling_factor else: mask_latents, masked_video_latents = self.prepare_mask_latents( mask_condition_tile, masked_video, batch_size, height, width, dtype, device, generator, self.do_classifier_free_guidance, 
noise_aug_strength=noise_aug_strength, ) mask_input = torch.cat([mask_latents] * 2) if self.do_classifier_free_guidance else mask_latents masked_video_latents_input = ( torch.cat([masked_video_latents] * 2) if self.do_classifier_free_guidance else masked_video_latents ) inpaint_latents = torch.cat([mask_input, masked_video_latents_input], dim=1).to(dtype) else: inpaint_latents = None mask = torch.tile(mask_condition, [1, num_channels_latents, 1, 1, 1]) mask = F.interpolate(mask, size=latents.size()[-3:], mode="trilinear", align_corners=True).to( device, dtype ) else: if num_channels_transformer != num_channels_latents: mask = torch.zeros_like(latents).to(device, dtype) if self.transformer.config.resize_inpaint_mask_directly: mask_latents = torch.zeros_like(latents)[:, :1].to(device, dtype) else: mask_latents = torch.zeros_like(latents).to(device, dtype) masked_video_latents = torch.zeros_like(latents).to(device, dtype) mask_input = torch.cat([mask_latents] * 2) if self.do_classifier_free_guidance else mask_latents masked_video_latents_input = ( torch.cat([masked_video_latents] * 2) if self.do_classifier_free_guidance else masked_video_latents ) inpaint_latents = torch.cat([mask_input, masked_video_latents_input], dim=1).to(dtype) else: mask = torch.zeros_like(init_video[:, :1]) mask = torch.tile(mask, [1, num_channels_latents, 1, 1, 1]) mask = F.interpolate(mask, size=latents.size()[-3:], mode="trilinear", align_corners=True).to( device, dtype ) inpaint_latents = None # Check that sizes of mask, masked image and latents match if num_channels_transformer != num_channels_latents: num_channels_mask = mask_latents.shape[1] num_channels_masked_image = masked_video_latents.shape[1] if ( num_channels_latents + num_channels_mask + num_channels_masked_image != self.transformer.config.in_channels ): raise ValueError( f"Incorrect configuration settings! 
The config of `pipeline.transformer`: {self.transformer.config} expects" f" {self.transformer.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +" f" `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image}" f" = {num_channels_latents + num_channels_masked_image + num_channels_mask}. Please verify the config of" " `pipeline.transformer` or your `mask_image` or `image` input." ) # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) if self.do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) prompt_attention_mask = torch.cat([negative_prompt_attention_mask, prompt_attention_mask]) # To latents.device prompt_embeds = prompt_embeds.to(device=device) prompt_attention_mask = prompt_attention_mask.to(device=device) # 8. Denoising loop num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order self._num_timesteps = len(timesteps) with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): if self.interrupt: continue # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents if hasattr(self.scheduler, "scale_model_input"): latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) # expand scalar t to 1-D tensor to match the 1st dim of latent_model_input t_expand = torch.tensor([t] * latent_model_input.shape[0], device=device).to( dtype=latent_model_input.dtype ) # predict the noise residual noise_pred = self.transformer( latent_model_input, t_expand, encoder_hidden_states=prompt_embeds, inpaint_latents=inpaint_latents, return_dict=False, )[0] if noise_pred.size()[1] != self.vae.config.latent_channels: noise_pred, _ = noise_pred.chunk(2, dim=1) # perform guidance if 
self.do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) if self.do_classifier_free_guidance and guidance_rescale > 0.0: # Based on 3.4. in https://huggingface.co/papers/2305.08891 noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] if num_channels_transformer == num_channels_latents: init_latents_proper = image_latents init_mask = mask if i < len(timesteps) - 1: noise_timestep = timesteps[i + 1] if isinstance(self.scheduler, FlowMatchEulerDiscreteScheduler): init_latents_proper = self.scheduler.scale_noise( init_latents_proper, torch.tensor([noise_timestep], noise) ) else: init_latents_proper = self.scheduler.add_noise( init_latents_proper, noise, torch.tensor([noise_timestep]) ) latents = (1 - init_mask) * init_latents_proper + init_mask * latents if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop("latents", latents) prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if XLA_AVAILABLE: xm.mark_step() if not output_type == "latent": latents = 1 / self.vae.config.scaling_factor * latents video = self.vae.decode(latents, return_dict=False)[0] video = self.video_processor.postprocess_video(video=video, output_type=output_type) else: video = latents # Offload all models self.maybe_free_model_hooks() if not return_dict: return (video,) return 
EasyAnimatePipelineOutput(frames=video)
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/pipelines/easyanimate/pipeline_easyanimate_inpaint.py", "license": "Apache License 2.0", "lines": 1095, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:src/diffusers/pipelines/easyanimate/pipeline_output.py
from dataclasses import dataclass import torch from diffusers.utils import BaseOutput @dataclass class EasyAnimatePipelineOutput(BaseOutput): r""" Output class for EasyAnimate pipelines. Args: frames (`torch.Tensor`, `np.ndarray`, or list[list[PIL.Image.Image]]): list of video outputs - It can be a nested list of length `batch_size,` with each sub-list containing denoised PIL image sequences of length `num_frames.` It can also be a NumPy array or Torch tensor of shape `(batch_size, num_frames, channels, height, width)`. """ frames: torch.Tensor
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/pipelines/easyanimate/pipeline_output.py", "license": "Apache License 2.0", "lines": 14, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
documentation
huggingface/diffusers:tests/models/autoencoders/test_models_autoencoder_magvit.py
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from diffusers import AutoencoderKLMagvit from ...testing_utils import enable_full_determinism, floats_tensor, torch_device from ..test_modeling_common import ModelTesterMixin from .testing_utils import AutoencoderTesterMixin enable_full_determinism() class AutoencoderKLMagvitTests(ModelTesterMixin, AutoencoderTesterMixin, unittest.TestCase): model_class = AutoencoderKLMagvit main_input_name = "sample" base_precision = 1e-2 def get_autoencoder_kl_magvit_config(self): return { "in_channels": 3, "latent_channels": 4, "out_channels": 3, "block_out_channels": [8, 8, 8, 8], "down_block_types": [ "SpatialDownBlock3D", "SpatialTemporalDownBlock3D", "SpatialTemporalDownBlock3D", "SpatialTemporalDownBlock3D", ], "up_block_types": [ "SpatialUpBlock3D", "SpatialTemporalUpBlock3D", "SpatialTemporalUpBlock3D", "SpatialTemporalUpBlock3D", ], "layers_per_block": 1, "norm_num_groups": 8, "spatial_group_norm": True, } @property def dummy_input(self): batch_size = 2 num_frames = 9 num_channels = 3 height = 16 width = 16 image = floats_tensor((batch_size, num_channels, num_frames, height, width)).to(torch_device) return {"sample": image} @property def input_shape(self): return (3, 9, 16, 16) @property def output_shape(self): return (3, 9, 16, 16) def prepare_init_args_and_inputs_for_common(self): init_dict = self.get_autoencoder_kl_magvit_config() inputs_dict = self.dummy_input return 
init_dict, inputs_dict def test_gradient_checkpointing_is_applied(self): expected_set = {"EasyAnimateEncoder", "EasyAnimateDecoder"} super().test_gradient_checkpointing_is_applied(expected_set=expected_set) @unittest.skip("Not quite sure why this test fails. Revisit later.") def test_effective_gradient_checkpointing(self): pass @unittest.skip("Unsupported test.") def test_forward_with_norm_groups(self): pass @unittest.skip( "Unsupported test. Error: RuntimeError: Sizes of tensors must match except in dimension 0. Expected size 9 but got size 12 for tensor number 1 in the list." ) def test_enable_disable_slicing(self): pass
{ "repo_id": "huggingface/diffusers", "file_path": "tests/models/autoencoders/test_models_autoencoder_magvit.py", "license": "Apache License 2.0", "lines": 79, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/diffusers:tests/models/transformers/test_models_transformer_easyanimate.py
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import torch from diffusers import EasyAnimateTransformer3DModel from ...testing_utils import enable_full_determinism, torch_device from ..test_modeling_common import ModelTesterMixin enable_full_determinism() class EasyAnimateTransformerTests(ModelTesterMixin, unittest.TestCase): model_class = EasyAnimateTransformer3DModel main_input_name = "hidden_states" uses_custom_attn_processor = True @property def dummy_input(self): batch_size = 2 num_channels = 4 num_frames = 2 height = 16 width = 16 embedding_dim = 16 sequence_length = 16 hidden_states = torch.randn((batch_size, num_channels, num_frames, height, width)).to(torch_device) encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device) timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device) return { "hidden_states": hidden_states, "timestep": timestep, "timestep_cond": None, "encoder_hidden_states": encoder_hidden_states, "encoder_hidden_states_t5": None, "inpaint_latents": None, "control_latents": None, } @property def input_shape(self): return (4, 2, 16, 16) @property def output_shape(self): return (4, 2, 16, 16) def prepare_init_args_and_inputs_for_common(self): init_dict = { "attention_head_dim": 16, "num_attention_heads": 2, "in_channels": 4, "mmdit_layers": 2, "num_layers": 2, "out_channels": 4, "patch_size": 2, "sample_height": 60, "sample_width": 90, 
"text_embed_dim": 16, "time_embed_dim": 8, "time_position_encoding_type": "3d_rope", "timestep_activation_fn": "silu", } inputs_dict = self.dummy_input return init_dict, inputs_dict def test_gradient_checkpointing_is_applied(self): expected_set = {"EasyAnimateTransformer3DModel"} super().test_gradient_checkpointing_is_applied(expected_set=expected_set)
{ "repo_id": "huggingface/diffusers", "file_path": "tests/models/transformers/test_models_transformer_easyanimate.py", "license": "Apache License 2.0", "lines": 72, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/diffusers:tests/pipelines/easyanimate/test_easyanimate.py
# Copyright 2025 The HuggingFace Team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import inspect import unittest import numpy as np import torch from transformers import Qwen2Tokenizer, Qwen2VLForConditionalGeneration from diffusers import ( AutoencoderKLMagvit, EasyAnimatePipeline, EasyAnimateTransformer3DModel, FlowMatchEulerDiscreteScheduler, ) from ...testing_utils import ( backend_empty_cache, enable_full_determinism, numpy_cosine_similarity_distance, require_torch_accelerator, slow, torch_device, ) from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin, to_np enable_full_determinism() class EasyAnimatePipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = EasyAnimatePipeline params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs"} batch_params = TEXT_TO_IMAGE_BATCH_PARAMS image_params = TEXT_TO_IMAGE_IMAGE_PARAMS image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS test_xformers_attention = False required_optional_params = frozenset( [ "num_inference_steps", "generator", "latents", "return_dict", "callback_on_step_end", "callback_on_step_end_tensor_inputs", ] ) supports_dduf = False def get_dummy_components(self): torch.manual_seed(0) transformer = EasyAnimateTransformer3DModel( num_attention_heads=2, attention_head_dim=16, in_channels=4, out_channels=4, time_embed_dim=2, text_embed_dim=16, # Must match with tiny-random-t5 
num_layers=1, sample_width=16, # latent width: 2 -> final width: 16 sample_height=16, # latent height: 2 -> final height: 16 patch_size=2, ) torch.manual_seed(0) vae = AutoencoderKLMagvit( in_channels=3, out_channels=3, down_block_types=( "SpatialDownBlock3D", "SpatialTemporalDownBlock3D", "SpatialTemporalDownBlock3D", "SpatialTemporalDownBlock3D", ), up_block_types=( "SpatialUpBlock3D", "SpatialTemporalUpBlock3D", "SpatialTemporalUpBlock3D", "SpatialTemporalUpBlock3D", ), block_out_channels=(8, 8, 8, 8), latent_channels=4, layers_per_block=1, norm_num_groups=2, spatial_group_norm=False, ) torch.manual_seed(0) scheduler = FlowMatchEulerDiscreteScheduler() text_encoder = Qwen2VLForConditionalGeneration.from_pretrained( "hf-internal-testing/tiny-random-Qwen2VLForConditionalGeneration" ) tokenizer = Qwen2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-Qwen2VLForConditionalGeneration") components = { "transformer": transformer, "vae": vae, "scheduler": scheduler, "text_encoder": text_encoder, "tokenizer": tokenizer, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "dance monkey", "negative_prompt": "", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "height": 16, "width": 16, "num_frames": 5, "output_type": "pt", } return inputs def test_inference(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) video = pipe(**inputs).frames generated_video = video[0] self.assertEqual(generated_video.shape, (5, 3, 16, 16)) expected_video = torch.randn(5, 3, 16, 16) max_diff = np.abs(generated_video - expected_video).max() self.assertLessEqual(max_diff, 1e10) def test_callback_inputs(self): sig = 
inspect.signature(self.pipeline_class.__call__) has_callback_tensor_inputs = "callback_on_step_end_tensor_inputs" in sig.parameters has_callback_step_end = "callback_on_step_end" in sig.parameters if not (has_callback_tensor_inputs and has_callback_step_end): return components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) self.assertTrue( hasattr(pipe, "_callback_tensor_inputs"), f" {self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs", ) def callback_inputs_subset(pipe, i, t, callback_kwargs): # iterate over callback args for tensor_name, tensor_value in callback_kwargs.items(): # check that we're only passing in allowed tensor inputs assert tensor_name in pipe._callback_tensor_inputs return callback_kwargs def callback_inputs_all(pipe, i, t, callback_kwargs): for tensor_name in pipe._callback_tensor_inputs: assert tensor_name in callback_kwargs # iterate over callback args for tensor_name, tensor_value in callback_kwargs.items(): # check that we're only passing in allowed tensor inputs assert tensor_name in pipe._callback_tensor_inputs return callback_kwargs inputs = self.get_dummy_inputs(torch_device) # Test passing in a subset inputs["callback_on_step_end"] = callback_inputs_subset inputs["callback_on_step_end_tensor_inputs"] = ["latents"] output = pipe(**inputs)[0] # Test passing in a everything inputs["callback_on_step_end"] = callback_inputs_all inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs output = pipe(**inputs)[0] def callback_inputs_change_tensor(pipe, i, t, callback_kwargs): is_last = i == (pipe.num_timesteps - 1) if is_last: callback_kwargs["latents"] = torch.zeros_like(callback_kwargs["latents"]) return callback_kwargs inputs["callback_on_step_end"] = callback_inputs_change_tensor inputs["callback_on_step_end_tensor_inputs"] = 
pipe._callback_tensor_inputs output = pipe(**inputs)[0] assert output.abs().sum() < 1e10 def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(batch_size=3, expected_max_diff=1e-3) def test_attention_slicing_forward_pass( self, test_max_difference=True, test_mean_pixel_difference=True, expected_max_diff=1e-3 ): if not self.test_attention_slicing: return components = self.get_dummy_components() pipe = self.pipeline_class(**components) for component in pipe.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) generator_device = "cpu" inputs = self.get_dummy_inputs(generator_device) output_without_slicing = pipe(**inputs)[0] pipe.enable_attention_slicing(slice_size=1) inputs = self.get_dummy_inputs(generator_device) output_with_slicing1 = pipe(**inputs)[0] pipe.enable_attention_slicing(slice_size=2) inputs = self.get_dummy_inputs(generator_device) output_with_slicing2 = pipe(**inputs)[0] if test_max_difference: max_diff1 = np.abs(to_np(output_with_slicing1) - to_np(output_without_slicing)).max() max_diff2 = np.abs(to_np(output_with_slicing2) - to_np(output_without_slicing)).max() self.assertLess( max(max_diff1, max_diff2), expected_max_diff, "Attention slicing should not affect the inference results", ) def test_dict_tuple_outputs_equivalent(self, expected_slice=None, expected_max_difference=0.001): # Seems to need a higher tolerance return super().test_dict_tuple_outputs_equivalent(expected_slice, expected_max_difference) def test_encode_prompt_works_in_isolation(self): # Seems to need a higher tolerance return super().test_encode_prompt_works_in_isolation(atol=1e-3, rtol=1e-3) @slow @require_torch_accelerator class EasyAnimatePipelineIntegrationTests(unittest.TestCase): prompt = "A painting of a squirrel eating a burger." 
def setUp(self): super().setUp() gc.collect() backend_empty_cache(torch_device) def tearDown(self): super().tearDown() gc.collect() backend_empty_cache(torch_device) def test_EasyAnimate(self): generator = torch.Generator("cpu").manual_seed(0) pipe = EasyAnimatePipeline.from_pretrained("alibaba-pai/EasyAnimateV5.1-12b-zh", torch_dtype=torch.float16) pipe.enable_model_cpu_offload() prompt = self.prompt videos = pipe( prompt=prompt, height=480, width=720, num_frames=5, generator=generator, num_inference_steps=2, output_type="pt", ).frames video = videos[0] expected_video = torch.randn(1, 5, 480, 720, 3).numpy() max_diff = numpy_cosine_similarity_distance(video, expected_video) assert max_diff < 1e-3, f"Max diff is too high. got {video}"
{ "repo_id": "huggingface/diffusers", "file_path": "tests/pipelines/easyanimate/test_easyanimate.py", "license": "Apache License 2.0", "lines": 247, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/diffusers:src/diffusers/utils/remote_utils.py
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import io import json from typing import Literal, cast import requests from .deprecation_utils import deprecate from .import_utils import is_safetensors_available, is_torch_available if is_torch_available(): import torch from ..image_processor import VaeImageProcessor from ..video_processor import VideoProcessor if is_safetensors_available(): import safetensors.torch DTYPE_MAP = { "float16": torch.float16, "float32": torch.float32, "bfloat16": torch.bfloat16, "uint8": torch.uint8, } from PIL import Image def detect_image_type(data: bytes) -> str: if data.startswith(b"\xff\xd8"): return "jpeg" elif data.startswith(b"\x89PNG\r\n\x1a\n"): return "png" elif data.startswith(b"GIF87a") or data.startswith(b"GIF89a"): return "gif" elif data.startswith(b"BM"): return "bmp" return "unknown" def check_inputs_decode( endpoint: str, tensor: "torch.Tensor", processor: "VaeImageProcessor" | "VideoProcessor" | None = None, do_scaling: bool = True, scaling_factor: float | None = None, shift_factor: float | None = None, output_type: Literal["mp4", "pil", "pt"] = "pil", return_type: Literal["mp4", "pil", "pt"] = "pil", image_format: Literal["png", "jpg"] = "jpg", partial_postprocess: bool = False, input_tensor_type: Literal["binary"] = "binary", output_tensor_type: Literal["binary"] = "binary", height: int | None = None, width: int | None = None, ): if tensor.ndim == 3 
and height is None and width is None: raise ValueError("`height` and `width` required for packed latents.") if ( output_type == "pt" and return_type == "pil" and not partial_postprocess and not isinstance(processor, (VaeImageProcessor, VideoProcessor)) ): raise ValueError("`processor` is required.") if do_scaling and scaling_factor is None: deprecate( "do_scaling", "1.0.0", "`do_scaling` is deprecated, pass `scaling_factor` and `shift_factor` if required.", standard_warn=False, ) def postprocess_decode( response: requests.Response, processor: "VaeImageProcessor" | "VideoProcessor" | None = None, output_type: Literal["mp4", "pil", "pt"] = "pil", return_type: Literal["mp4", "pil", "pt"] = "pil", partial_postprocess: bool = False, ): if output_type == "pt" or (output_type == "pil" and processor is not None): output_tensor = response.content parameters = response.headers shape = json.loads(parameters["shape"]) dtype = parameters["dtype"] torch_dtype = DTYPE_MAP[dtype] output_tensor = torch.frombuffer(bytearray(output_tensor), dtype=torch_dtype).reshape(shape) if output_type == "pt": if partial_postprocess: if return_type == "pil": output = [Image.fromarray(image.numpy()) for image in output_tensor] if len(output) == 1: output = output[0] elif return_type == "pt": output = output_tensor else: if processor is None or return_type == "pt": output = output_tensor else: if isinstance(processor, VideoProcessor): output = cast( list[Image.Image], processor.postprocess_video(output_tensor, output_type="pil")[0], ) else: output = cast( Image.Image, processor.postprocess(output_tensor, output_type="pil")[0], ) elif output_type == "pil" and return_type == "pil" and processor is None: output = Image.open(io.BytesIO(response.content)).convert("RGB") detected_format = detect_image_type(response.content) output.format = detected_format elif output_type == "pil" and processor is not None: if return_type == "pil": output = [ Image.fromarray(image) for image in (output_tensor.permute(0, 
2, 3, 1).float().numpy() * 255).round().astype("uint8") ] elif return_type == "pt": output = output_tensor elif output_type == "mp4" and return_type == "mp4": output = response.content return output def prepare_decode( tensor: "torch.Tensor", processor: "VaeImageProcessor" | "VideoProcessor" | None = None, do_scaling: bool = True, scaling_factor: float | None = None, shift_factor: float | None = None, output_type: Literal["mp4", "pil", "pt"] = "pil", image_format: Literal["png", "jpg"] = "jpg", partial_postprocess: bool = False, height: int | None = None, width: int | None = None, ): headers = {} parameters = { "image_format": image_format, "output_type": output_type, "partial_postprocess": partial_postprocess, "shape": list(tensor.shape), "dtype": str(tensor.dtype).split(".")[-1], } if do_scaling and scaling_factor is not None: parameters["scaling_factor"] = scaling_factor if do_scaling and shift_factor is not None: parameters["shift_factor"] = shift_factor if do_scaling and scaling_factor is None: parameters["do_scaling"] = do_scaling elif do_scaling and scaling_factor is None and shift_factor is None: parameters["do_scaling"] = do_scaling if height is not None and width is not None: parameters["height"] = height parameters["width"] = width headers["Content-Type"] = "tensor/binary" headers["Accept"] = "tensor/binary" if output_type == "pil" and image_format == "jpg" and processor is None: headers["Accept"] = "image/jpeg" elif output_type == "pil" and image_format == "png" and processor is None: headers["Accept"] = "image/png" elif output_type == "mp4": headers["Accept"] = "text/plain" tensor_data = safetensors.torch._tobytes(tensor, "tensor") return {"data": tensor_data, "params": parameters, "headers": headers} def remote_decode( endpoint: str, tensor: "torch.Tensor", processor: "VaeImageProcessor" | "VideoProcessor" | None = None, do_scaling: bool = True, scaling_factor: float | None = None, shift_factor: float | None = None, output_type: Literal["mp4", "pil", 
"pt"] = "pil", return_type: Literal["mp4", "pil", "pt"] = "pil", image_format: Literal["png", "jpg"] = "jpg", partial_postprocess: bool = False, input_tensor_type: Literal["binary"] = "binary", output_tensor_type: Literal["binary"] = "binary", height: int | None = None, width: int | None = None, ) -> Image.Image | list[Image.Image] | bytes | "torch.Tensor": """ Hugging Face Hybrid Inference that allow running VAE decode remotely. Args: endpoint (`str`): Endpoint for Remote Decode. tensor (`torch.Tensor`): Tensor to be decoded. processor (`VaeImageProcessor` or `VideoProcessor`, *optional*): Used with `return_type="pt"`, and `return_type="pil"` for Video models. do_scaling (`bool`, default `True`, *optional*): **DEPRECATED**. **pass `scaling_factor`/`shift_factor` instead.** **still set do_scaling=None/do_scaling=False for no scaling until option is removed** When `True` scaling e.g. `latents / self.vae.config.scaling_factor` is applied remotely. If `False`, input must be passed with scaling applied. scaling_factor (`float`, *optional*): Scaling is applied when passed e.g. [`latents / self.vae.config.scaling_factor`](https://github.com/huggingface/diffusers/blob/7007febae5cff000d4df9059d9cf35133e8b2ca9/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py#L1083C37-L1083C77). - SD v1: 0.18215 - SD XL: 0.13025 - Flux: 0.3611 If `None`, input must be passed with scaling applied. shift_factor (`float`, *optional*): Shift is applied when passed e.g. `latents + self.vae.config.shift_factor`. - Flux: 0.1159 If `None`, input must be passed with scaling applied. output_type (`"mp4"` or `"pil"` or `"pt", default `"pil"): **Endpoint** output type. Subject to change. Report feedback on preferred type. `"mp4": Supported by video models. Endpoint returns `bytes` of video. `"pil"`: Supported by image and video models. Image models: Endpoint returns `bytes` of an image in `image_format`. 
Video models: Endpoint returns `torch.Tensor` with partial `postprocessing` applied. Requires `processor` as a flag (any `None` value will work). `"pt"`: Support by image and video models. Endpoint returns `torch.Tensor`. With `partial_postprocess=True` the tensor is postprocessed `uint8` image tensor. Recommendations: `"pt"` with `partial_postprocess=True` is the smallest transfer for full quality. `"pt"` with `partial_postprocess=False` is the most compatible with third party code. `"pil"` with `image_format="jpg"` is the smallest transfer overall. return_type (`"mp4"` or `"pil"` or `"pt", default `"pil"): **Function** return type. `"mp4": Function returns `bytes` of video. `"pil"`: Function returns `PIL.Image.Image`. With `output_type="pil" no further processing is applied. With `output_type="pt" a `PIL.Image.Image` is created. `partial_postprocess=False` `processor` is required. `partial_postprocess=True` `processor` is **not** required. `"pt"`: Function returns `torch.Tensor`. `processor` is **not** required. `partial_postprocess=False` tensor is `float16` or `bfloat16`, without denormalization. `partial_postprocess=True` tensor is `uint8`, denormalized. image_format (`"png"` or `"jpg"`, default `jpg`): Used with `output_type="pil"`. Endpoint returns `jpg` or `png`. partial_postprocess (`bool`, default `False`): Used with `output_type="pt"`. `partial_postprocess=False` tensor is `float16` or `bfloat16`, without denormalization. `partial_postprocess=True` tensor is `uint8`, denormalized. input_tensor_type (`"binary"`, default `"binary"`): Tensor transfer type. output_tensor_type (`"binary"`, default `"binary"`): Tensor transfer type. height (`int`, **optional**): Required for `"packed"` latents. width (`int`, **optional**): Required for `"packed"` latents. Returns: output (`Image.Image` or `list[Image.Image]` or `bytes` or `torch.Tensor`). 
""" if input_tensor_type == "base64": deprecate( "input_tensor_type='base64'", "1.0.0", "input_tensor_type='base64' is deprecated. Using `binary`.", standard_warn=False, ) input_tensor_type = "binary" if output_tensor_type == "base64": deprecate( "output_tensor_type='base64'", "1.0.0", "output_tensor_type='base64' is deprecated. Using `binary`.", standard_warn=False, ) output_tensor_type = "binary" check_inputs_decode( endpoint, tensor, processor, do_scaling, scaling_factor, shift_factor, output_type, return_type, image_format, partial_postprocess, input_tensor_type, output_tensor_type, height, width, ) kwargs = prepare_decode( tensor=tensor, processor=processor, do_scaling=do_scaling, scaling_factor=scaling_factor, shift_factor=shift_factor, output_type=output_type, image_format=image_format, partial_postprocess=partial_postprocess, height=height, width=width, ) response = requests.post(endpoint, **kwargs) if not response.ok: raise RuntimeError(response.json()) output = postprocess_decode( response=response, processor=processor, output_type=output_type, return_type=return_type, partial_postprocess=partial_postprocess, ) return output def check_inputs_encode( endpoint: str, image: "torch.Tensor" | Image.Image, scaling_factor: float | None = None, shift_factor: float | None = None, ): pass def postprocess_encode( response: requests.Response, ): output_tensor = response.content parameters = response.headers shape = json.loads(parameters["shape"]) dtype = parameters["dtype"] torch_dtype = DTYPE_MAP[dtype] output_tensor = torch.frombuffer(bytearray(output_tensor), dtype=torch_dtype).reshape(shape) return output_tensor def prepare_encode( image: "torch.Tensor" | Image.Image, scaling_factor: float | None = None, shift_factor: float | None = None, ): headers = {} parameters = {} if scaling_factor is not None: parameters["scaling_factor"] = scaling_factor if shift_factor is not None: parameters["shift_factor"] = shift_factor if isinstance(image, torch.Tensor): data = 
safetensors.torch._tobytes(image.contiguous(), "tensor") parameters["shape"] = list(image.shape) parameters["dtype"] = str(image.dtype).split(".")[-1] else: buffer = io.BytesIO() image.save(buffer, format="PNG") data = buffer.getvalue() return {"data": data, "params": parameters, "headers": headers} def remote_encode( endpoint: str, image: "torch.Tensor" | Image.Image, scaling_factor: float | None = None, shift_factor: float | None = None, ) -> "torch.Tensor": """ Hugging Face Hybrid Inference that allow running VAE encode remotely. Args: endpoint (`str`): Endpoint for Remote Decode. image (`torch.Tensor` or `PIL.Image.Image`): Image to be encoded. scaling_factor (`float`, *optional*): Scaling is applied when passed e.g. [`latents * self.vae.config.scaling_factor`]. - SD v1: 0.18215 - SD XL: 0.13025 - Flux: 0.3611 If `None`, input must be passed with scaling applied. shift_factor (`float`, *optional*): Shift is applied when passed e.g. `latents - self.vae.config.shift_factor`. - Flux: 0.1159 If `None`, input must be passed with scaling applied. Returns: output (`torch.Tensor`). """ check_inputs_encode( endpoint, image, scaling_factor, shift_factor, ) kwargs = prepare_encode( image=image, scaling_factor=scaling_factor, shift_factor=shift_factor, ) response = requests.post(endpoint, **kwargs) if not response.ok: raise RuntimeError(response.json()) output = postprocess_encode( response=response, ) return output
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/utils/remote_utils.py", "license": "Apache License 2.0", "lines": 384, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:tests/remote/test_remote_decode.py
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for remote VAE decoding via `diffusers.utils.remote_utils.remote_decode`.

Each concrete test class parameterizes a shared mixin with an endpoint, a latent
shape/dtype, scaling factors and expected output slices.
"""

import unittest

import numpy as np
import PIL.Image
import torch

from diffusers.image_processor import VaeImageProcessor
from diffusers.utils.constants import (
    DECODE_ENDPOINT_FLUX,
    DECODE_ENDPOINT_HUNYUAN_VIDEO,
    DECODE_ENDPOINT_SD_V1,
    DECODE_ENDPOINT_SD_XL,
)
from diffusers.utils.remote_utils import (
    remote_decode,
)
from diffusers.video_processor import VideoProcessor

from ..testing_utils import (
    enable_full_determinism,
    slow,
    torch_all_close,
    torch_device,
)


enable_full_determinism()


class RemoteAutoencoderKLMixin:
    """Shared tests for remote decoding of image latents.

    Subclasses must set the class attributes below; tests then exercise the
    different `output_type` / `return_type` / `partial_postprocess` combinations
    of `remote_decode` against the configured endpoint.
    """

    shape: tuple[int, ...] = None
    out_hw: tuple[int, int] = None
    endpoint: str = None
    dtype: torch.dtype = None
    scaling_factor: float = None
    shift_factor: float = None
    processor_cls: VaeImageProcessor | VideoProcessor = None
    output_pil_slice: torch.Tensor = None
    output_pt_slice: torch.Tensor = None
    partial_postprocess_return_pt_slice: torch.Tensor = None
    return_pt_slice: torch.Tensor = None
    width: int = None
    height: int = None

    def get_dummy_inputs(self):
        """Build deterministic dummy latents plus endpoint/scaling kwargs for `remote_decode`."""
        inputs = {
            "endpoint": self.endpoint,
            "tensor": torch.randn(
                self.shape,
                device=torch_device,
                dtype=self.dtype,
                generator=torch.Generator(torch_device).manual_seed(13),
            ),
            "scaling_factor": self.scaling_factor,
            "shift_factor": self.shift_factor,
            "height": self.height,
            "width": self.width,
        }
        return inputs

    def test_no_scaling(self):
        # Pre-apply the (un)scaling locally so the endpoint receives already-scaled latents.
        inputs = self.get_dummy_inputs()
        if inputs["scaling_factor"] is not None:
            inputs["tensor"] = inputs["tensor"] / inputs["scaling_factor"]
            inputs["scaling_factor"] = None
        if inputs["shift_factor"] is not None:
            inputs["tensor"] = inputs["tensor"] + inputs["shift_factor"]
            inputs["shift_factor"] = None
        processor = self.processor_cls()
        output = remote_decode(
            output_type="pt",
            # required for now, will be removed in next update
            do_scaling=False,
            processor=processor,
            **inputs,
        )
        self.assertTrue(isinstance(output, PIL.Image.Image), f"Expected `PIL.Image.Image` output, got {type(output)}")
        self.assertEqual(output.height, self.out_hw[0], f"Expected image height {self.out_hw[0]}, got {output.height}")
        self.assertEqual(output.width, self.out_hw[1], f"Expected image width {self.out_hw[1]}, got {output.width}")
        output_slice = torch.from_numpy(np.array(output)[0, -3:, -3:].flatten())
        # Increased tolerance for Flux Packed diff [1, 0, 1, 0, 0, 0, 0, 0, 0]
        self.assertTrue(
            torch_all_close(output_slice, self.output_pt_slice.to(output_slice.dtype), rtol=1, atol=1),
            f"{output_slice}",
        )

    def test_output_type_pt(self):
        inputs = self.get_dummy_inputs()
        processor = self.processor_cls()
        output = remote_decode(output_type="pt", processor=processor, **inputs)
        self.assertTrue(isinstance(output, PIL.Image.Image), f"Expected `PIL.Image.Image` output, got {type(output)}")
        self.assertEqual(output.height, self.out_hw[0], f"Expected image height {self.out_hw[0]}, got {output.height}")
        self.assertEqual(output.width, self.out_hw[1], f"Expected image width {self.out_hw[1]}, got {output.width}")
        output_slice = torch.from_numpy(np.array(output)[0, -3:, -3:].flatten())
        self.assertTrue(
            torch_all_close(output_slice, self.output_pt_slice.to(output_slice.dtype), rtol=1e-2), f"{output_slice}"
        )

    # output is visually the same, slice is flaky?
    def test_output_type_pil(self):
        inputs = self.get_dummy_inputs()
        output = remote_decode(output_type="pil", **inputs)
        self.assertTrue(isinstance(output, PIL.Image.Image), f"Expected `PIL.Image.Image` output, got {type(output)}")
        self.assertEqual(output.height, self.out_hw[0], f"Expected image height {self.out_hw[0]}, got {output.height}")
        self.assertEqual(output.width, self.out_hw[1], f"Expected image width {self.out_hw[1]}, got {output.width}")

    def test_output_type_pil_image_format(self):
        inputs = self.get_dummy_inputs()
        output = remote_decode(output_type="pil", image_format="png", **inputs)
        self.assertTrue(isinstance(output, PIL.Image.Image), f"Expected `PIL.Image.Image` output, got {type(output)}")
        self.assertEqual(output.height, self.out_hw[0], f"Expected image height {self.out_hw[0]}, got {output.height}")
        self.assertEqual(output.width, self.out_hw[1], f"Expected image width {self.out_hw[1]}, got {output.width}")
        self.assertEqual(output.format, "png", f"Expected image format `png`, got {output.format}")
        output_slice = torch.from_numpy(np.array(output)[0, -3:, -3:].flatten())
        self.assertTrue(
            torch_all_close(output_slice, self.output_pt_slice.to(output_slice.dtype), rtol=1e-2), f"{output_slice}"
        )

    def test_output_type_pt_partial_postprocess(self):
        inputs = self.get_dummy_inputs()
        output = remote_decode(output_type="pt", partial_postprocess=True, **inputs)
        self.assertTrue(isinstance(output, PIL.Image.Image), f"Expected `PIL.Image.Image` output, got {type(output)}")
        self.assertEqual(output.height, self.out_hw[0], f"Expected image height {self.out_hw[0]}, got {output.height}")
        self.assertEqual(output.width, self.out_hw[1], f"Expected image width {self.out_hw[1]}, got {output.width}")
        output_slice = torch.from_numpy(np.array(output)[0, -3:, -3:].flatten())
        self.assertTrue(
            torch_all_close(output_slice, self.output_pt_slice.to(output_slice.dtype), rtol=1e-2), f"{output_slice}"
        )

    def test_output_type_pt_return_type_pt(self):
        inputs = self.get_dummy_inputs()
        output = remote_decode(output_type="pt", return_type="pt", **inputs)
        self.assertTrue(isinstance(output, torch.Tensor), f"Expected `torch.Tensor` output, got {type(output)}")
        self.assertEqual(
            output.shape[2], self.out_hw[0], f"Expected image height {self.out_hw[0]}, got {output.shape[2]}"
        )
        self.assertEqual(
            output.shape[3], self.out_hw[1], f"Expected image width {self.out_hw[1]}, got {output.shape[3]}"
        )
        output_slice = output[0, 0, -3:, -3:].flatten()
        self.assertTrue(
            torch_all_close(output_slice, self.return_pt_slice.to(output_slice.dtype), rtol=1e-3, atol=1e-3),
            f"{output_slice}",
        )

    def test_output_type_pt_partial_postprocess_return_type_pt(self):
        inputs = self.get_dummy_inputs()
        output = remote_decode(output_type="pt", partial_postprocess=True, return_type="pt", **inputs)
        self.assertTrue(isinstance(output, torch.Tensor), f"Expected `torch.Tensor` output, got {type(output)}")
        self.assertEqual(
            output.shape[1], self.out_hw[0], f"Expected image height {self.out_hw[0]}, got {output.shape[1]}"
        )
        self.assertEqual(
            output.shape[2], self.out_hw[1], f"Expected image width {self.out_hw[1]}, got {output.shape[2]}"
        )
        output_slice = output[0, -3:, -3:, 0].flatten().cpu()
        self.assertTrue(
            torch_all_close(output_slice, self.partial_postprocess_return_pt_slice.to(output_slice.dtype), rtol=1e-2),
            f"{output_slice}",
        )

    def test_do_scaling_deprecation(self):
        inputs = self.get_dummy_inputs()
        inputs.pop("scaling_factor", None)
        inputs.pop("shift_factor", None)
        with self.assertWarns(FutureWarning) as warning:
            _ = remote_decode(output_type="pt", partial_postprocess=True, **inputs)
            self.assertEqual(
                str(warning.warnings[0].message),
                "`do_scaling` is deprecated, pass `scaling_factor` and `shift_factor` if required.",
                str(warning.warnings[0].message),
            )

    def test_input_tensor_type_base64_deprecation(self):
        inputs = self.get_dummy_inputs()
        with self.assertWarns(FutureWarning) as warning:
            _ = remote_decode(output_type="pt", input_tensor_type="base64", partial_postprocess=True, **inputs)
            self.assertEqual(
                str(warning.warnings[0].message),
                "input_tensor_type='base64' is deprecated. Using `binary`.",
                str(warning.warnings[0].message),
            )

    def test_output_tensor_type_base64_deprecation(self):
        inputs = self.get_dummy_inputs()
        with self.assertWarns(FutureWarning) as warning:
            _ = remote_decode(output_type="pt", output_tensor_type="base64", partial_postprocess=True, **inputs)
            self.assertEqual(
                str(warning.warnings[0].message),
                "output_tensor_type='base64' is deprecated. Using `binary`.",
                str(warning.warnings[0].message),
            )


class RemoteAutoencoderKLHunyuanVideoMixin(RemoteAutoencoderKLMixin):
    """Video variant of the mixin: outputs are `List[PIL.Image.Image]` (frames) or 5D tensors."""

    def test_no_scaling(self):
        inputs = self.get_dummy_inputs()
        if inputs["scaling_factor"] is not None:
            inputs["tensor"] = inputs["tensor"] / inputs["scaling_factor"]
            inputs["scaling_factor"] = None
        if inputs["shift_factor"] is not None:
            inputs["tensor"] = inputs["tensor"] + inputs["shift_factor"]
            inputs["shift_factor"] = None
        processor = self.processor_cls()
        output = remote_decode(
            output_type="pt",
            # required for now, will be removed in next update
            do_scaling=False,
            processor=processor,
            **inputs,
        )
        self.assertTrue(
            isinstance(output, list) and isinstance(output[0], PIL.Image.Image),
            f"Expected `List[PIL.Image.Image]` output, got {type(output)}",
        )
        self.assertEqual(
            output[0].height, self.out_hw[0], f"Expected image height {self.out_hw[0]}, got {output[0].height}"
        )
        self.assertEqual(
            output[0].width, self.out_hw[1], f"Expected image width {self.out_hw[1]}, got {output[0].width}"
        )
        output_slice = torch.from_numpy(np.array(output[0])[0, -3:, -3:].flatten())
        self.assertTrue(
            torch_all_close(output_slice, self.output_pt_slice.to(output_slice.dtype), rtol=1, atol=1),
            f"{output_slice}",
        )

    def test_output_type_pt(self):
        inputs = self.get_dummy_inputs()
        processor = self.processor_cls()
        output = remote_decode(output_type="pt", processor=processor, **inputs)
        self.assertTrue(
            isinstance(output, list) and isinstance(output[0], PIL.Image.Image),
            f"Expected `List[PIL.Image.Image]` output, got {type(output)}",
        )
        self.assertEqual(
            output[0].height, self.out_hw[0], f"Expected image height {self.out_hw[0]}, got {output[0].height}"
        )
        self.assertEqual(
            output[0].width, self.out_hw[1], f"Expected image width {self.out_hw[1]}, got {output[0].width}"
        )
        output_slice = torch.from_numpy(np.array(output[0])[0, -3:, -3:].flatten())
        self.assertTrue(
            torch_all_close(output_slice, self.output_pt_slice.to(output_slice.dtype), rtol=1, atol=1),
            f"{output_slice}",
        )

    # output is visually the same, slice is flaky?
    def test_output_type_pil(self):
        inputs = self.get_dummy_inputs()
        processor = self.processor_cls()
        output = remote_decode(output_type="pil", processor=processor, **inputs)
        self.assertTrue(
            isinstance(output, list) and isinstance(output[0], PIL.Image.Image),
            f"Expected `List[PIL.Image.Image]` output, got {type(output)}",
        )
        self.assertEqual(
            output[0].height, self.out_hw[0], f"Expected image height {self.out_hw[0]}, got {output[0].height}"
        )
        self.assertEqual(
            output[0].width, self.out_hw[1], f"Expected image width {self.out_hw[1]}, got {output[0].width}"
        )

    def test_output_type_pil_image_format(self):
        inputs = self.get_dummy_inputs()
        processor = self.processor_cls()
        output = remote_decode(output_type="pil", processor=processor, image_format="png", **inputs)
        self.assertTrue(
            isinstance(output, list) and isinstance(output[0], PIL.Image.Image),
            f"Expected `List[PIL.Image.Image]` output, got {type(output)}",
        )
        self.assertEqual(
            output[0].height, self.out_hw[0], f"Expected image height {self.out_hw[0]}, got {output[0].height}"
        )
        self.assertEqual(
            output[0].width, self.out_hw[1], f"Expected image width {self.out_hw[1]}, got {output[0].width}"
        )
        output_slice = torch.from_numpy(np.array(output[0])[0, -3:, -3:].flatten())
        self.assertTrue(
            torch_all_close(output_slice, self.output_pt_slice.to(output_slice.dtype), rtol=1, atol=1),
            f"{output_slice}",
        )

    def test_output_type_pt_partial_postprocess(self):
        inputs = self.get_dummy_inputs()
        output = remote_decode(output_type="pt", partial_postprocess=True, **inputs)
        self.assertTrue(
            isinstance(output, list) and isinstance(output[0], PIL.Image.Image),
            f"Expected `List[PIL.Image.Image]` output, got {type(output)}",
        )
        self.assertEqual(
            output[0].height, self.out_hw[0], f"Expected image height {self.out_hw[0]}, got {output[0].height}"
        )
        self.assertEqual(
            output[0].width, self.out_hw[1], f"Expected image width {self.out_hw[1]}, got {output[0].width}"
        )
        output_slice = torch.from_numpy(np.array(output[0])[0, -3:, -3:].flatten())
        self.assertTrue(
            torch_all_close(output_slice, self.output_pt_slice.to(output_slice.dtype), rtol=1, atol=1),
            f"{output_slice}",
        )

    def test_output_type_pt_return_type_pt(self):
        inputs = self.get_dummy_inputs()
        output = remote_decode(output_type="pt", return_type="pt", **inputs)
        self.assertTrue(isinstance(output, torch.Tensor), f"Expected `torch.Tensor` output, got {type(output)}")
        # 5D video tensor: (batch, channels, frames, height, width)
        self.assertEqual(
            output.shape[3], self.out_hw[0], f"Expected image height {self.out_hw[0]}, got {output.shape[3]}"
        )
        self.assertEqual(
            output.shape[4], self.out_hw[1], f"Expected image width {self.out_hw[1]}, got {output.shape[4]}"
        )
        output_slice = output[0, 0, 0, -3:, -3:].flatten()
        self.assertTrue(
            torch_all_close(output_slice, self.return_pt_slice.to(output_slice.dtype), rtol=1e-3, atol=1e-3),
            f"{output_slice}",
        )

    def test_output_type_mp4(self):
        inputs = self.get_dummy_inputs()
        output = remote_decode(output_type="mp4", return_type="mp4", **inputs)
        self.assertTrue(isinstance(output, bytes), f"Expected `bytes` output, got {type(output)}")


class RemoteAutoencoderKLSDv1Tests(
    RemoteAutoencoderKLMixin,
    unittest.TestCase,
):
    shape = (
        1,
        4,
        64,
        64,
    )
    out_hw = (
        512,
        512,
    )
    endpoint = DECODE_ENDPOINT_SD_V1
    dtype = torch.float16
    scaling_factor = 0.18215
    shift_factor = None
    processor_cls = VaeImageProcessor
    output_pt_slice = torch.tensor([31, 15, 11, 55, 30, 21, 66, 42, 30], dtype=torch.uint8)
    partial_postprocess_return_pt_slice = torch.tensor([100, 130, 99, 133, 106, 112, 97, 100, 121], dtype=torch.uint8)
    return_pt_slice = torch.tensor([-0.2177, 0.0217, -0.2258, 0.0412, -0.1687, -0.1232, -0.2416, -0.2130, -0.0543])


class RemoteAutoencoderKLSDXLTests(
    RemoteAutoencoderKLMixin,
    unittest.TestCase,
):
    shape = (
        1,
        4,
        128,
        128,
    )
    out_hw = (
        1024,
        1024,
    )
    endpoint = DECODE_ENDPOINT_SD_XL
    dtype = torch.float16
    scaling_factor = 0.13025
    shift_factor = None
    processor_cls = VaeImageProcessor
    output_pt_slice = torch.tensor([104, 52, 23, 114, 61, 35, 108, 87, 38], dtype=torch.uint8)
    partial_postprocess_return_pt_slice = torch.tensor([77, 86, 89, 49, 60, 75, 52, 65, 78], dtype=torch.uint8)
    return_pt_slice = torch.tensor([-0.3945, -0.3289, -0.2993, -0.6177, -0.5259, -0.4119, -0.5898, -0.4863, -0.3845])


class RemoteAutoencoderKLFluxTests(
    RemoteAutoencoderKLMixin,
    unittest.TestCase,
):
    shape = (
        1,
        16,
        128,
        128,
    )
    out_hw = (
        1024,
        1024,
    )
    endpoint = DECODE_ENDPOINT_FLUX
    dtype = torch.bfloat16
    scaling_factor = 0.3611
    shift_factor = 0.1159
    processor_cls = VaeImageProcessor
    output_pt_slice = torch.tensor([110, 72, 91, 62, 35, 52, 69, 55, 69], dtype=torch.uint8)
    partial_postprocess_return_pt_slice = torch.tensor(
        [202, 203, 203, 197, 195, 193, 189, 188, 178], dtype=torch.uint8
    )
    return_pt_slice = torch.tensor([0.5820, 0.5962, 0.5898, 0.5439, 0.5327, 0.5112, 0.4797, 0.4773, 0.3984])


class RemoteAutoencoderKLFluxPackedTests(
    RemoteAutoencoderKLMixin,
    unittest.TestCase,
):
    shape = (
        1,
        4096,
        64,
    )
    out_hw = (
        1024,
        1024,
    )
    height = 1024
    width = 1024
    endpoint = DECODE_ENDPOINT_FLUX
    dtype = torch.bfloat16
    scaling_factor = 0.3611
    shift_factor = 0.1159
    processor_cls = VaeImageProcessor
    # slices are different due to randn on different shape. we can pack the latent instead if we want the same
    output_pt_slice = torch.tensor([96, 116, 157, 45, 67, 104, 34, 56, 89], dtype=torch.uint8)
    partial_postprocess_return_pt_slice = torch.tensor(
        [168, 212, 202, 155, 191, 185, 150, 180, 168], dtype=torch.uint8
    )
    return_pt_slice = torch.tensor([0.3198, 0.6631, 0.5864, 0.2131, 0.4944, 0.4482, 0.1776, 0.4153, 0.3176])


class RemoteAutoencoderKLHunyuanVideoTests(
    RemoteAutoencoderKLHunyuanVideoMixin,
    unittest.TestCase,
):
    shape = (
        1,
        16,
        3,
        40,
        64,
    )
    out_hw = (
        320,
        512,
    )
    endpoint = DECODE_ENDPOINT_HUNYUAN_VIDEO
    dtype = torch.float16
    scaling_factor = 0.476986
    processor_cls = VideoProcessor
    output_pt_slice = torch.tensor([112, 92, 85, 112, 93, 85, 112, 94, 85], dtype=torch.uint8)
    partial_postprocess_return_pt_slice = torch.tensor(
        [149, 161, 168, 136, 150, 156, 129, 143, 149], dtype=torch.uint8
    )
    return_pt_slice = torch.tensor([0.1656, 0.2661, 0.3157, 0.0693, 0.1755, 0.2252, 0.0127, 0.1221, 0.1708])


class RemoteAutoencoderKLSlowTestMixin:
    """Smoke-tests decoding over a grid of resolutions; saves the outputs for inspection."""

    channels: int = 4
    endpoint: str = None
    dtype: torch.dtype = None
    scaling_factor: float = None
    shift_factor: float = None
    width: int = None
    height: int = None

    def get_dummy_inputs(self):
        inputs = {
            "endpoint": self.endpoint,
            "scaling_factor": self.scaling_factor,
            "shift_factor": self.shift_factor,
            "height": self.height,
            "width": self.width,
        }
        return inputs

    def test_multi_res(self):
        inputs = self.get_dummy_inputs()
        for height in {320, 512, 640, 704, 896, 1024, 1208, 1384, 1536, 1608, 1864, 2048}:
            for width in {320, 512, 640, 704, 896, 1024, 1208, 1384, 1536, 1608, 1864, 2048}:
                inputs["tensor"] = torch.randn(
                    (1, self.channels, height // 8, width // 8),
                    device=torch_device,
                    dtype=self.dtype,
                    generator=torch.Generator(torch_device).manual_seed(13),
                )
                inputs["height"] = height
                inputs["width"] = width
                output = remote_decode(output_type="pt", partial_postprocess=True, **inputs)
                output.save(f"test_multi_res_{height}_{width}.png")


@slow
class RemoteAutoencoderKLSDv1SlowTests(
    RemoteAutoencoderKLSlowTestMixin,
    unittest.TestCase,
):
    endpoint = DECODE_ENDPOINT_SD_V1
    dtype = torch.float16
    scaling_factor = 0.18215
    shift_factor = None


@slow
class RemoteAutoencoderKLSDXLSlowTests(
    RemoteAutoencoderKLSlowTestMixin,
    unittest.TestCase,
):
    endpoint = DECODE_ENDPOINT_SD_XL
    dtype = torch.float16
    scaling_factor = 0.13025
    shift_factor = None


@slow
class RemoteAutoencoderKLFluxSlowTests(
    RemoteAutoencoderKLSlowTestMixin,
    unittest.TestCase,
):
    channels = 16
    endpoint = DECODE_ENDPOINT_FLUX
    dtype = torch.bfloat16
    scaling_factor = 0.3611
    shift_factor = 0.1159
{ "repo_id": "huggingface/diffusers", "file_path": "tests/remote/test_remote_decode.py", "license": "Apache License 2.0", "lines": 489, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/diffusers:scripts/convert_wan_to_diffusers.py
"""Key-rename mappings and Animate-specific weight converters for the Wan → diffusers
checkpoint conversion script."""

import argparse
import pathlib
from typing import Any, Dict, Tuple

import torch
from accelerate import init_empty_weights
from huggingface_hub import hf_hub_download, snapshot_download
from safetensors.torch import load_file
from transformers import (
    AutoProcessor,
    AutoTokenizer,
    CLIPImageProcessor,
    CLIPVisionModel,
    CLIPVisionModelWithProjection,
    UMT5EncoderModel,
)

from diffusers import (
    AutoencoderKLWan,
    UniPCMultistepScheduler,
    WanAnimatePipeline,
    WanAnimateTransformer3DModel,
    WanImageToVideoPipeline,
    WanPipeline,
    WanTransformer3DModel,
    WanVACEPipeline,
    WanVACETransformer3DModel,
)

TRANSFORMER_KEYS_RENAME_DICT = {
    "time_embedding.0": "condition_embedder.time_embedder.linear_1",
    "time_embedding.2": "condition_embedder.time_embedder.linear_2",
    "text_embedding.0": "condition_embedder.text_embedder.linear_1",
    "text_embedding.2": "condition_embedder.text_embedder.linear_2",
    "time_projection.1": "condition_embedder.time_proj",
    "head.modulation": "scale_shift_table",
    "head.head": "proj_out",
    "modulation": "scale_shift_table",
    "ffn.0": "ffn.net.0.proj",
    "ffn.2": "ffn.net.2",
    # Hack to swap the layer names
    # The original model calls the norms in following order: norm1, norm3, norm2
    # We convert it to: norm1, norm2, norm3
    "norm2": "norm__placeholder",
    "norm3": "norm2",
    "norm__placeholder": "norm3",
    # For the I2V model
    "img_emb.proj.0": "condition_embedder.image_embedder.norm1",
    "img_emb.proj.1": "condition_embedder.image_embedder.ff.net.0.proj",
    "img_emb.proj.3": "condition_embedder.image_embedder.ff.net.2",
    "img_emb.proj.4": "condition_embedder.image_embedder.norm2",
    # for the FLF2V model
    "img_emb.emb_pos": "condition_embedder.image_embedder.pos_embed",
    # Add attention component mappings
    "self_attn.q": "attn1.to_q",
    "self_attn.k": "attn1.to_k",
    "self_attn.v": "attn1.to_v",
    "self_attn.o": "attn1.to_out.0",
    "self_attn.norm_q": "attn1.norm_q",
    "self_attn.norm_k": "attn1.norm_k",
    "cross_attn.q": "attn2.to_q",
    "cross_attn.k": "attn2.to_k",
    "cross_attn.v": "attn2.to_v",
    "cross_attn.o": "attn2.to_out.0",
    "cross_attn.norm_q": "attn2.norm_q",
    "cross_attn.norm_k": "attn2.norm_k",
    "attn2.to_k_img": "attn2.add_k_proj",
    "attn2.to_v_img": "attn2.add_v_proj",
    "attn2.norm_k_img": "attn2.norm_added_k",
}

VACE_TRANSFORMER_KEYS_RENAME_DICT = {
    "time_embedding.0": "condition_embedder.time_embedder.linear_1",
    "time_embedding.2": "condition_embedder.time_embedder.linear_2",
    "text_embedding.0": "condition_embedder.text_embedder.linear_1",
    "text_embedding.2": "condition_embedder.text_embedder.linear_2",
    "time_projection.1": "condition_embedder.time_proj",
    "head.modulation": "scale_shift_table",
    "head.head": "proj_out",
    "modulation": "scale_shift_table",
    "ffn.0": "ffn.net.0.proj",
    "ffn.2": "ffn.net.2",
    # Hack to swap the layer names
    # The original model calls the norms in following order: norm1, norm3, norm2
    # We convert it to: norm1, norm2, norm3
    "norm2": "norm__placeholder",
    "norm3": "norm2",
    "norm__placeholder": "norm3",
    # # For the I2V model
    # "img_emb.proj.0": "condition_embedder.image_embedder.norm1",
    # "img_emb.proj.1": "condition_embedder.image_embedder.ff.net.0.proj",
    # "img_emb.proj.3": "condition_embedder.image_embedder.ff.net.2",
    # "img_emb.proj.4": "condition_embedder.image_embedder.norm2",
    # # for the FLF2V model
    # "img_emb.emb_pos": "condition_embedder.image_embedder.pos_embed",
    # Add attention component mappings
    "self_attn.q": "attn1.to_q",
    "self_attn.k": "attn1.to_k",
    "self_attn.v": "attn1.to_v",
    "self_attn.o": "attn1.to_out.0",
    "self_attn.norm_q": "attn1.norm_q",
    "self_attn.norm_k": "attn1.norm_k",
    "cross_attn.q": "attn2.to_q",
    "cross_attn.k": "attn2.to_k",
    "cross_attn.v": "attn2.to_v",
    "cross_attn.o": "attn2.to_out.0",
    "cross_attn.norm_q": "attn2.norm_q",
    "cross_attn.norm_k": "attn2.norm_k",
    "attn2.to_k_img": "attn2.add_k_proj",
    "attn2.to_v_img": "attn2.add_v_proj",
    "attn2.norm_k_img": "attn2.norm_added_k",
    "before_proj": "proj_in",
    "after_proj": "proj_out",
}

ANIMATE_TRANSFORMER_KEYS_RENAME_DICT = {
    "time_embedding.0": "condition_embedder.time_embedder.linear_1",
    "time_embedding.2": "condition_embedder.time_embedder.linear_2",
    "text_embedding.0": "condition_embedder.text_embedder.linear_1",
    "text_embedding.2": "condition_embedder.text_embedder.linear_2",
    "time_projection.1": "condition_embedder.time_proj",
    "head.modulation": "scale_shift_table",
    "head.head": "proj_out",
    "modulation": "scale_shift_table",
    "ffn.0": "ffn.net.0.proj",
    "ffn.2": "ffn.net.2",
    # Hack to swap the layer names
    # The original model calls the norms in following order: norm1, norm3, norm2
    # We convert it to: norm1, norm2, norm3
    "norm2": "norm__placeholder",
    "norm3": "norm2",
    "norm__placeholder": "norm3",
    "img_emb.proj.0": "condition_embedder.image_embedder.norm1",
    "img_emb.proj.1": "condition_embedder.image_embedder.ff.net.0.proj",
    "img_emb.proj.3": "condition_embedder.image_embedder.ff.net.2",
    "img_emb.proj.4": "condition_embedder.image_embedder.norm2",
    # Add attention component mappings
    "self_attn.q": "attn1.to_q",
    "self_attn.k": "attn1.to_k",
    "self_attn.v": "attn1.to_v",
    "self_attn.o": "attn1.to_out.0",
    "self_attn.norm_q": "attn1.norm_q",
    "self_attn.norm_k": "attn1.norm_k",
    "cross_attn.q": "attn2.to_q",
    "cross_attn.k": "attn2.to_k",
    "cross_attn.v": "attn2.to_v",
    "cross_attn.o": "attn2.to_out.0",
    "cross_attn.norm_q": "attn2.norm_q",
    "cross_attn.norm_k": "attn2.norm_k",
    "cross_attn.k_img": "attn2.to_k_img",
    "cross_attn.v_img": "attn2.to_v_img",
    "cross_attn.norm_k_img": "attn2.norm_k_img",
    # After cross_attn -> attn2 rename, we need to rename the img keys
    "attn2.to_k_img": "attn2.add_k_proj",
    "attn2.to_v_img": "attn2.add_v_proj",
    "attn2.norm_k_img": "attn2.norm_added_k",
    # Wan Animate-specific mappings (motion encoder, face encoder, face adapter)
    # Motion encoder mappings
    # The name mapping is complicated for the convolutional part so we handle that in its own function
    "motion_encoder.enc.fc": "motion_encoder.motion_network",
    "motion_encoder.dec.direction.weight": "motion_encoder.motion_synthesis_weight",
    # Face encoder mappings - CausalConv1d has a .conv submodule that we need to flatten
    "face_encoder.conv1_local.conv": "face_encoder.conv1_local",
    "face_encoder.conv2.conv": "face_encoder.conv2",
    "face_encoder.conv3.conv": "face_encoder.conv3",
    # Face adapter mappings are handled in a separate function
}


# TODO: Verify this and simplify if possible.
def convert_animate_motion_encoder_weights(key: str, state_dict: Dict[str, Any], final_conv_idx: int = 8) -> None:
    """
    Convert all motion encoder weights for Animate model (in place on `state_dict`).

    In the original model:
    - All Linear layers in fc use EqualLinear
    - All Conv2d layers in convs use EqualConv2d (except blur_conv which is initialized separately)
    - Blur kernels are stored as buffers in Sequential modules
    - ConvLayer is nn.Sequential with indices: [Blur (optional), EqualConv2d, FusedLeakyReLU (optional)]

    Conversion strategy:
    1. Drop .kernel buffers (blur kernels)
    2. Rename sequential indices to named components (e.g., 0 -> conv2d, 1 -> bias_leaky_relu)
    """
    # Skip if not a weight, bias, or kernel
    if ".weight" not in key and ".bias" not in key and ".kernel" not in key:
        return

    # Handle Blur kernel buffers from original implementation.
    # After renaming, these appear under: motion_encoder.res_blocks.*.conv{2,skip}.blur_kernel
    # Diffusers constructs blur kernels as a non-persistent buffer so we must drop these keys
    if ".kernel" in key and "motion_encoder" in key:
        # Remove unexpected blur kernel buffers to avoid strict load errors
        state_dict.pop(key, None)
        return

    # Rename Sequential indices to named components in ConvLayer and ResBlock
    if ".enc.net_app.convs." in key and (".weight" in key or ".bias" in key):
        parts = key.split(".")
        # Find the sequential index (digit) after convs or after conv1/conv2/skip
        # Examples:
        # - enc.net_app.convs.0.0.weight -> conv_in.weight (initial conv layer weight)
        # - enc.net_app.convs.0.1.bias -> conv_in.act_fn.bias (initial conv layer bias)
        # - enc.net_app.convs.{n:1-7}.conv1.0.weight -> res_blocks.{(n-1):0-6}.conv1.weight (conv1 weight)
        #   - e.g. enc.net_app.convs.1.conv1.0.weight -> res_blocks.0.conv1.weight
        # - enc.net_app.convs.{n:1-7}.conv1.1.bias -> res_blocks.{(n-1):0-6}.conv1.act_fn.bias (conv1 bias)
        #   - e.g. enc.net_app.convs.1.conv1.1.bias -> res_blocks.0.conv1.act_fn.bias
        # - enc.net_app.convs.{n:1-7}.conv2.1.weight -> res_blocks.{(n-1):0-6}.conv2.weight (conv2 weight)
        # - enc.net_app.convs.1.conv2.2.bias -> res_blocks.0.conv2.act_fn.bias (conv2 bias)
        # - enc.net_app.convs.{n:1-7}.skip.1.weight -> res_blocks.{(n-1):0-6}.conv_skip.weight (skip conv weight)
        # - enc.net_app.convs.8 -> conv_out (final conv layer)
        convs_idx = parts.index("convs") if "convs" in parts else -1
        if convs_idx >= 0 and len(parts) - convs_idx >= 2:
            bias = False
            # Fix: initialize so an unhandled key (e.g. a bias on the final conv layer,
            # which has no diffusers counterpart) is skipped instead of raising NameError.
            new_key = None
            # The nn.Sequential index will always follow convs
            sequential_idx = int(parts[convs_idx + 1])
            if sequential_idx == 0:
                if key.endswith(".weight"):
                    new_key = "motion_encoder.conv_in.weight"
                elif key.endswith(".bias"):
                    new_key = "motion_encoder.conv_in.act_fn.bias"
                    bias = True
            elif sequential_idx == final_conv_idx:
                if key.endswith(".weight"):
                    new_key = "motion_encoder.conv_out.weight"
            else:
                # Intermediate .convs. layers, which get mapped to .res_blocks.
                prefix = "motion_encoder.res_blocks."
                layer_name = parts[convs_idx + 2]
                if layer_name == "skip":
                    layer_name = "conv_skip"
                if key.endswith(".weight"):
                    param_name = "weight"
                elif key.endswith(".bias"):
                    param_name = "act_fn.bias"
                    bias = True
                suffix_parts = [str(sequential_idx - 1), layer_name, param_name]
                suffix = ".".join(suffix_parts)
                new_key = prefix + suffix
            if new_key is None:
                return
            param = state_dict.pop(key)
            if bias:
                # FusedLeakyReLU stores its bias with extra singleton dims
                param = param.squeeze()
            state_dict[new_key] = param
            return
        return
    return


def convert_animate_face_adapter_weights(key: str, state_dict: Dict[str, Any]) -> None:
    """
    Convert face adapter weights for the Animate model (in place on `state_dict`).

    The original model uses a fused KV projection but the diffusers model uses separate K and V
    projections, so `linear1_kv` weights/biases are chunked in half along dim 0.
    """
    # Skip if not a weight or bias
    if ".weight" not in key and ".bias" not in key:
        return

    prefix = "face_adapter."
    if ".fuser_blocks." in key:
        parts = key.split(".")
        module_list_idx = parts.index("fuser_blocks") if "fuser_blocks" in parts else -1
        if module_list_idx >= 0 and (len(parts) - 1) - module_list_idx == 3:
            block_idx = parts[module_list_idx + 1]
            layer_name = parts[module_list_idx + 2]
            param_name = parts[module_list_idx + 3]

            if layer_name == "linear1_kv":
                # Split the fused KV projection into separate K and V projections
                layer_name_k = "to_k"
                layer_name_v = "to_v"
                suffix_k = ".".join([block_idx, layer_name_k, param_name])
                suffix_v = ".".join([block_idx, layer_name_v, param_name])
                new_key_k = prefix + suffix_k
                new_key_v = prefix + suffix_v
                kv_proj = state_dict.pop(key)
                k_proj, v_proj = torch.chunk(kv_proj, 2, dim=0)
                state_dict[new_key_k] = k_proj
                state_dict[new_key_v] = v_proj
                return
            else:
                if layer_name == "q_norm":
                    new_layer_name = "norm_q"
                elif layer_name == "k_norm":
                    new_layer_name = "norm_k"
                elif layer_name == "linear1_q":
                    new_layer_name = "to_q"
                elif layer_name == "linear2":
                    new_layer_name = "to_out"
                suffix_parts = [block_idx, new_layer_name, param_name]
                suffix = ".".join(suffix_parts)
                new_key = prefix + suffix
                state_dict[new_key] = state_dict.pop(key)
                return
        return


TRANSFORMER_SPECIAL_KEYS_REMAP = {}
VACE_TRANSFORMER_SPECIAL_KEYS_REMAP = {}
ANIMATE_TRANSFORMER_SPECIAL_KEYS_REMAP = { "motion_encoder": convert_animate_motion_encoder_weights, "face_adapter": convert_animate_face_adapter_weights, } def update_state_dict_(state_dict: Dict[str, Any], old_key: str, new_key: str) -> dict[str, Any]: state_dict[new_key] = state_dict.pop(old_key) def load_sharded_safetensors(dir: pathlib.Path): file_paths = list(dir.glob("diffusion_pytorch_model*.safetensors")) state_dict = {} for path in file_paths: state_dict.update(load_file(path)) return state_dict def get_transformer_config(model_type: str) -> Tuple[Dict[str, Any], ...]: if model_type == "Wan-T2V-1.3B": config = { "model_id": "StevenZhang/Wan2.1-T2V-1.3B-Diff", "diffusers_config": { "added_kv_proj_dim": None, "attention_head_dim": 128, "cross_attn_norm": True, "eps": 1e-06, "ffn_dim": 8960, "freq_dim": 256, "in_channels": 16, "num_attention_heads": 12, "num_layers": 30, "out_channels": 16, "patch_size": [1, 2, 2], "qk_norm": "rms_norm_across_heads", "text_dim": 4096, }, } RENAME_DICT = TRANSFORMER_KEYS_RENAME_DICT SPECIAL_KEYS_REMAP = TRANSFORMER_SPECIAL_KEYS_REMAP elif model_type == "Wan-T2V-14B": config = { "model_id": "StevenZhang/Wan2.1-T2V-14B-Diff", "diffusers_config": { "added_kv_proj_dim": None, "attention_head_dim": 128, "cross_attn_norm": True, "eps": 1e-06, "ffn_dim": 13824, "freq_dim": 256, "in_channels": 16, "num_attention_heads": 40, "num_layers": 40, "out_channels": 16, "patch_size": [1, 2, 2], "qk_norm": "rms_norm_across_heads", "text_dim": 4096, }, } RENAME_DICT = TRANSFORMER_KEYS_RENAME_DICT SPECIAL_KEYS_REMAP = TRANSFORMER_SPECIAL_KEYS_REMAP elif model_type == "Wan-I2V-14B-480p": config = { "model_id": "StevenZhang/Wan2.1-I2V-14B-480P-Diff", "diffusers_config": { "image_dim": 1280, "added_kv_proj_dim": 5120, "attention_head_dim": 128, "cross_attn_norm": True, "eps": 1e-06, "ffn_dim": 13824, "freq_dim": 256, "in_channels": 36, "num_attention_heads": 40, "num_layers": 40, "out_channels": 16, "patch_size": [1, 2, 2], "qk_norm": 
"rms_norm_across_heads", "text_dim": 4096, }, } RENAME_DICT = TRANSFORMER_KEYS_RENAME_DICT SPECIAL_KEYS_REMAP = TRANSFORMER_SPECIAL_KEYS_REMAP elif model_type == "Wan-I2V-14B-720p": config = { "model_id": "StevenZhang/Wan2.1-I2V-14B-720P-Diff", "diffusers_config": { "image_dim": 1280, "added_kv_proj_dim": 5120, "attention_head_dim": 128, "cross_attn_norm": True, "eps": 1e-06, "ffn_dim": 13824, "freq_dim": 256, "in_channels": 36, "num_attention_heads": 40, "num_layers": 40, "out_channels": 16, "patch_size": [1, 2, 2], "qk_norm": "rms_norm_across_heads", "text_dim": 4096, }, } RENAME_DICT = TRANSFORMER_KEYS_RENAME_DICT SPECIAL_KEYS_REMAP = TRANSFORMER_SPECIAL_KEYS_REMAP elif model_type == "Wan-FLF2V-14B-720P": config = { "model_id": "ypyp/Wan2.1-FLF2V-14B-720P", # This is just a placeholder "diffusers_config": { "image_dim": 1280, "added_kv_proj_dim": 5120, "attention_head_dim": 128, "cross_attn_norm": True, "eps": 1e-06, "ffn_dim": 13824, "freq_dim": 256, "in_channels": 36, "num_attention_heads": 40, "num_layers": 40, "out_channels": 16, "patch_size": [1, 2, 2], "qk_norm": "rms_norm_across_heads", "text_dim": 4096, "rope_max_seq_len": 1024, "pos_embed_seq_len": 257 * 2, }, } RENAME_DICT = TRANSFORMER_KEYS_RENAME_DICT SPECIAL_KEYS_REMAP = TRANSFORMER_SPECIAL_KEYS_REMAP elif model_type == "Wan-VACE-1.3B": config = { "model_id": "Wan-AI/Wan2.1-VACE-1.3B", "diffusers_config": { "added_kv_proj_dim": None, "attention_head_dim": 128, "cross_attn_norm": True, "eps": 1e-06, "ffn_dim": 8960, "freq_dim": 256, "in_channels": 16, "num_attention_heads": 12, "num_layers": 30, "out_channels": 16, "patch_size": [1, 2, 2], "qk_norm": "rms_norm_across_heads", "text_dim": 4096, "vace_layers": [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28], "vace_in_channels": 96, }, } RENAME_DICT = VACE_TRANSFORMER_KEYS_RENAME_DICT SPECIAL_KEYS_REMAP = VACE_TRANSFORMER_SPECIAL_KEYS_REMAP elif model_type == "Wan-VACE-14B": config = { "model_id": "Wan-AI/Wan2.1-VACE-14B", "diffusers_config": { 
"added_kv_proj_dim": None, "attention_head_dim": 128, "cross_attn_norm": True, "eps": 1e-06, "ffn_dim": 13824, "freq_dim": 256, "in_channels": 16, "num_attention_heads": 40, "num_layers": 40, "out_channels": 16, "patch_size": [1, 2, 2], "qk_norm": "rms_norm_across_heads", "text_dim": 4096, "vace_layers": [0, 5, 10, 15, 20, 25, 30, 35], "vace_in_channels": 96, }, } RENAME_DICT = VACE_TRANSFORMER_KEYS_RENAME_DICT SPECIAL_KEYS_REMAP = VACE_TRANSFORMER_SPECIAL_KEYS_REMAP elif model_type == "Wan2.2-VACE-Fun-14B": config = { "model_id": "alibaba-pai/Wan2.2-VACE-Fun-A14B", "diffusers_config": { "added_kv_proj_dim": None, "attention_head_dim": 128, "cross_attn_norm": True, "eps": 1e-06, "ffn_dim": 13824, "freq_dim": 256, "in_channels": 16, "num_attention_heads": 40, "num_layers": 40, "out_channels": 16, "patch_size": [1, 2, 2], "qk_norm": "rms_norm_across_heads", "text_dim": 4096, "vace_layers": [0, 5, 10, 15, 20, 25, 30, 35], "vace_in_channels": 96, }, } RENAME_DICT = VACE_TRANSFORMER_KEYS_RENAME_DICT SPECIAL_KEYS_REMAP = VACE_TRANSFORMER_SPECIAL_KEYS_REMAP elif model_type == "Wan2.2-I2V-14B-720p": config = { "model_id": "Wan-AI/Wan2.2-I2V-A14B", "diffusers_config": { "added_kv_proj_dim": None, "attention_head_dim": 128, "cross_attn_norm": True, "eps": 1e-06, "ffn_dim": 13824, "freq_dim": 256, "in_channels": 36, "num_attention_heads": 40, "num_layers": 40, "out_channels": 16, "patch_size": [1, 2, 2], "qk_norm": "rms_norm_across_heads", "text_dim": 4096, }, } RENAME_DICT = TRANSFORMER_KEYS_RENAME_DICT SPECIAL_KEYS_REMAP = TRANSFORMER_SPECIAL_KEYS_REMAP elif model_type == "Wan2.2-T2V-A14B": config = { "model_id": "Wan-AI/Wan2.2-T2V-A14B", "diffusers_config": { "added_kv_proj_dim": None, "attention_head_dim": 128, "cross_attn_norm": True, "eps": 1e-06, "ffn_dim": 13824, "freq_dim": 256, "in_channels": 16, "num_attention_heads": 40, "num_layers": 40, "out_channels": 16, "patch_size": [1, 2, 2], "qk_norm": "rms_norm_across_heads", "text_dim": 4096, }, } RENAME_DICT = 
TRANSFORMER_KEYS_RENAME_DICT SPECIAL_KEYS_REMAP = TRANSFORMER_SPECIAL_KEYS_REMAP elif model_type == "Wan2.2-TI2V-5B": config = { "model_id": "Wan-AI/Wan2.2-TI2V-5B", "diffusers_config": { "added_kv_proj_dim": None, "attention_head_dim": 128, "cross_attn_norm": True, "eps": 1e-06, "ffn_dim": 14336, "freq_dim": 256, "in_channels": 48, "num_attention_heads": 24, "num_layers": 30, "out_channels": 48, "patch_size": [1, 2, 2], "qk_norm": "rms_norm_across_heads", "text_dim": 4096, }, } RENAME_DICT = TRANSFORMER_KEYS_RENAME_DICT SPECIAL_KEYS_REMAP = TRANSFORMER_SPECIAL_KEYS_REMAP elif model_type == "Wan2.2-Animate-14B": config = { "model_id": "Wan-AI/Wan2.2-Animate-14B", "diffusers_config": { "image_dim": 1280, "added_kv_proj_dim": 5120, "attention_head_dim": 128, "cross_attn_norm": True, "eps": 1e-06, "ffn_dim": 13824, "freq_dim": 256, "in_channels": 36, "num_attention_heads": 40, "num_layers": 40, "out_channels": 16, "patch_size": (1, 2, 2), "qk_norm": "rms_norm_across_heads", "text_dim": 4096, "rope_max_seq_len": 1024, "pos_embed_seq_len": None, "motion_encoder_size": 512, # Start of Wan Animate-specific configs "motion_style_dim": 512, "motion_dim": 20, "motion_encoder_dim": 512, "face_encoder_hidden_dim": 1024, "face_encoder_num_heads": 4, "inject_face_latents_blocks": 5, }, } RENAME_DICT = ANIMATE_TRANSFORMER_KEYS_RENAME_DICT SPECIAL_KEYS_REMAP = ANIMATE_TRANSFORMER_SPECIAL_KEYS_REMAP return config, RENAME_DICT, SPECIAL_KEYS_REMAP def convert_transformer(model_type: str, stage: str = None): config, RENAME_DICT, SPECIAL_KEYS_REMAP = get_transformer_config(model_type) diffusers_config = config["diffusers_config"] model_id = config["model_id"] model_dir = pathlib.Path(snapshot_download(model_id, repo_type="model")) if stage is not None: model_dir = model_dir / stage original_state_dict = load_sharded_safetensors(model_dir) with init_empty_weights(): if "Animate" in model_type: transformer = WanAnimateTransformer3DModel.from_config(diffusers_config) elif "VACE" in 
model_type: transformer = WanVACETransformer3DModel.from_config(diffusers_config) else: transformer = WanTransformer3DModel.from_config(diffusers_config) for key in list(original_state_dict.keys()): new_key = key[:] for replace_key, rename_key in RENAME_DICT.items(): new_key = new_key.replace(replace_key, rename_key) update_state_dict_(original_state_dict, key, new_key) for key in list(original_state_dict.keys()): for special_key, handler_fn_inplace in SPECIAL_KEYS_REMAP.items(): if special_key not in key: continue handler_fn_inplace(key, original_state_dict) # Load state dict into the meta model, which will materialize the tensors transformer.load_state_dict(original_state_dict, strict=True, assign=True) # Move to CPU to ensure all tensors are materialized transformer = transformer.to("cpu") return transformer def convert_vae(): vae_ckpt_path = hf_hub_download("Wan-AI/Wan2.1-T2V-14B", "Wan2.1_VAE.pth") old_state_dict = torch.load(vae_ckpt_path, weights_only=True) new_state_dict = {} # Create mappings for specific components middle_key_mapping = { # Encoder middle block "encoder.middle.0.residual.0.gamma": "encoder.mid_block.resnets.0.norm1.gamma", "encoder.middle.0.residual.2.bias": "encoder.mid_block.resnets.0.conv1.bias", "encoder.middle.0.residual.2.weight": "encoder.mid_block.resnets.0.conv1.weight", "encoder.middle.0.residual.3.gamma": "encoder.mid_block.resnets.0.norm2.gamma", "encoder.middle.0.residual.6.bias": "encoder.mid_block.resnets.0.conv2.bias", "encoder.middle.0.residual.6.weight": "encoder.mid_block.resnets.0.conv2.weight", "encoder.middle.2.residual.0.gamma": "encoder.mid_block.resnets.1.norm1.gamma", "encoder.middle.2.residual.2.bias": "encoder.mid_block.resnets.1.conv1.bias", "encoder.middle.2.residual.2.weight": "encoder.mid_block.resnets.1.conv1.weight", "encoder.middle.2.residual.3.gamma": "encoder.mid_block.resnets.1.norm2.gamma", "encoder.middle.2.residual.6.bias": "encoder.mid_block.resnets.1.conv2.bias", 
"encoder.middle.2.residual.6.weight": "encoder.mid_block.resnets.1.conv2.weight", # Decoder middle block "decoder.middle.0.residual.0.gamma": "decoder.mid_block.resnets.0.norm1.gamma", "decoder.middle.0.residual.2.bias": "decoder.mid_block.resnets.0.conv1.bias", "decoder.middle.0.residual.2.weight": "decoder.mid_block.resnets.0.conv1.weight", "decoder.middle.0.residual.3.gamma": "decoder.mid_block.resnets.0.norm2.gamma", "decoder.middle.0.residual.6.bias": "decoder.mid_block.resnets.0.conv2.bias", "decoder.middle.0.residual.6.weight": "decoder.mid_block.resnets.0.conv2.weight", "decoder.middle.2.residual.0.gamma": "decoder.mid_block.resnets.1.norm1.gamma", "decoder.middle.2.residual.2.bias": "decoder.mid_block.resnets.1.conv1.bias", "decoder.middle.2.residual.2.weight": "decoder.mid_block.resnets.1.conv1.weight", "decoder.middle.2.residual.3.gamma": "decoder.mid_block.resnets.1.norm2.gamma", "decoder.middle.2.residual.6.bias": "decoder.mid_block.resnets.1.conv2.bias", "decoder.middle.2.residual.6.weight": "decoder.mid_block.resnets.1.conv2.weight", } # Create a mapping for attention blocks attention_mapping = { # Encoder middle attention "encoder.middle.1.norm.gamma": "encoder.mid_block.attentions.0.norm.gamma", "encoder.middle.1.to_qkv.weight": "encoder.mid_block.attentions.0.to_qkv.weight", "encoder.middle.1.to_qkv.bias": "encoder.mid_block.attentions.0.to_qkv.bias", "encoder.middle.1.proj.weight": "encoder.mid_block.attentions.0.proj.weight", "encoder.middle.1.proj.bias": "encoder.mid_block.attentions.0.proj.bias", # Decoder middle attention "decoder.middle.1.norm.gamma": "decoder.mid_block.attentions.0.norm.gamma", "decoder.middle.1.to_qkv.weight": "decoder.mid_block.attentions.0.to_qkv.weight", "decoder.middle.1.to_qkv.bias": "decoder.mid_block.attentions.0.to_qkv.bias", "decoder.middle.1.proj.weight": "decoder.mid_block.attentions.0.proj.weight", "decoder.middle.1.proj.bias": "decoder.mid_block.attentions.0.proj.bias", } # Create a mapping for the head 
components head_mapping = { # Encoder head "encoder.head.0.gamma": "encoder.norm_out.gamma", "encoder.head.2.bias": "encoder.conv_out.bias", "encoder.head.2.weight": "encoder.conv_out.weight", # Decoder head "decoder.head.0.gamma": "decoder.norm_out.gamma", "decoder.head.2.bias": "decoder.conv_out.bias", "decoder.head.2.weight": "decoder.conv_out.weight", } # Create a mapping for the quant components quant_mapping = { "conv1.weight": "quant_conv.weight", "conv1.bias": "quant_conv.bias", "conv2.weight": "post_quant_conv.weight", "conv2.bias": "post_quant_conv.bias", } # Process each key in the state dict for key, value in old_state_dict.items(): # Handle middle block keys using the mapping if key in middle_key_mapping: new_key = middle_key_mapping[key] new_state_dict[new_key] = value # Handle attention blocks using the mapping elif key in attention_mapping: new_key = attention_mapping[key] new_state_dict[new_key] = value # Handle head keys using the mapping elif key in head_mapping: new_key = head_mapping[key] new_state_dict[new_key] = value # Handle quant keys using the mapping elif key in quant_mapping: new_key = quant_mapping[key] new_state_dict[new_key] = value # Handle encoder conv1 elif key == "encoder.conv1.weight": new_state_dict["encoder.conv_in.weight"] = value elif key == "encoder.conv1.bias": new_state_dict["encoder.conv_in.bias"] = value # Handle decoder conv1 elif key == "decoder.conv1.weight": new_state_dict["decoder.conv_in.weight"] = value elif key == "decoder.conv1.bias": new_state_dict["decoder.conv_in.bias"] = value # Handle encoder downsamples elif key.startswith("encoder.downsamples."): # Convert to down_blocks new_key = key.replace("encoder.downsamples.", "encoder.down_blocks.") # Convert residual block naming but keep the original structure if ".residual.0.gamma" in new_key: new_key = new_key.replace(".residual.0.gamma", ".norm1.gamma") elif ".residual.2.bias" in new_key: new_key = new_key.replace(".residual.2.bias", ".conv1.bias") elif 
".residual.2.weight" in new_key: new_key = new_key.replace(".residual.2.weight", ".conv1.weight") elif ".residual.3.gamma" in new_key: new_key = new_key.replace(".residual.3.gamma", ".norm2.gamma") elif ".residual.6.bias" in new_key: new_key = new_key.replace(".residual.6.bias", ".conv2.bias") elif ".residual.6.weight" in new_key: new_key = new_key.replace(".residual.6.weight", ".conv2.weight") elif ".shortcut.bias" in new_key: new_key = new_key.replace(".shortcut.bias", ".conv_shortcut.bias") elif ".shortcut.weight" in new_key: new_key = new_key.replace(".shortcut.weight", ".conv_shortcut.weight") new_state_dict[new_key] = value # Handle decoder upsamples elif key.startswith("decoder.upsamples."): # Convert to up_blocks parts = key.split(".") block_idx = int(parts[2]) # Group residual blocks if "residual" in key: if block_idx in [0, 1, 2]: new_block_idx = 0 resnet_idx = block_idx elif block_idx in [4, 5, 6]: new_block_idx = 1 resnet_idx = block_idx - 4 elif block_idx in [8, 9, 10]: new_block_idx = 2 resnet_idx = block_idx - 8 elif block_idx in [12, 13, 14]: new_block_idx = 3 resnet_idx = block_idx - 12 else: # Keep as is for other blocks new_state_dict[key] = value continue # Convert residual block naming if ".residual.0.gamma" in key: new_key = f"decoder.up_blocks.{new_block_idx}.resnets.{resnet_idx}.norm1.gamma" elif ".residual.2.bias" in key: new_key = f"decoder.up_blocks.{new_block_idx}.resnets.{resnet_idx}.conv1.bias" elif ".residual.2.weight" in key: new_key = f"decoder.up_blocks.{new_block_idx}.resnets.{resnet_idx}.conv1.weight" elif ".residual.3.gamma" in key: new_key = f"decoder.up_blocks.{new_block_idx}.resnets.{resnet_idx}.norm2.gamma" elif ".residual.6.bias" in key: new_key = f"decoder.up_blocks.{new_block_idx}.resnets.{resnet_idx}.conv2.bias" elif ".residual.6.weight" in key: new_key = f"decoder.up_blocks.{new_block_idx}.resnets.{resnet_idx}.conv2.weight" else: new_key = key new_state_dict[new_key] = value # Handle shortcut connections elif 
".shortcut." in key: if block_idx == 4: new_key = key.replace(".shortcut.", ".resnets.0.conv_shortcut.") new_key = new_key.replace("decoder.upsamples.4", "decoder.up_blocks.1") else: new_key = key.replace("decoder.upsamples.", "decoder.up_blocks.") new_key = new_key.replace(".shortcut.", ".conv_shortcut.") new_state_dict[new_key] = value # Handle upsamplers elif ".resample." in key or ".time_conv." in key: if block_idx == 3: new_key = key.replace(f"decoder.upsamples.{block_idx}", "decoder.up_blocks.0.upsamplers.0") elif block_idx == 7: new_key = key.replace(f"decoder.upsamples.{block_idx}", "decoder.up_blocks.1.upsamplers.0") elif block_idx == 11: new_key = key.replace(f"decoder.upsamples.{block_idx}", "decoder.up_blocks.2.upsamplers.0") else: new_key = key.replace("decoder.upsamples.", "decoder.up_blocks.") new_state_dict[new_key] = value else: new_key = key.replace("decoder.upsamples.", "decoder.up_blocks.") new_state_dict[new_key] = value else: # Keep other keys unchanged new_state_dict[key] = value with init_empty_weights(): vae = AutoencoderKLWan() vae.load_state_dict(new_state_dict, strict=True, assign=True) return vae vae22_diffusers_config = { "base_dim": 160, "z_dim": 48, "is_residual": True, "in_channels": 12, "out_channels": 12, "decoder_base_dim": 256, "scale_factor_temporal": 4, "scale_factor_spatial": 16, "patch_size": 2, "latents_mean": [ -0.2289, -0.0052, -0.1323, -0.2339, -0.2799, 0.0174, 0.1838, 0.1557, -0.1382, 0.0542, 0.2813, 0.0891, 0.1570, -0.0098, 0.0375, -0.1825, -0.2246, -0.1207, -0.0698, 0.5109, 0.2665, -0.2108, -0.2158, 0.2502, -0.2055, -0.0322, 0.1109, 0.1567, -0.0729, 0.0899, -0.2799, -0.1230, -0.0313, -0.1649, 0.0117, 0.0723, -0.2839, -0.2083, -0.0520, 0.3748, 0.0152, 0.1957, 0.1433, -0.2944, 0.3573, -0.0548, -0.1681, -0.0667, ], "latents_std": [ 0.4765, 1.0364, 0.4514, 1.1677, 0.5313, 0.4990, 0.4818, 0.5013, 0.8158, 1.0344, 0.5894, 1.0901, 0.6885, 0.6165, 0.8454, 0.4978, 0.5759, 0.3523, 0.7135, 0.6804, 0.5833, 1.4146, 0.8986, 0.5659, 
0.7069, 0.5338, 0.4889, 0.4917, 0.4069, 0.4999, 0.6866, 0.4093, 0.5709, 0.6065, 0.6415, 0.4944, 0.5726, 1.2042, 0.5458, 1.6887, 0.3971, 1.0600, 0.3943, 0.5537, 0.5444, 0.4089, 0.7468, 0.7744, ], "clip_output": False, } def convert_vae_22(): vae_ckpt_path = hf_hub_download("Wan-AI/Wan2.2-TI2V-5B", "Wan2.2_VAE.pth") old_state_dict = torch.load(vae_ckpt_path, weights_only=True) new_state_dict = {} # Create mappings for specific components middle_key_mapping = { # Encoder middle block "encoder.middle.0.residual.0.gamma": "encoder.mid_block.resnets.0.norm1.gamma", "encoder.middle.0.residual.2.bias": "encoder.mid_block.resnets.0.conv1.bias", "encoder.middle.0.residual.2.weight": "encoder.mid_block.resnets.0.conv1.weight", "encoder.middle.0.residual.3.gamma": "encoder.mid_block.resnets.0.norm2.gamma", "encoder.middle.0.residual.6.bias": "encoder.mid_block.resnets.0.conv2.bias", "encoder.middle.0.residual.6.weight": "encoder.mid_block.resnets.0.conv2.weight", "encoder.middle.2.residual.0.gamma": "encoder.mid_block.resnets.1.norm1.gamma", "encoder.middle.2.residual.2.bias": "encoder.mid_block.resnets.1.conv1.bias", "encoder.middle.2.residual.2.weight": "encoder.mid_block.resnets.1.conv1.weight", "encoder.middle.2.residual.3.gamma": "encoder.mid_block.resnets.1.norm2.gamma", "encoder.middle.2.residual.6.bias": "encoder.mid_block.resnets.1.conv2.bias", "encoder.middle.2.residual.6.weight": "encoder.mid_block.resnets.1.conv2.weight", # Decoder middle block "decoder.middle.0.residual.0.gamma": "decoder.mid_block.resnets.0.norm1.gamma", "decoder.middle.0.residual.2.bias": "decoder.mid_block.resnets.0.conv1.bias", "decoder.middle.0.residual.2.weight": "decoder.mid_block.resnets.0.conv1.weight", "decoder.middle.0.residual.3.gamma": "decoder.mid_block.resnets.0.norm2.gamma", "decoder.middle.0.residual.6.bias": "decoder.mid_block.resnets.0.conv2.bias", "decoder.middle.0.residual.6.weight": "decoder.mid_block.resnets.0.conv2.weight", "decoder.middle.2.residual.0.gamma": 
"decoder.mid_block.resnets.1.norm1.gamma", "decoder.middle.2.residual.2.bias": "decoder.mid_block.resnets.1.conv1.bias", "decoder.middle.2.residual.2.weight": "decoder.mid_block.resnets.1.conv1.weight", "decoder.middle.2.residual.3.gamma": "decoder.mid_block.resnets.1.norm2.gamma", "decoder.middle.2.residual.6.bias": "decoder.mid_block.resnets.1.conv2.bias", "decoder.middle.2.residual.6.weight": "decoder.mid_block.resnets.1.conv2.weight", } # Create a mapping for attention blocks attention_mapping = { # Encoder middle attention "encoder.middle.1.norm.gamma": "encoder.mid_block.attentions.0.norm.gamma", "encoder.middle.1.to_qkv.weight": "encoder.mid_block.attentions.0.to_qkv.weight", "encoder.middle.1.to_qkv.bias": "encoder.mid_block.attentions.0.to_qkv.bias", "encoder.middle.1.proj.weight": "encoder.mid_block.attentions.0.proj.weight", "encoder.middle.1.proj.bias": "encoder.mid_block.attentions.0.proj.bias", # Decoder middle attention "decoder.middle.1.norm.gamma": "decoder.mid_block.attentions.0.norm.gamma", "decoder.middle.1.to_qkv.weight": "decoder.mid_block.attentions.0.to_qkv.weight", "decoder.middle.1.to_qkv.bias": "decoder.mid_block.attentions.0.to_qkv.bias", "decoder.middle.1.proj.weight": "decoder.mid_block.attentions.0.proj.weight", "decoder.middle.1.proj.bias": "decoder.mid_block.attentions.0.proj.bias", } # Create a mapping for the head components head_mapping = { # Encoder head "encoder.head.0.gamma": "encoder.norm_out.gamma", "encoder.head.2.bias": "encoder.conv_out.bias", "encoder.head.2.weight": "encoder.conv_out.weight", # Decoder head "decoder.head.0.gamma": "decoder.norm_out.gamma", "decoder.head.2.bias": "decoder.conv_out.bias", "decoder.head.2.weight": "decoder.conv_out.weight", } # Create a mapping for the quant components quant_mapping = { "conv1.weight": "quant_conv.weight", "conv1.bias": "quant_conv.bias", "conv2.weight": "post_quant_conv.weight", "conv2.bias": "post_quant_conv.bias", } # Process each key in the state dict for key, value in 
old_state_dict.items(): # Handle middle block keys using the mapping if key in middle_key_mapping: new_key = middle_key_mapping[key] new_state_dict[new_key] = value # Handle attention blocks using the mapping elif key in attention_mapping: new_key = attention_mapping[key] new_state_dict[new_key] = value # Handle head keys using the mapping elif key in head_mapping: new_key = head_mapping[key] new_state_dict[new_key] = value # Handle quant keys using the mapping elif key in quant_mapping: new_key = quant_mapping[key] new_state_dict[new_key] = value # Handle encoder conv1 elif key == "encoder.conv1.weight": new_state_dict["encoder.conv_in.weight"] = value elif key == "encoder.conv1.bias": new_state_dict["encoder.conv_in.bias"] = value # Handle decoder conv1 elif key == "decoder.conv1.weight": new_state_dict["decoder.conv_in.weight"] = value elif key == "decoder.conv1.bias": new_state_dict["decoder.conv_in.bias"] = value # Handle encoder downsamples elif key.startswith("encoder.downsamples."): # Change encoder.downsamples to encoder.down_blocks new_key = key.replace("encoder.downsamples.", "encoder.down_blocks.") # Handle residual blocks - change downsamples to resnets and rename components if "residual" in new_key or "shortcut" in new_key: # Change the second downsamples to resnets new_key = new_key.replace(".downsamples.", ".resnets.") # Rename residual components if ".residual.0.gamma" in new_key: new_key = new_key.replace(".residual.0.gamma", ".norm1.gamma") elif ".residual.2.weight" in new_key: new_key = new_key.replace(".residual.2.weight", ".conv1.weight") elif ".residual.2.bias" in new_key: new_key = new_key.replace(".residual.2.bias", ".conv1.bias") elif ".residual.3.gamma" in new_key: new_key = new_key.replace(".residual.3.gamma", ".norm2.gamma") elif ".residual.6.weight" in new_key: new_key = new_key.replace(".residual.6.weight", ".conv2.weight") elif ".residual.6.bias" in new_key: new_key = new_key.replace(".residual.6.bias", ".conv2.bias") elif 
".shortcut.weight" in new_key: new_key = new_key.replace(".shortcut.weight", ".conv_shortcut.weight") elif ".shortcut.bias" in new_key: new_key = new_key.replace(".shortcut.bias", ".conv_shortcut.bias") # Handle resample blocks - change downsamples to downsampler and remove index elif "resample" in new_key or "time_conv" in new_key: # Change the second downsamples to downsampler and remove the index parts = new_key.split(".") # Find the pattern: encoder.down_blocks.X.downsamples.Y.resample... # We want to change it to: encoder.down_blocks.X.downsampler.resample... if len(parts) >= 4 and parts[3] == "downsamples": # Remove the index (parts[4]) and change downsamples to downsampler new_parts = parts[:3] + ["downsampler"] + parts[5:] new_key = ".".join(new_parts) new_state_dict[new_key] = value # Handle decoder upsamples elif key.startswith("decoder.upsamples."): # Change decoder.upsamples to decoder.up_blocks new_key = key.replace("decoder.upsamples.", "decoder.up_blocks.") # Handle residual blocks - change upsamples to resnets and rename components if "residual" in new_key or "shortcut" in new_key: # Change the second upsamples to resnets new_key = new_key.replace(".upsamples.", ".resnets.") # Rename residual components if ".residual.0.gamma" in new_key: new_key = new_key.replace(".residual.0.gamma", ".norm1.gamma") elif ".residual.2.weight" in new_key: new_key = new_key.replace(".residual.2.weight", ".conv1.weight") elif ".residual.2.bias" in new_key: new_key = new_key.replace(".residual.2.bias", ".conv1.bias") elif ".residual.3.gamma" in new_key: new_key = new_key.replace(".residual.3.gamma", ".norm2.gamma") elif ".residual.6.weight" in new_key: new_key = new_key.replace(".residual.6.weight", ".conv2.weight") elif ".residual.6.bias" in new_key: new_key = new_key.replace(".residual.6.bias", ".conv2.bias") elif ".shortcut.weight" in new_key: new_key = new_key.replace(".shortcut.weight", ".conv_shortcut.weight") elif ".shortcut.bias" in new_key: new_key = 
new_key.replace(".shortcut.bias", ".conv_shortcut.bias") # Handle resample blocks - change upsamples to upsampler and remove index elif "resample" in new_key or "time_conv" in new_key: # Change the second upsamples to upsampler and remove the index parts = new_key.split(".") # Find the pattern: encoder.down_blocks.X.downsamples.Y.resample... # We want to change it to: encoder.down_blocks.X.downsampler.resample... if len(parts) >= 4 and parts[3] == "upsamples": # Remove the index (parts[4]) and change upsamples to upsampler new_parts = parts[:3] + ["upsampler"] + parts[5:] new_key = ".".join(new_parts) new_state_dict[new_key] = value else: # Keep other keys unchanged new_state_dict[key] = value with init_empty_weights(): vae = AutoencoderKLWan(**vae22_diffusers_config) vae.load_state_dict(new_state_dict, strict=True, assign=True) return vae def get_args(): parser = argparse.ArgumentParser() parser.add_argument("--model_type", type=str, default=None) parser.add_argument("--output_path", type=str, required=True) parser.add_argument("--dtype", default="fp32", choices=["fp32", "fp16", "bf16", "none"]) return parser.parse_args() DTYPE_MAPPING = { "fp32": torch.float32, "fp16": torch.float16, "bf16": torch.bfloat16, } if __name__ == "__main__": args = get_args() if "Wan2.2" in args.model_type and "TI2V" not in args.model_type and "Animate" not in args.model_type: transformer = convert_transformer(args.model_type, stage="high_noise_model") transformer_2 = convert_transformer(args.model_type, stage="low_noise_model") else: transformer = convert_transformer(args.model_type) transformer_2 = None if "Wan2.2" in args.model_type and "TI2V" in args.model_type: vae = convert_vae_22() else: vae = convert_vae() text_encoder = UMT5EncoderModel.from_pretrained("google/umt5-xxl", torch_dtype=torch.bfloat16) tokenizer = AutoTokenizer.from_pretrained("google/umt5-xxl") if "FLF2V" in args.model_type: flow_shift = 16.0 elif "TI2V" in args.model_type or "Animate" in args.model_type: 
flow_shift = 5.0 else: flow_shift = 3.0 scheduler = UniPCMultistepScheduler( prediction_type="flow_prediction", use_flow_sigmas=True, num_train_timesteps=1000, flow_shift=flow_shift ) # If user has specified "none", we keep the original dtypes of the state dict without any conversion if args.dtype != "none": dtype = DTYPE_MAPPING[args.dtype] transformer.to(dtype) if transformer_2 is not None: transformer_2.to(dtype) if "Wan2.2" and "I2V" in args.model_type and "TI2V" not in args.model_type: pipe = WanImageToVideoPipeline( transformer=transformer, transformer_2=transformer_2, text_encoder=text_encoder, tokenizer=tokenizer, vae=vae, scheduler=scheduler, boundary_ratio=0.9, ) elif "Wan2.2" and "T2V" in args.model_type: pipe = WanPipeline( transformer=transformer, transformer_2=transformer_2, text_encoder=text_encoder, tokenizer=tokenizer, vae=vae, scheduler=scheduler, boundary_ratio=0.875, ) elif "Wan2.2" and "TI2V" in args.model_type: pipe = WanPipeline( transformer=transformer, text_encoder=text_encoder, tokenizer=tokenizer, vae=vae, scheduler=scheduler, expand_timesteps=True, ) elif "I2V" in args.model_type or "FLF2V" in args.model_type: image_encoder = CLIPVisionModelWithProjection.from_pretrained( "laion/CLIP-ViT-H-14-laion2B-s32B-b79K", torch_dtype=torch.bfloat16 ) image_processor = AutoProcessor.from_pretrained("laion/CLIP-ViT-H-14-laion2B-s32B-b79K") pipe = WanImageToVideoPipeline( transformer=transformer, text_encoder=text_encoder, tokenizer=tokenizer, vae=vae, scheduler=scheduler, image_encoder=image_encoder, image_processor=image_processor, ) elif "Wan2.2-VACE" in args.model_type: pipe = WanVACEPipeline( transformer=transformer, transformer_2=transformer_2, text_encoder=text_encoder, tokenizer=tokenizer, vae=vae, scheduler=scheduler, boundary_ratio=0.875, ) elif "Wan-VACE" in args.model_type: pipe = WanVACEPipeline( transformer=transformer, text_encoder=text_encoder, tokenizer=tokenizer, vae=vae, scheduler=scheduler, ) elif "Animate" in args.model_type: 
image_encoder = CLIPVisionModel.from_pretrained( "laion/CLIP-ViT-H-14-laion2B-s32B-b79K", torch_dtype=torch.bfloat16 ) image_processor = CLIPImageProcessor.from_pretrained("laion/CLIP-ViT-H-14-laion2B-s32B-b79K") pipe = WanAnimatePipeline( transformer=transformer, text_encoder=text_encoder, tokenizer=tokenizer, vae=vae, scheduler=scheduler, image_encoder=image_encoder, image_processor=image_processor, ) else: pipe = WanPipeline( transformer=transformer, text_encoder=text_encoder, tokenizer=tokenizer, vae=vae, scheduler=scheduler, ) pipe.save_pretrained(args.output_path, safe_serialization=True, max_shard_size="5GB")
{ "repo_id": "huggingface/diffusers", "file_path": "scripts/convert_wan_to_diffusers.py", "license": "Apache License 2.0", "lines": 1191, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
huggingface/diffusers:src/diffusers/models/autoencoders/autoencoder_kl_wan.py
# Copyright 2025 The Wan Team and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch import torch.nn as nn import torch.nn.functional as F from ...configuration_utils import ConfigMixin, register_to_config from ...loaders import FromOriginalModelMixin from ...utils import logging from ...utils.accelerate_utils import apply_forward_hook from ..activations import get_activation from ..modeling_outputs import AutoencoderKLOutput from ..modeling_utils import ModelMixin from .vae import AutoencoderMixin, DecoderOutput, DiagonalGaussianDistribution logger = logging.get_logger(__name__) # pylint: disable=invalid-name CACHE_T = 2 class AvgDown3D(nn.Module): def __init__( self, in_channels, out_channels, factor_t, factor_s=1, ): super().__init__() self.in_channels = in_channels self.out_channels = out_channels self.factor_t = factor_t self.factor_s = factor_s self.factor = self.factor_t * self.factor_s * self.factor_s assert in_channels * self.factor % out_channels == 0 self.group_size = in_channels * self.factor // out_channels def forward(self, x: torch.Tensor) -> torch.Tensor: pad_t = (self.factor_t - x.shape[2] % self.factor_t) % self.factor_t pad = (0, 0, 0, 0, pad_t, 0) x = F.pad(x, pad) B, C, T, H, W = x.shape x = x.view( B, C, T // self.factor_t, self.factor_t, H // self.factor_s, self.factor_s, W // self.factor_s, self.factor_s, ) x = x.permute(0, 1, 3, 5, 7, 2, 4, 6).contiguous() x = x.view( B, C * self.factor, T // 
self.factor_t, H // self.factor_s, W // self.factor_s, ) x = x.view( B, self.out_channels, self.group_size, T // self.factor_t, H // self.factor_s, W // self.factor_s, ) x = x.mean(dim=2) return x class DupUp3D(nn.Module): def __init__( self, in_channels: int, out_channels: int, factor_t, factor_s=1, ): super().__init__() self.in_channels = in_channels self.out_channels = out_channels self.factor_t = factor_t self.factor_s = factor_s self.factor = self.factor_t * self.factor_s * self.factor_s assert out_channels * self.factor % in_channels == 0 self.repeats = out_channels * self.factor // in_channels def forward(self, x: torch.Tensor, first_chunk=False) -> torch.Tensor: x = x.repeat_interleave(self.repeats, dim=1) x = x.view( x.size(0), self.out_channels, self.factor_t, self.factor_s, self.factor_s, x.size(2), x.size(3), x.size(4), ) x = x.permute(0, 1, 5, 2, 6, 3, 7, 4).contiguous() x = x.view( x.size(0), self.out_channels, x.size(2) * self.factor_t, x.size(4) * self.factor_s, x.size(6) * self.factor_s, ) if first_chunk: x = x[:, :, self.factor_t - 1 :, :, :] return x class WanCausalConv3d(nn.Conv3d): r""" A custom 3D causal convolution layer with feature caching support. This layer extends the standard Conv3D layer by ensuring causality in the time dimension and handling feature caching for efficient inference. Args: in_channels (int): Number of channels in the input image out_channels (int): Number of channels produced by the convolution kernel_size (int or tuple): Size of the convolving kernel stride (int or tuple, optional): Stride of the convolution. Default: 1 padding (int or tuple, optional): Zero-padding added to all three sides of the input. 
Default: 0 """ def __init__( self, in_channels: int, out_channels: int, kernel_size: int | tuple[int, int, int], stride: int | tuple[int, int, int] = 1, padding: int | tuple[int, int, int] = 0, ) -> None: super().__init__( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, ) # Set up causal padding self._padding = (self.padding[2], self.padding[2], self.padding[1], self.padding[1], 2 * self.padding[0], 0) self.padding = (0, 0, 0) def forward(self, x, cache_x=None): padding = list(self._padding) if cache_x is not None and self._padding[4] > 0: cache_x = cache_x.to(x.device) x = torch.cat([cache_x, x], dim=2) padding[4] -= cache_x.shape[2] x = F.pad(x, padding) return super().forward(x) class WanRMS_norm(nn.Module): r""" A custom RMS normalization layer. Args: dim (int): The number of dimensions to normalize over. channel_first (bool, optional): Whether the input tensor has channels as the first dimension. Default is True. images (bool, optional): Whether the input represents image data. Default is True. bias (bool, optional): Whether to include a learnable bias term. Default is False. """ def __init__(self, dim: int, channel_first: bool = True, images: bool = True, bias: bool = False) -> None: super().__init__() broadcastable_dims = (1, 1, 1) if not images else (1, 1) shape = (dim, *broadcastable_dims) if channel_first else (dim,) self.channel_first = channel_first self.scale = dim**0.5 self.gamma = nn.Parameter(torch.ones(shape)) self.bias = nn.Parameter(torch.zeros(shape)) if bias else 0.0 def forward(self, x): return F.normalize(x, dim=(1 if self.channel_first else -1)) * self.scale * self.gamma + self.bias class WanUpsample(nn.Upsample): r""" Perform upsampling while ensuring the output tensor has the same data type as the input. Args: x (torch.Tensor): Input tensor to be upsampled. Returns: torch.Tensor: Upsampled tensor with the same data type as the input. 
""" def forward(self, x): return super().forward(x.float()).type_as(x) class WanResample(nn.Module): r""" A custom resampling module for 2D and 3D data. Args: dim (int): The number of input/output channels. mode (str): The resampling mode. Must be one of: - 'none': No resampling (identity operation). - 'upsample2d': 2D upsampling with nearest-exact interpolation and convolution. - 'upsample3d': 3D upsampling with nearest-exact interpolation, convolution, and causal 3D convolution. - 'downsample2d': 2D downsampling with zero-padding and convolution. - 'downsample3d': 3D downsampling with zero-padding, convolution, and causal 3D convolution. """ def __init__(self, dim: int, mode: str, upsample_out_dim: int = None) -> None: super().__init__() self.dim = dim self.mode = mode # default to dim //2 if upsample_out_dim is None: upsample_out_dim = dim // 2 # layers if mode == "upsample2d": self.resample = nn.Sequential( WanUpsample(scale_factor=(2.0, 2.0), mode="nearest-exact"), nn.Conv2d(dim, upsample_out_dim, 3, padding=1), ) elif mode == "upsample3d": self.resample = nn.Sequential( WanUpsample(scale_factor=(2.0, 2.0), mode="nearest-exact"), nn.Conv2d(dim, upsample_out_dim, 3, padding=1), ) self.time_conv = WanCausalConv3d(dim, dim * 2, (3, 1, 1), padding=(1, 0, 0)) elif mode == "downsample2d": self.resample = nn.Sequential(nn.ZeroPad2d((0, 1, 0, 1)), nn.Conv2d(dim, dim, 3, stride=(2, 2))) elif mode == "downsample3d": self.resample = nn.Sequential(nn.ZeroPad2d((0, 1, 0, 1)), nn.Conv2d(dim, dim, 3, stride=(2, 2))) self.time_conv = WanCausalConv3d(dim, dim, (3, 1, 1), stride=(2, 1, 1), padding=(0, 0, 0)) else: self.resample = nn.Identity() def forward(self, x, feat_cache=None, feat_idx=[0]): b, c, t, h, w = x.size() if self.mode == "upsample3d": if feat_cache is not None: idx = feat_idx[0] if feat_cache[idx] is None: feat_cache[idx] = "Rep" feat_idx[0] += 1 else: cache_x = x[:, :, -CACHE_T:, :, :].clone() if cache_x.shape[2] < 2 and feat_cache[idx] is not None and 
feat_cache[idx] != "Rep": # cache last frame of last two chunk cache_x = torch.cat( [feat_cache[idx][:, :, -1, :, :].unsqueeze(2).to(cache_x.device), cache_x], dim=2 ) if cache_x.shape[2] < 2 and feat_cache[idx] is not None and feat_cache[idx] == "Rep": cache_x = torch.cat([torch.zeros_like(cache_x).to(cache_x.device), cache_x], dim=2) if feat_cache[idx] == "Rep": x = self.time_conv(x) else: x = self.time_conv(x, feat_cache[idx]) feat_cache[idx] = cache_x feat_idx[0] += 1 x = x.reshape(b, 2, c, t, h, w) x = torch.stack((x[:, 0, :, :, :, :], x[:, 1, :, :, :, :]), 3) x = x.reshape(b, c, t * 2, h, w) t = x.shape[2] x = x.permute(0, 2, 1, 3, 4).reshape(b * t, c, h, w) x = self.resample(x) x = x.view(b, t, x.size(1), x.size(2), x.size(3)).permute(0, 2, 1, 3, 4) if self.mode == "downsample3d": if feat_cache is not None: idx = feat_idx[0] if feat_cache[idx] is None: feat_cache[idx] = x.clone() feat_idx[0] += 1 else: cache_x = x[:, :, -1:, :, :].clone() x = self.time_conv(torch.cat([feat_cache[idx][:, :, -1:, :, :], x], 2)) feat_cache[idx] = cache_x feat_idx[0] += 1 return x class WanResidualBlock(nn.Module): r""" A custom residual block module. Args: in_dim (int): Number of input channels. out_dim (int): Number of output channels. dropout (float, optional): Dropout rate for the dropout layer. Default is 0.0. non_linearity (str, optional): Type of non-linearity to use. Default is "silu". 
""" def __init__( self, in_dim: int, out_dim: int, dropout: float = 0.0, non_linearity: str = "silu", ) -> None: super().__init__() self.in_dim = in_dim self.out_dim = out_dim self.nonlinearity = get_activation(non_linearity) # layers self.norm1 = WanRMS_norm(in_dim, images=False) self.conv1 = WanCausalConv3d(in_dim, out_dim, 3, padding=1) self.norm2 = WanRMS_norm(out_dim, images=False) self.dropout = nn.Dropout(dropout) self.conv2 = WanCausalConv3d(out_dim, out_dim, 3, padding=1) self.conv_shortcut = WanCausalConv3d(in_dim, out_dim, 1) if in_dim != out_dim else nn.Identity() def forward(self, x, feat_cache=None, feat_idx=[0]): # Apply shortcut connection h = self.conv_shortcut(x) # First normalization and activation x = self.norm1(x) x = self.nonlinearity(x) if feat_cache is not None: idx = feat_idx[0] cache_x = x[:, :, -CACHE_T:, :, :].clone() if cache_x.shape[2] < 2 and feat_cache[idx] is not None: cache_x = torch.cat([feat_cache[idx][:, :, -1, :, :].unsqueeze(2).to(cache_x.device), cache_x], dim=2) x = self.conv1(x, feat_cache[idx]) feat_cache[idx] = cache_x feat_idx[0] += 1 else: x = self.conv1(x) # Second normalization and activation x = self.norm2(x) x = self.nonlinearity(x) # Dropout x = self.dropout(x) if feat_cache is not None: idx = feat_idx[0] cache_x = x[:, :, -CACHE_T:, :, :].clone() if cache_x.shape[2] < 2 and feat_cache[idx] is not None: cache_x = torch.cat([feat_cache[idx][:, :, -1, :, :].unsqueeze(2).to(cache_x.device), cache_x], dim=2) x = self.conv2(x, feat_cache[idx]) feat_cache[idx] = cache_x feat_idx[0] += 1 else: x = self.conv2(x) # Add residual connection return x + h class WanAttentionBlock(nn.Module): r""" Causal self-attention with a single head. Args: dim (int): The number of channels in the input tensor. 
""" def __init__(self, dim): super().__init__() self.dim = dim # layers self.norm = WanRMS_norm(dim) self.to_qkv = nn.Conv2d(dim, dim * 3, 1) self.proj = nn.Conv2d(dim, dim, 1) def forward(self, x): identity = x batch_size, channels, time, height, width = x.size() x = x.permute(0, 2, 1, 3, 4).reshape(batch_size * time, channels, height, width) x = self.norm(x) # compute query, key, value qkv = self.to_qkv(x) qkv = qkv.reshape(batch_size * time, 1, channels * 3, -1) qkv = qkv.permute(0, 1, 3, 2).contiguous() q, k, v = qkv.chunk(3, dim=-1) # apply attention x = F.scaled_dot_product_attention(q, k, v) x = x.squeeze(1).permute(0, 2, 1).reshape(batch_size * time, channels, height, width) # output projection x = self.proj(x) # Reshape back: [(b*t), c, h, w] -> [b, c, t, h, w] x = x.view(batch_size, time, channels, height, width) x = x.permute(0, 2, 1, 3, 4) return x + identity class WanMidBlock(nn.Module): """ Middle block for WanVAE encoder and decoder. Args: dim (int): Number of input/output channels. dropout (float): Dropout rate. non_linearity (str): Type of non-linearity to use. 
""" def __init__(self, dim: int, dropout: float = 0.0, non_linearity: str = "silu", num_layers: int = 1): super().__init__() self.dim = dim # Create the components resnets = [WanResidualBlock(dim, dim, dropout, non_linearity)] attentions = [] for _ in range(num_layers): attentions.append(WanAttentionBlock(dim)) resnets.append(WanResidualBlock(dim, dim, dropout, non_linearity)) self.attentions = nn.ModuleList(attentions) self.resnets = nn.ModuleList(resnets) self.gradient_checkpointing = False def forward(self, x, feat_cache=None, feat_idx=[0]): # First residual block x = self.resnets[0](x, feat_cache=feat_cache, feat_idx=feat_idx) # Process through attention and residual blocks for attn, resnet in zip(self.attentions, self.resnets[1:]): if attn is not None: x = attn(x) x = resnet(x, feat_cache=feat_cache, feat_idx=feat_idx) return x class WanResidualDownBlock(nn.Module): def __init__(self, in_dim, out_dim, dropout, num_res_blocks, temperal_downsample=False, down_flag=False): super().__init__() # Shortcut path with downsample self.avg_shortcut = AvgDown3D( in_dim, out_dim, factor_t=2 if temperal_downsample else 1, factor_s=2 if down_flag else 1, ) # Main path with residual blocks and downsample resnets = [] for _ in range(num_res_blocks): resnets.append(WanResidualBlock(in_dim, out_dim, dropout)) in_dim = out_dim self.resnets = nn.ModuleList(resnets) # Add the final downsample block if down_flag: mode = "downsample3d" if temperal_downsample else "downsample2d" self.downsampler = WanResample(out_dim, mode=mode) else: self.downsampler = None def forward(self, x, feat_cache=None, feat_idx=[0]): x_copy = x.clone() for resnet in self.resnets: x = resnet(x, feat_cache=feat_cache, feat_idx=feat_idx) if self.downsampler is not None: x = self.downsampler(x, feat_cache=feat_cache, feat_idx=feat_idx) return x + self.avg_shortcut(x_copy) class WanEncoder3d(nn.Module): r""" A 3D encoder module. Args: dim (int): The base number of channels in the first layer. 
z_dim (int): The dimensionality of the latent space. dim_mult (list of int): Multipliers for the number of channels in each block. num_res_blocks (int): Number of residual blocks in each block. attn_scales (list of float): Scales at which to apply attention mechanisms. temperal_downsample (list of bool): Whether to downsample temporally in each block. dropout (float): Dropout rate for the dropout layers. non_linearity (str): Type of non-linearity to use. """ def __init__( self, in_channels: int = 3, dim=128, z_dim=4, dim_mult=[1, 2, 4, 4], num_res_blocks=2, attn_scales=[], temperal_downsample=[True, True, False], dropout=0.0, non_linearity: str = "silu", is_residual: bool = False, # wan 2.2 vae use a residual downblock ): super().__init__() self.dim = dim self.z_dim = z_dim self.dim_mult = dim_mult self.num_res_blocks = num_res_blocks self.attn_scales = attn_scales self.temperal_downsample = temperal_downsample self.nonlinearity = get_activation(non_linearity) # dimensions dims = [dim * u for u in [1] + dim_mult] scale = 1.0 # init block self.conv_in = WanCausalConv3d(in_channels, dims[0], 3, padding=1) # downsample blocks self.down_blocks = nn.ModuleList([]) for i, (in_dim, out_dim) in enumerate(zip(dims[:-1], dims[1:])): # residual (+attention) blocks if is_residual: self.down_blocks.append( WanResidualDownBlock( in_dim, out_dim, dropout, num_res_blocks, temperal_downsample=temperal_downsample[i] if i != len(dim_mult) - 1 else False, down_flag=i != len(dim_mult) - 1, ) ) else: for _ in range(num_res_blocks): self.down_blocks.append(WanResidualBlock(in_dim, out_dim, dropout)) if scale in attn_scales: self.down_blocks.append(WanAttentionBlock(out_dim)) in_dim = out_dim # downsample block if i != len(dim_mult) - 1: mode = "downsample3d" if temperal_downsample[i] else "downsample2d" self.down_blocks.append(WanResample(out_dim, mode=mode)) scale /= 2.0 # middle blocks self.mid_block = WanMidBlock(out_dim, dropout, non_linearity, num_layers=1) # output blocks 
self.norm_out = WanRMS_norm(out_dim, images=False) self.conv_out = WanCausalConv3d(out_dim, z_dim, 3, padding=1) self.gradient_checkpointing = False def forward(self, x, feat_cache=None, feat_idx=[0]): if feat_cache is not None: idx = feat_idx[0] cache_x = x[:, :, -CACHE_T:, :, :].clone() if cache_x.shape[2] < 2 and feat_cache[idx] is not None: # cache last frame of last two chunk cache_x = torch.cat([feat_cache[idx][:, :, -1, :, :].unsqueeze(2).to(cache_x.device), cache_x], dim=2) x = self.conv_in(x, feat_cache[idx]) feat_cache[idx] = cache_x feat_idx[0] += 1 else: x = self.conv_in(x) ## downsamples for layer in self.down_blocks: if feat_cache is not None: x = layer(x, feat_cache=feat_cache, feat_idx=feat_idx) else: x = layer(x) ## middle x = self.mid_block(x, feat_cache=feat_cache, feat_idx=feat_idx) ## head x = self.norm_out(x) x = self.nonlinearity(x) if feat_cache is not None: idx = feat_idx[0] cache_x = x[:, :, -CACHE_T:, :, :].clone() if cache_x.shape[2] < 2 and feat_cache[idx] is not None: # cache last frame of last two chunk cache_x = torch.cat([feat_cache[idx][:, :, -1, :, :].unsqueeze(2).to(cache_x.device), cache_x], dim=2) x = self.conv_out(x, feat_cache[idx]) feat_cache[idx] = cache_x feat_idx[0] += 1 else: x = self.conv_out(x) return x class WanResidualUpBlock(nn.Module): """ A block that handles upsampling for the WanVAE decoder. 
Args: in_dim (int): Input dimension out_dim (int): Output dimension num_res_blocks (int): Number of residual blocks dropout (float): Dropout rate temperal_upsample (bool): Whether to upsample on temporal dimension up_flag (bool): Whether to upsample or not non_linearity (str): Type of non-linearity to use """ def __init__( self, in_dim: int, out_dim: int, num_res_blocks: int, dropout: float = 0.0, temperal_upsample: bool = False, up_flag: bool = False, non_linearity: str = "silu", ): super().__init__() self.in_dim = in_dim self.out_dim = out_dim if up_flag: self.avg_shortcut = DupUp3D( in_dim, out_dim, factor_t=2 if temperal_upsample else 1, factor_s=2, ) else: self.avg_shortcut = None # create residual blocks resnets = [] current_dim = in_dim for _ in range(num_res_blocks + 1): resnets.append(WanResidualBlock(current_dim, out_dim, dropout, non_linearity)) current_dim = out_dim self.resnets = nn.ModuleList(resnets) # Add upsampling layer if needed if up_flag: upsample_mode = "upsample3d" if temperal_upsample else "upsample2d" self.upsampler = WanResample(out_dim, mode=upsample_mode, upsample_out_dim=out_dim) else: self.upsampler = None self.gradient_checkpointing = False def forward(self, x, feat_cache=None, feat_idx=[0], first_chunk=False): """ Forward pass through the upsampling block. 
Args: x (torch.Tensor): Input tensor feat_cache (list, optional): Feature cache for causal convolutions feat_idx (list, optional): Feature index for cache management Returns: torch.Tensor: Output tensor """ x_copy = x.clone() for resnet in self.resnets: if feat_cache is not None: x = resnet(x, feat_cache=feat_cache, feat_idx=feat_idx) else: x = resnet(x) if self.upsampler is not None: if feat_cache is not None: x = self.upsampler(x, feat_cache=feat_cache, feat_idx=feat_idx) else: x = self.upsampler(x) if self.avg_shortcut is not None: x = x + self.avg_shortcut(x_copy, first_chunk=first_chunk) return x class WanUpBlock(nn.Module): """ A block that handles upsampling for the WanVAE decoder. Args: in_dim (int): Input dimension out_dim (int): Output dimension num_res_blocks (int): Number of residual blocks dropout (float): Dropout rate upsample_mode (str, optional): Mode for upsampling ('upsample2d' or 'upsample3d') non_linearity (str): Type of non-linearity to use """ def __init__( self, in_dim: int, out_dim: int, num_res_blocks: int, dropout: float = 0.0, upsample_mode: str | None = None, non_linearity: str = "silu", ): super().__init__() self.in_dim = in_dim self.out_dim = out_dim # Create layers list resnets = [] # Add residual blocks and attention if needed current_dim = in_dim for _ in range(num_res_blocks + 1): resnets.append(WanResidualBlock(current_dim, out_dim, dropout, non_linearity)) current_dim = out_dim self.resnets = nn.ModuleList(resnets) # Add upsampling layer if needed self.upsamplers = None if upsample_mode is not None: self.upsamplers = nn.ModuleList([WanResample(out_dim, mode=upsample_mode)]) self.gradient_checkpointing = False def forward(self, x, feat_cache=None, feat_idx=[0], first_chunk=None): """ Forward pass through the upsampling block. 
Args: x (torch.Tensor): Input tensor feat_cache (list, optional): Feature cache for causal convolutions feat_idx (list, optional): Feature index for cache management Returns: torch.Tensor: Output tensor """ for resnet in self.resnets: if feat_cache is not None: x = resnet(x, feat_cache=feat_cache, feat_idx=feat_idx) else: x = resnet(x) if self.upsamplers is not None: if feat_cache is not None: x = self.upsamplers[0](x, feat_cache=feat_cache, feat_idx=feat_idx) else: x = self.upsamplers[0](x) return x class WanDecoder3d(nn.Module): r""" A 3D decoder module. Args: dim (int): The base number of channels in the first layer. z_dim (int): The dimensionality of the latent space. dim_mult (list of int): Multipliers for the number of channels in each block. num_res_blocks (int): Number of residual blocks in each block. attn_scales (list of float): Scales at which to apply attention mechanisms. temperal_upsample (list of bool): Whether to upsample temporally in each block. dropout (float): Dropout rate for the dropout layers. non_linearity (str): Type of non-linearity to use. 
""" def __init__( self, dim=128, z_dim=4, dim_mult=[1, 2, 4, 4], num_res_blocks=2, attn_scales=[], temperal_upsample=[False, True, True], dropout=0.0, non_linearity: str = "silu", out_channels: int = 3, is_residual: bool = False, ): super().__init__() self.dim = dim self.z_dim = z_dim self.dim_mult = dim_mult self.num_res_blocks = num_res_blocks self.attn_scales = attn_scales self.temperal_upsample = temperal_upsample self.nonlinearity = get_activation(non_linearity) # dimensions dims = [dim * u for u in [dim_mult[-1]] + dim_mult[::-1]] # init block self.conv_in = WanCausalConv3d(z_dim, dims[0], 3, padding=1) # middle blocks self.mid_block = WanMidBlock(dims[0], dropout, non_linearity, num_layers=1) # upsample blocks self.up_blocks = nn.ModuleList([]) for i, (in_dim, out_dim) in enumerate(zip(dims[:-1], dims[1:])): # residual (+attention) blocks if i > 0 and not is_residual: # wan vae 2.1 in_dim = in_dim // 2 # determine if we need upsampling up_flag = i != len(dim_mult) - 1 # determine upsampling mode, if not upsampling, set to None upsample_mode = None if up_flag and temperal_upsample[i]: upsample_mode = "upsample3d" elif up_flag: upsample_mode = "upsample2d" # Create and add the upsampling block if is_residual: up_block = WanResidualUpBlock( in_dim=in_dim, out_dim=out_dim, num_res_blocks=num_res_blocks, dropout=dropout, temperal_upsample=temperal_upsample[i] if up_flag else False, up_flag=up_flag, non_linearity=non_linearity, ) else: up_block = WanUpBlock( in_dim=in_dim, out_dim=out_dim, num_res_blocks=num_res_blocks, dropout=dropout, upsample_mode=upsample_mode, non_linearity=non_linearity, ) self.up_blocks.append(up_block) # output blocks self.norm_out = WanRMS_norm(out_dim, images=False) self.conv_out = WanCausalConv3d(out_dim, out_channels, 3, padding=1) self.gradient_checkpointing = False def forward(self, x, feat_cache=None, feat_idx=[0], first_chunk=False): ## conv1 if feat_cache is not None: idx = feat_idx[0] cache_x = x[:, :, -CACHE_T:, :, :].clone() if 
cache_x.shape[2] < 2 and feat_cache[idx] is not None:
                # cache last frame of last two chunk
                cache_x = torch.cat([feat_cache[idx][:, :, -1, :, :].unsqueeze(2).to(cache_x.device), cache_x], dim=2)
            x = self.conv_in(x, feat_cache[idx])
            feat_cache[idx] = cache_x
            feat_idx[0] += 1
        else:
            x = self.conv_in(x)

        ## middle
        x = self.mid_block(x, feat_cache=feat_cache, feat_idx=feat_idx)

        ## upsamples
        for up_block in self.up_blocks:
            x = up_block(x, feat_cache=feat_cache, feat_idx=feat_idx, first_chunk=first_chunk)

        ## head
        x = self.norm_out(x)
        x = self.nonlinearity(x)
        if feat_cache is not None:
            idx = feat_idx[0]
            cache_x = x[:, :, -CACHE_T:, :, :].clone()
            if cache_x.shape[2] < 2 and feat_cache[idx] is not None:
                # cache last frame of last two chunk
                cache_x = torch.cat([feat_cache[idx][:, :, -1, :, :].unsqueeze(2).to(cache_x.device), cache_x], dim=2)
            x = self.conv_out(x, feat_cache[idx])
            feat_cache[idx] = cache_x
            feat_idx[0] += 1
        else:
            x = self.conv_out(x)
        return x


def patchify(x, patch_size):
    """Fold non-overlapping spatial patches into the channel dimension.

    Rearranges ``[b, c, f, h, w]`` into ``[b, c * p * p, f, h // p, w // p]``
    (a pixel-unshuffle-style packing over the spatial dimensions only; the
    frame dimension is untouched). A ``patch_size`` of 1 is a no-op.

    Raises:
        ValueError: If ``x`` is not 5-dimensional, or if height/width are not
            divisible by ``patch_size``.
    """
    if patch_size == 1:
        return x

    if x.dim() != 5:
        raise ValueError(f"Invalid input shape: {x.shape}")

    # x shape: [batch_size, channels, frames, height, width]
    batch_size, channels, frames, height, width = x.shape

    # Ensure height and width are divisible by patch_size
    if height % patch_size != 0 or width % patch_size != 0:
        raise ValueError(f"Height ({height}) and width ({width}) must be divisible by patch_size ({patch_size})")

    # Reshape to [batch_size, channels, frames, height//patch_size, patch_size, width//patch_size, patch_size]
    x = x.view(batch_size, channels, frames, height // patch_size, patch_size, width // patch_size, patch_size)

    # Rearrange to [batch_size, channels * patch_size * patch_size, frames, height//patch_size, width//patch_size]
    x = x.permute(0, 1, 6, 4, 2, 3, 5).contiguous()
    x = x.view(batch_size, channels * patch_size * patch_size, frames, height // patch_size, width // patch_size)

    return x


def unpatchify(x, patch_size):
    """Inverse of :func:`patchify`: unfold channel-packed patches back to spatial dims.

    Rearranges ``[b, c * p * p, f, h, w]`` into ``[b, c, f, h * p, w * p]``.
    A ``patch_size`` of 1 is a no-op.
    """
    if patch_size == 1:
        return x

    if x.dim() != 5:
        raise
ValueError(f"Invalid input shape: {x.shape}") # x shape: [batch_size, (channels * patch_size * patch_size), frame, height, width] batch_size, c_patches, frames, height, width = x.shape channels = c_patches // (patch_size * patch_size) # Reshape to [b, c, patch_size, patch_size, f, h, w] x = x.view(batch_size, channels, patch_size, patch_size, frames, height, width) # Rearrange to [b, c, f, h * patch_size, w * patch_size] x = x.permute(0, 1, 4, 5, 3, 6, 2).contiguous() x = x.view(batch_size, channels, frames, height * patch_size, width * patch_size) return x class AutoencoderKLWan(ModelMixin, AutoencoderMixin, ConfigMixin, FromOriginalModelMixin): r""" A VAE model with KL loss for encoding videos into latents and decoding latent representations into videos. Introduced in [Wan 2.1]. This model inherits from [`ModelMixin`]. Check the superclass documentation for it's generic methods implemented for all models (such as downloading or saving). """ _supports_gradient_checkpointing = False _group_offload_block_modules = ["quant_conv", "post_quant_conv", "encoder", "decoder"] # keys toignore when AlignDeviceHook moves inputs/outputs between devices # these are shared mutable state modified in-place _skip_keys = ["feat_cache", "feat_idx"] @register_to_config def __init__( self, base_dim: int = 96, decoder_base_dim: int | None = None, z_dim: int = 16, dim_mult: list[int] = [1, 2, 4, 4], num_res_blocks: int = 2, attn_scales: list[float] = [], temperal_downsample: list[bool] = [False, True, True], dropout: float = 0.0, latents_mean: list[float] = [ -0.7571, -0.7089, -0.9113, 0.1075, -0.1745, 0.9653, -0.1517, 1.5508, 0.4134, -0.0715, 0.5517, -0.3632, -0.1922, -0.9497, 0.2503, -0.2921, ], latents_std: list[float] = [ 2.8184, 1.4541, 2.3275, 2.6558, 1.2196, 1.7708, 2.6052, 2.0743, 3.2687, 2.1526, 2.8652, 1.5579, 1.6382, 1.1253, 2.8251, 1.9160, ], is_residual: bool = False, in_channels: int = 3, out_channels: int = 3, patch_size: int | None = None, scale_factor_temporal: int | 
None = 4, scale_factor_spatial: int | None = 8, ) -> None: super().__init__() self.z_dim = z_dim self.temperal_downsample = temperal_downsample self.temperal_upsample = temperal_downsample[::-1] if decoder_base_dim is None: decoder_base_dim = base_dim self.encoder = WanEncoder3d( in_channels=in_channels, dim=base_dim, z_dim=z_dim * 2, dim_mult=dim_mult, num_res_blocks=num_res_blocks, attn_scales=attn_scales, temperal_downsample=temperal_downsample, dropout=dropout, is_residual=is_residual, ) self.quant_conv = WanCausalConv3d(z_dim * 2, z_dim * 2, 1) self.post_quant_conv = WanCausalConv3d(z_dim, z_dim, 1) self.decoder = WanDecoder3d( dim=decoder_base_dim, z_dim=z_dim, dim_mult=dim_mult, num_res_blocks=num_res_blocks, attn_scales=attn_scales, temperal_upsample=self.temperal_upsample, dropout=dropout, out_channels=out_channels, is_residual=is_residual, ) self.spatial_compression_ratio = scale_factor_spatial # When decoding a batch of video latents at a time, one can save memory by slicing across the batch dimension # to perform decoding of a single video latent at a time. self.use_slicing = False # When decoding spatially large video latents, the memory requirement is very high. By breaking the video latent # frames spatially into smaller tiles and performing multiple forward passes for decoding, and then blending the # intermediate tiles together, the memory requirement can be lowered. 
self.use_tiling = False # The minimal tile height and width for spatial tiling to be used self.tile_sample_min_height = 256 self.tile_sample_min_width = 256 # The minimal distance between two spatial tiles self.tile_sample_stride_height = 192 self.tile_sample_stride_width = 192 # Precompute and cache conv counts for encoder and decoder for clear_cache speedup self._cached_conv_counts = { "decoder": sum(isinstance(m, WanCausalConv3d) for m in self.decoder.modules()) if self.decoder is not None else 0, "encoder": sum(isinstance(m, WanCausalConv3d) for m in self.encoder.modules()) if self.encoder is not None else 0, } def enable_tiling( self, tile_sample_min_height: int | None = None, tile_sample_min_width: int | None = None, tile_sample_stride_height: float | None = None, tile_sample_stride_width: float | None = None, ) -> None: r""" Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow processing larger images. Args: tile_sample_min_height (`int`, *optional*): The minimum height required for a sample to be separated into tiles across the height dimension. tile_sample_min_width (`int`, *optional*): The minimum width required for a sample to be separated into tiles across the width dimension. tile_sample_stride_height (`int`, *optional*): The minimum amount of overlap between two consecutive vertical tiles. This is to ensure that there are no tiling artifacts produced across the height dimension. tile_sample_stride_width (`int`, *optional*): The stride between two consecutive horizontal tiles. This is to ensure that there are no tiling artifacts produced across the width dimension. 
""" self.use_tiling = True self.tile_sample_min_height = tile_sample_min_height or self.tile_sample_min_height self.tile_sample_min_width = tile_sample_min_width or self.tile_sample_min_width self.tile_sample_stride_height = tile_sample_stride_height or self.tile_sample_stride_height self.tile_sample_stride_width = tile_sample_stride_width or self.tile_sample_stride_width def clear_cache(self): # Use cached conv counts for decoder and encoder to avoid re-iterating modules each call self._conv_num = self._cached_conv_counts["decoder"] self._conv_idx = [0] self._feat_map = [None] * self._conv_num # cache encode self._enc_conv_num = self._cached_conv_counts["encoder"] self._enc_conv_idx = [0] self._enc_feat_map = [None] * self._enc_conv_num def _encode(self, x: torch.Tensor): _, _, num_frame, height, width = x.shape self.clear_cache() if self.config.patch_size is not None: x = patchify(x, patch_size=self.config.patch_size) if self.use_tiling and (width > self.tile_sample_min_width or height > self.tile_sample_min_height): return self.tiled_encode(x) iter_ = 1 + (num_frame - 1) // 4 for i in range(iter_): self._enc_conv_idx = [0] if i == 0: out = self.encoder(x[:, :, :1, :, :], feat_cache=self._enc_feat_map, feat_idx=self._enc_conv_idx) else: out_ = self.encoder( x[:, :, 1 + 4 * (i - 1) : 1 + 4 * i, :, :], feat_cache=self._enc_feat_map, feat_idx=self._enc_conv_idx, ) out = torch.cat([out, out_], 2) enc = self.quant_conv(out) self.clear_cache() return enc @apply_forward_hook def encode( self, x: torch.Tensor, return_dict: bool = True ) -> AutoencoderKLOutput | tuple[DiagonalGaussianDistribution]: r""" Encode a batch of images into latents. Args: x (`torch.Tensor`): Input batch of images. return_dict (`bool`, *optional*, defaults to `True`): Whether to return a [`~models.autoencoder_kl.AutoencoderKLOutput`] instead of a plain tuple. Returns: The latent representations of the encoded videos. 
If `return_dict` is True, a [`~models.autoencoder_kl.AutoencoderKLOutput`] is returned, otherwise a plain `tuple` is returned. """ if self.use_slicing and x.shape[0] > 1: encoded_slices = [self._encode(x_slice) for x_slice in x.split(1)] h = torch.cat(encoded_slices) else: h = self._encode(x) posterior = DiagonalGaussianDistribution(h) if not return_dict: return (posterior,) return AutoencoderKLOutput(latent_dist=posterior) def _decode(self, z: torch.Tensor, return_dict: bool = True): _, _, num_frame, height, width = z.shape tile_latent_min_height = self.tile_sample_min_height // self.spatial_compression_ratio tile_latent_min_width = self.tile_sample_min_width // self.spatial_compression_ratio if self.use_tiling and (width > tile_latent_min_width or height > tile_latent_min_height): return self.tiled_decode(z, return_dict=return_dict) self.clear_cache() x = self.post_quant_conv(z) for i in range(num_frame): self._conv_idx = [0] if i == 0: out = self.decoder( x[:, :, i : i + 1, :, :], feat_cache=self._feat_map, feat_idx=self._conv_idx, first_chunk=True ) else: out_ = self.decoder(x[:, :, i : i + 1, :, :], feat_cache=self._feat_map, feat_idx=self._conv_idx) out = torch.cat([out, out_], 2) if self.config.patch_size is not None: out = unpatchify(out, patch_size=self.config.patch_size) out = torch.clamp(out, min=-1.0, max=1.0) self.clear_cache() if not return_dict: return (out,) return DecoderOutput(sample=out) @apply_forward_hook def decode(self, z: torch.Tensor, return_dict: bool = True) -> DecoderOutput | torch.Tensor: r""" Decode a batch of images. Args: z (`torch.Tensor`): Input batch of latent vectors. return_dict (`bool`, *optional*, defaults to `True`): Whether to return a [`~models.vae.DecoderOutput`] instead of a plain tuple. Returns: [`~models.vae.DecoderOutput`] or `tuple`: If return_dict is True, a [`~models.vae.DecoderOutput`] is returned, otherwise a plain `tuple` is returned. 
""" if self.use_slicing and z.shape[0] > 1: decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)] decoded = torch.cat(decoded_slices) else: decoded = self._decode(z).sample if not return_dict: return (decoded,) return DecoderOutput(sample=decoded) def blend_v(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor: blend_extent = min(a.shape[-2], b.shape[-2], blend_extent) for y in range(blend_extent): b[:, :, :, y, :] = a[:, :, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, :, y, :] * ( y / blend_extent ) return b def blend_h(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor: blend_extent = min(a.shape[-1], b.shape[-1], blend_extent) for x in range(blend_extent): b[:, :, :, :, x] = a[:, :, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, :, x] * ( x / blend_extent ) return b def tiled_encode(self, x: torch.Tensor) -> AutoencoderKLOutput: r"""Encode a batch of images using a tiled encoder. Args: x (`torch.Tensor`): Input batch of videos. Returns: `torch.Tensor`: The latent representation of the encoded videos. 
""" _, _, num_frames, height, width = x.shape encode_spatial_compression_ratio = self.spatial_compression_ratio if self.config.patch_size is not None: assert encode_spatial_compression_ratio % self.config.patch_size == 0 encode_spatial_compression_ratio = self.spatial_compression_ratio // self.config.patch_size latent_height = height // encode_spatial_compression_ratio latent_width = width // encode_spatial_compression_ratio tile_latent_min_height = self.tile_sample_min_height // encode_spatial_compression_ratio tile_latent_min_width = self.tile_sample_min_width // encode_spatial_compression_ratio tile_latent_stride_height = self.tile_sample_stride_height // encode_spatial_compression_ratio tile_latent_stride_width = self.tile_sample_stride_width // encode_spatial_compression_ratio blend_height = tile_latent_min_height - tile_latent_stride_height blend_width = tile_latent_min_width - tile_latent_stride_width # Split x into overlapping tiles and encode them separately. # The tiles have an overlap to avoid seams between tiles. 
rows = [] for i in range(0, height, self.tile_sample_stride_height): row = [] for j in range(0, width, self.tile_sample_stride_width): self.clear_cache() time = [] frame_range = 1 + (num_frames - 1) // 4 for k in range(frame_range): self._enc_conv_idx = [0] if k == 0: tile = x[:, :, :1, i : i + self.tile_sample_min_height, j : j + self.tile_sample_min_width] else: tile = x[ :, :, 1 + 4 * (k - 1) : 1 + 4 * k, i : i + self.tile_sample_min_height, j : j + self.tile_sample_min_width, ] tile = self.encoder(tile, feat_cache=self._enc_feat_map, feat_idx=self._enc_conv_idx) tile = self.quant_conv(tile) time.append(tile) row.append(torch.cat(time, dim=2)) rows.append(row) self.clear_cache() result_rows = [] for i, row in enumerate(rows): result_row = [] for j, tile in enumerate(row): # blend the above tile and the left tile # to the current tile and add the current tile to the result row if i > 0: tile = self.blend_v(rows[i - 1][j], tile, blend_height) if j > 0: tile = self.blend_h(row[j - 1], tile, blend_width) result_row.append(tile[:, :, :, :tile_latent_stride_height, :tile_latent_stride_width]) result_rows.append(torch.cat(result_row, dim=-1)) enc = torch.cat(result_rows, dim=3)[:, :, :, :latent_height, :latent_width] return enc def tiled_decode(self, z: torch.Tensor, return_dict: bool = True) -> DecoderOutput | torch.Tensor: r""" Decode a batch of images using a tiled decoder. Args: z (`torch.Tensor`): Input batch of latent vectors. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~models.vae.DecoderOutput`] instead of a plain tuple. Returns: [`~models.vae.DecoderOutput`] or `tuple`: If return_dict is True, a [`~models.vae.DecoderOutput`] is returned, otherwise a plain `tuple` is returned. 
""" _, _, num_frames, height, width = z.shape sample_height = height * self.spatial_compression_ratio sample_width = width * self.spatial_compression_ratio tile_latent_min_height = self.tile_sample_min_height // self.spatial_compression_ratio tile_latent_min_width = self.tile_sample_min_width // self.spatial_compression_ratio tile_latent_stride_height = self.tile_sample_stride_height // self.spatial_compression_ratio tile_latent_stride_width = self.tile_sample_stride_width // self.spatial_compression_ratio tile_sample_stride_height = self.tile_sample_stride_height tile_sample_stride_width = self.tile_sample_stride_width if self.config.patch_size is not None: sample_height = sample_height // self.config.patch_size sample_width = sample_width // self.config.patch_size tile_sample_stride_height = tile_sample_stride_height // self.config.patch_size tile_sample_stride_width = tile_sample_stride_width // self.config.patch_size blend_height = self.tile_sample_min_height // self.config.patch_size - tile_sample_stride_height blend_width = self.tile_sample_min_width // self.config.patch_size - tile_sample_stride_width else: blend_height = self.tile_sample_min_height - tile_sample_stride_height blend_width = self.tile_sample_min_width - tile_sample_stride_width # Split z into overlapping tiles and decode them separately. # The tiles have an overlap to avoid seams between tiles. 
rows = [] for i in range(0, height, tile_latent_stride_height): row = [] for j in range(0, width, tile_latent_stride_width): self.clear_cache() time = [] for k in range(num_frames): self._conv_idx = [0] tile = z[:, :, k : k + 1, i : i + tile_latent_min_height, j : j + tile_latent_min_width] tile = self.post_quant_conv(tile) decoded = self.decoder( tile, feat_cache=self._feat_map, feat_idx=self._conv_idx, first_chunk=(k == 0) ) time.append(decoded) row.append(torch.cat(time, dim=2)) rows.append(row) self.clear_cache() result_rows = [] for i, row in enumerate(rows): result_row = [] for j, tile in enumerate(row): # blend the above tile and the left tile # to the current tile and add the current tile to the result row if i > 0: tile = self.blend_v(rows[i - 1][j], tile, blend_height) if j > 0: tile = self.blend_h(row[j - 1], tile, blend_width) result_row.append(tile[:, :, :, :tile_sample_stride_height, :tile_sample_stride_width]) result_rows.append(torch.cat(result_row, dim=-1)) dec = torch.cat(result_rows, dim=3)[:, :, :, :sample_height, :sample_width] if self.config.patch_size is not None: dec = unpatchify(dec, patch_size=self.config.patch_size) dec = torch.clamp(dec, min=-1.0, max=1.0) if not return_dict: return (dec,) return DecoderOutput(sample=dec) def forward( self, sample: torch.Tensor, sample_posterior: bool = False, return_dict: bool = True, generator: torch.Generator | None = None, ) -> DecoderOutput | torch.Tensor: """ Args: sample (`torch.Tensor`): Input sample. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`DecoderOutput`] instead of a plain tuple. """ x = sample posterior = self.encode(x).latent_dist if sample_posterior: z = posterior.sample(generator=generator) else: z = posterior.mode() dec = self.decode(z, return_dict=return_dict) return dec
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/models/autoencoders/autoencoder_kl_wan.py", "license": "Apache License 2.0", "lines": 1202, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:src/diffusers/models/transformers/transformer_wan.py
# Copyright 2025 The Wan Team and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import math
from typing import Any

import torch
import torch.nn as nn
import torch.nn.functional as F

from ...configuration_utils import ConfigMixin, register_to_config
from ...loaders import FromOriginalModelMixin, PeftAdapterMixin
from ...utils import apply_lora_scale, deprecate, logging
from ...utils.torch_utils import maybe_allow_in_graph
from .._modeling_parallel import ContextParallelInput, ContextParallelOutput
from ..attention import AttentionMixin, AttentionModuleMixin, FeedForward
from ..attention_dispatch import dispatch_attention_fn
from ..cache_utils import CacheMixin
from ..embeddings import PixArtAlphaTextProjection, TimestepEmbedding, Timesteps, get_1d_rotary_pos_embed
from ..modeling_outputs import Transformer2DModelOutput
from ..modeling_utils import ModelMixin
from ..normalization import FP32LayerNorm


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def _get_qkv_projections(attn: "WanAttention", hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor):
    """Compute the query/key/value projections for `attn`.

    Uses the fused `to_qkv` / `to_kv` linears when `attn.fuse_projections()` has been
    called, otherwise the separate `to_q` / `to_k` / `to_v` linears.
    """
    # encoder_hidden_states is only passed for cross-attention
    if encoder_hidden_states is None:
        encoder_hidden_states = hidden_states

    if attn.fused_projections:
        if not attn.is_cross_attention:
            # In self-attention layers, we can fuse the entire QKV projection into a single linear
            query, key, value = attn.to_qkv(hidden_states).chunk(3, dim=-1)
        else:
            # In cross-attention layers, we can only fuse the KV projections into a single linear
            query = attn.to_q(hidden_states)
            key, value = attn.to_kv(encoder_hidden_states).chunk(2, dim=-1)
    else:
        query = attn.to_q(hidden_states)
        key = attn.to_k(encoder_hidden_states)
        value = attn.to_v(encoder_hidden_states)
    return query, key, value


def _get_added_kv_projections(attn: "WanAttention", encoder_hidden_states_img: torch.Tensor):
    """Project the image conditioning tokens to key/value pairs (I2V models only)."""
    if attn.fused_projections:
        key_img, value_img = attn.to_added_kv(encoder_hidden_states_img).chunk(2, dim=-1)
    else:
        key_img = attn.add_k_proj(encoder_hidden_states_img)
        value_img = attn.add_v_proj(encoder_hidden_states_img)
    return key_img, value_img


class WanAttnProcessor:
    """Default attention processor for `WanAttention`.

    Handles self-attention (with rotary embeddings) and cross-attention; for I2V
    models it additionally attends over image conditioning tokens and adds the two
    attention outputs together.
    """

    # Backend / parallel config are class-level knobs set externally; `None` means defaults.
    _attention_backend = None
    _parallel_config = None

    def __init__(self):
        if not hasattr(F, "scaled_dot_product_attention"):
            raise ImportError(
                "WanAttnProcessor requires PyTorch 2.0. To use it, please upgrade PyTorch to version 2.0 or higher."
            )

    def __call__(
        self,
        attn: "WanAttention",
        hidden_states: torch.Tensor,
        encoder_hidden_states: torch.Tensor | None = None,
        attention_mask: torch.Tensor | None = None,
        rotary_emb: tuple[torch.Tensor, torch.Tensor] | None = None,
    ) -> torch.Tensor:
        encoder_hidden_states_img = None
        if attn.add_k_proj is not None:
            # 512 is the context length of the text encoder, hardcoded for now
            image_context_length = encoder_hidden_states.shape[1] - 512
            encoder_hidden_states_img = encoder_hidden_states[:, :image_context_length]
            encoder_hidden_states = encoder_hidden_states[:, image_context_length:]

        query, key, value = _get_qkv_projections(attn, hidden_states, encoder_hidden_states)

        query = attn.norm_q(query)
        key = attn.norm_k(key)

        # (batch, seq, heads * dim_head) -> (batch, seq, heads, dim_head)
        query = query.unflatten(2, (attn.heads, -1))
        key = key.unflatten(2, (attn.heads, -1))
        value = value.unflatten(2, (attn.heads, -1))

        if rotary_emb is not None:

            def apply_rotary_emb(
                hidden_states: torch.Tensor,
                freqs_cos: torch.Tensor,
                freqs_sin: torch.Tensor,
            ):
                # Frequencies are stored interleaved (repeat_interleave_real=True), so
                # even/odd slices of cos/sin pair up with the even/odd feature lanes.
                x1, x2 = hidden_states.unflatten(-1, (-1, 2)).unbind(-1)
                cos = freqs_cos[..., 0::2]
                sin = freqs_sin[..., 1::2]
                out = torch.empty_like(hidden_states)
                out[..., 0::2] = x1 * cos - x2 * sin
                out[..., 1::2] = x1 * sin + x2 * cos
                return out.type_as(hidden_states)

            query = apply_rotary_emb(query, *rotary_emb)
            key = apply_rotary_emb(key, *rotary_emb)

        # I2V task
        hidden_states_img = None
        if encoder_hidden_states_img is not None:
            key_img, value_img = _get_added_kv_projections(attn, encoder_hidden_states_img)
            key_img = attn.norm_added_k(key_img)

            key_img = key_img.unflatten(2, (attn.heads, -1))
            value_img = value_img.unflatten(2, (attn.heads, -1))

            hidden_states_img = dispatch_attention_fn(
                query,
                key_img,
                value_img,
                attn_mask=None,
                dropout_p=0.0,
                is_causal=False,
                backend=self._attention_backend,
                # Reference: https://github.com/huggingface/diffusers/pull/12909
                parallel_config=None,
            )
            hidden_states_img = hidden_states_img.flatten(2, 3)
            hidden_states_img = hidden_states_img.type_as(query)

        hidden_states = dispatch_attention_fn(
            query,
            key,
            value,
            attn_mask=attention_mask,
            dropout_p=0.0,
            is_causal=False,
            backend=self._attention_backend,
            # Reference: https://github.com/huggingface/diffusers/pull/12909
            # Context parallelism is only valid for self-attention (encoder_hidden_states is None).
            parallel_config=(self._parallel_config if encoder_hidden_states is None else None),
        )
        hidden_states = hidden_states.flatten(2, 3)
        hidden_states = hidden_states.type_as(query)

        if hidden_states_img is not None:
            hidden_states = hidden_states + hidden_states_img

        hidden_states = attn.to_out[0](hidden_states)
        hidden_states = attn.to_out[1](hidden_states)
        return hidden_states


class WanAttnProcessor2_0:
    """Deprecated alias kept for backward compatibility; constructs a `WanAttnProcessor`."""

    def __new__(cls, *args, **kwargs):
        deprecation_message = (
            "The WanAttnProcessor2_0 class is deprecated and will be removed in a future version. "
            "Please use WanAttnProcessor instead. "
        )
        deprecate("WanAttnProcessor2_0", "1.0.0", deprecation_message, standard_warn=False)
        return WanAttnProcessor(*args, **kwargs)


class WanAttention(torch.nn.Module, AttentionModuleMixin):
    """Multi-head attention used by Wan transformer blocks.

    Supports self- and cross-attention, optional extra key/value projections for image
    conditioning (`added_kv_proj_dim`, I2V models), and fusing of the Q/K/V linears.
    """

    _default_processor_cls = WanAttnProcessor
    _available_processors = [WanAttnProcessor]

    def __init__(
        self,
        dim: int,
        heads: int = 8,
        dim_head: int = 64,
        eps: float = 1e-5,
        dropout: float = 0.0,
        added_kv_proj_dim: int | None = None,
        cross_attention_dim_head: int | None = None,
        processor=None,
        is_cross_attention=None,
    ):
        super().__init__()

        self.inner_dim = dim_head * heads
        self.heads = heads
        self.added_kv_proj_dim = added_kv_proj_dim
        self.cross_attention_dim_head = cross_attention_dim_head
        self.kv_inner_dim = self.inner_dim if cross_attention_dim_head is None else cross_attention_dim_head * heads

        self.to_q = torch.nn.Linear(dim, self.inner_dim, bias=True)
        self.to_k = torch.nn.Linear(dim, self.kv_inner_dim, bias=True)
        self.to_v = torch.nn.Linear(dim, self.kv_inner_dim, bias=True)
        self.to_out = torch.nn.ModuleList(
            [
                torch.nn.Linear(self.inner_dim, dim, bias=True),
                torch.nn.Dropout(dropout),
            ]
        )
        # Q/K RMS-normalization across all heads (norm dim = heads * dim_head).
        self.norm_q = torch.nn.RMSNorm(dim_head * heads, eps=eps, elementwise_affine=True)
        self.norm_k = torch.nn.RMSNorm(dim_head * heads, eps=eps, elementwise_affine=True)

        self.add_k_proj = self.add_v_proj = None
        if added_kv_proj_dim is not None:
            self.add_k_proj = torch.nn.Linear(added_kv_proj_dim, self.inner_dim, bias=True)
            self.add_v_proj = torch.nn.Linear(added_kv_proj_dim, self.inner_dim, bias=True)
            self.norm_added_k = torch.nn.RMSNorm(dim_head * heads, eps=eps)

        # If not given explicitly, infer cross-attention from the presence of a
        # separate cross-attention head dim.
        if is_cross_attention is not None:
            self.is_cross_attention = is_cross_attention
        else:
            self.is_cross_attention = cross_attention_dim_head is not None

        self.set_processor(processor)

    def fuse_projections(self):
        """Concatenate Q/K/V (or K/V) weights into a single linear for faster inference.

        The fused linears are materialized on the meta device and filled via
        `load_state_dict(..., assign=True)` so no extra copies are allocated.
        """
        if getattr(self, "fused_projections", False):
            return

        if not self.is_cross_attention:
            concatenated_weights = torch.cat([self.to_q.weight.data, self.to_k.weight.data, self.to_v.weight.data])
            concatenated_bias = torch.cat([self.to_q.bias.data, self.to_k.bias.data, self.to_v.bias.data])
            out_features, in_features = concatenated_weights.shape
            with torch.device("meta"):
                self.to_qkv = nn.Linear(in_features, out_features, bias=True)
            self.to_qkv.load_state_dict(
                {"weight": concatenated_weights, "bias": concatenated_bias}, strict=True, assign=True
            )
        else:
            concatenated_weights = torch.cat([self.to_k.weight.data, self.to_v.weight.data])
            concatenated_bias = torch.cat([self.to_k.bias.data, self.to_v.bias.data])
            out_features, in_features = concatenated_weights.shape
            with torch.device("meta"):
                self.to_kv = nn.Linear(in_features, out_features, bias=True)
            self.to_kv.load_state_dict(
                {"weight": concatenated_weights, "bias": concatenated_bias}, strict=True, assign=True
            )

        if self.added_kv_proj_dim is not None:
            concatenated_weights = torch.cat([self.add_k_proj.weight.data, self.add_v_proj.weight.data])
            concatenated_bias = torch.cat([self.add_k_proj.bias.data, self.add_v_proj.bias.data])
            out_features, in_features = concatenated_weights.shape
            with torch.device("meta"):
                self.to_added_kv = nn.Linear(in_features, out_features, bias=True)
            self.to_added_kv.load_state_dict(
                {"weight": concatenated_weights, "bias": concatenated_bias}, strict=True, assign=True
            )

        self.fused_projections = True

    @torch.no_grad()
    def unfuse_projections(self):
        """Drop the fused linears created by `fuse_projections` (originals are kept)."""
        if not getattr(self, "fused_projections", False):
            return

        if hasattr(self, "to_qkv"):
            delattr(self, "to_qkv")
        if hasattr(self, "to_kv"):
            delattr(self, "to_kv")
        if hasattr(self, "to_added_kv"):
            delattr(self, "to_added_kv")

        self.fused_projections = False

    def forward(
        self,
        hidden_states: torch.Tensor,
        encoder_hidden_states: torch.Tensor | None = None,
        attention_mask: torch.Tensor | None = None,
        rotary_emb: tuple[torch.Tensor, torch.Tensor] | None = None,
        **kwargs,
    ) -> torch.Tensor:
        # Delegate to the configured processor (see `WanAttnProcessor`).
        return self.processor(self, hidden_states, encoder_hidden_states, attention_mask, rotary_emb, **kwargs)


class WanImageEmbedding(torch.nn.Module):
    """Projects CLIP image embeddings into the transformer's text-conditioning space."""

    def __init__(self, in_features: int, out_features: int, pos_embed_seq_len=None):
        super().__init__()

        self.norm1 = FP32LayerNorm(in_features)
        self.ff = FeedForward(in_features, out_features, mult=1, activation_fn="gelu")
        self.norm2 = FP32LayerNorm(out_features)
        if pos_embed_seq_len is not None:
            self.pos_embed = nn.Parameter(torch.zeros(1, pos_embed_seq_len, in_features))
        else:
            self.pos_embed = None

    def forward(self, encoder_hidden_states_image: torch.Tensor) -> torch.Tensor:
        if self.pos_embed is not None:
            # NOTE(review): the view assumes the batch dim packs pairs of sequences
            # (batch is halved, seq doubled) to match pos_embed_seq_len — confirm with caller.
            batch_size, seq_len, embed_dim = encoder_hidden_states_image.shape
            encoder_hidden_states_image = encoder_hidden_states_image.view(-1, 2 * seq_len, embed_dim)
            encoder_hidden_states_image = encoder_hidden_states_image + self.pos_embed

        hidden_states = self.norm1(encoder_hidden_states_image)
        hidden_states = self.ff(hidden_states)
        hidden_states = self.norm2(hidden_states)
        return hidden_states


class WanTimeTextImageEmbedding(nn.Module):
    """Joint conditioning embedder: timestep, text, and (optionally) image embeddings."""

    def __init__(
        self,
        dim: int,
        time_freq_dim: int,
        time_proj_dim: int,
        text_embed_dim: int,
        image_embed_dim: int | None = None,
        pos_embed_seq_len: int | None = None,
    ):
        super().__init__()

        self.timesteps_proj = Timesteps(num_channels=time_freq_dim, flip_sin_to_cos=True, downscale_freq_shift=0)
        self.time_embedder = TimestepEmbedding(in_channels=time_freq_dim, time_embed_dim=dim)
        self.act_fn = nn.SiLU()
        self.time_proj = nn.Linear(dim, time_proj_dim)
        self.text_embedder = PixArtAlphaTextProjection(text_embed_dim, dim, act_fn="gelu_tanh")

        self.image_embedder = None
        if image_embed_dim is not None:
            self.image_embedder = WanImageEmbedding(image_embed_dim, dim, pos_embed_seq_len=pos_embed_seq_len)

    def forward(
        self,
        timestep: torch.Tensor,
        encoder_hidden_states: torch.Tensor,
        encoder_hidden_states_image: torch.Tensor | None = None,
        timestep_seq_len: int | None = None,
    ):
        timestep = self.timesteps_proj(timestep)
        if timestep_seq_len is not None:
            # Per-token timesteps (Wan 2.2 ti2v): restore (batch, seq) layout.
            timestep = timestep.unflatten(0, (-1, timestep_seq_len))

        # Cast to the embedder's dtype unless it is quantized (int8 weights).
        time_embedder_dtype = next(iter(self.time_embedder.parameters())).dtype
        if timestep.dtype != time_embedder_dtype and time_embedder_dtype != torch.int8:
            timestep = timestep.to(time_embedder_dtype)
        temb = self.time_embedder(timestep).type_as(encoder_hidden_states)
        timestep_proj = self.time_proj(self.act_fn(temb))

        encoder_hidden_states = self.text_embedder(encoder_hidden_states)
        if encoder_hidden_states_image is not None:
            encoder_hidden_states_image = self.image_embedder(encoder_hidden_states_image)

        return temb, timestep_proj, encoder_hidden_states, encoder_hidden_states_image


class WanRotaryPosEmbed(nn.Module):
    """3D (time/height/width) rotary position embedding for patchified video latents.

    The head dim is split into a temporal part and two equal spatial parts; 1D RoPE
    tables are precomputed up to `max_seq_len` per axis and combined on the fly.
    """

    def __init__(
        self,
        attention_head_dim: int,
        patch_size: tuple[int, int, int],
        max_seq_len: int,
        theta: float = 10000.0,
    ):
        super().__init__()

        self.attention_head_dim = attention_head_dim
        self.patch_size = patch_size
        self.max_seq_len = max_seq_len

        # Height/width each get 2 * (dim // 6); the temporal axis takes the remainder.
        h_dim = w_dim = 2 * (attention_head_dim // 6)
        t_dim = attention_head_dim - h_dim - w_dim
        self.t_dim = t_dim
        self.h_dim = h_dim
        self.w_dim = w_dim

        # MPS has no float64 support, fall back to float32 there.
        freqs_dtype = torch.float32 if torch.backends.mps.is_available() else torch.float64

        freqs_cos = []
        freqs_sin = []

        for dim in [t_dim, h_dim, w_dim]:
            freq_cos, freq_sin = get_1d_rotary_pos_embed(
                dim,
                max_seq_len,
                theta,
                use_real=True,
                repeat_interleave_real=True,
                freqs_dtype=freqs_dtype,
            )
            freqs_cos.append(freq_cos)
            freqs_sin.append(freq_sin)

        self.register_buffer("freqs_cos", torch.cat(freqs_cos, dim=1), persistent=False)
        self.register_buffer("freqs_sin", torch.cat(freqs_sin, dim=1), persistent=False)

    def forward(self, hidden_states: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
        batch_size, num_channels, num_frames, height, width = hidden_states.shape
        p_t, p_h, p_w = self.patch_size
        # Post-patchification grid sizes along frame/height/width.
        ppf, pph, ppw = num_frames // p_t, height // p_h, width // p_w

        split_sizes = [self.t_dim, self.h_dim, self.w_dim]

        freqs_cos = self.freqs_cos.split(split_sizes, dim=1)
        freqs_sin = self.freqs_sin.split(split_sizes, dim=1)

        # Broadcast each axis table over the other two axes, then flatten to token order.
        freqs_cos_f = freqs_cos[0][:ppf].view(ppf, 1, 1, -1).expand(ppf, pph, ppw, -1)
        freqs_cos_h = freqs_cos[1][:pph].view(1, pph, 1, -1).expand(ppf, pph, ppw, -1)
        freqs_cos_w = freqs_cos[2][:ppw].view(1, 1, ppw, -1).expand(ppf, pph, ppw, -1)

        freqs_sin_f = freqs_sin[0][:ppf].view(ppf, 1, 1, -1).expand(ppf, pph, ppw, -1)
        freqs_sin_h = freqs_sin[1][:pph].view(1, pph, 1, -1).expand(ppf, pph, ppw, -1)
        freqs_sin_w = freqs_sin[2][:ppw].view(1, 1, ppw, -1).expand(ppf, pph, ppw, -1)

        freqs_cos = torch.cat([freqs_cos_f, freqs_cos_h, freqs_cos_w], dim=-1).reshape(1, ppf * pph * ppw, 1, -1)
        freqs_sin = torch.cat([freqs_sin_f, freqs_sin_h, freqs_sin_w], dim=-1).reshape(1, ppf * pph * ppw, 1, -1)

        return freqs_cos, freqs_sin


@maybe_allow_in_graph
class WanTransformerBlock(nn.Module):
    """A single Wan transformer block: AdaLN-modulated self-attention, cross-attention,
    and a feed-forward network, all with FP32 layer norms."""

    def __init__(
        self,
        dim: int,
        ffn_dim: int,
        num_heads: int,
        qk_norm: str = "rms_norm_across_heads",
        cross_attn_norm: bool = False,
        eps: float = 1e-6,
        added_kv_proj_dim: int | None = None,
    ):
        super().__init__()

        # 1. Self-attention
        self.norm1 = FP32LayerNorm(dim, eps, elementwise_affine=False)
        self.attn1 = WanAttention(
            dim=dim,
            heads=num_heads,
            dim_head=dim // num_heads,
            eps=eps,
            cross_attention_dim_head=None,
            processor=WanAttnProcessor(),
        )

        # 2. Cross-attention
        self.attn2 = WanAttention(
            dim=dim,
            heads=num_heads,
            dim_head=dim // num_heads,
            eps=eps,
            added_kv_proj_dim=added_kv_proj_dim,
            cross_attention_dim_head=dim // num_heads,
            processor=WanAttnProcessor(),
        )
        self.norm2 = FP32LayerNorm(dim, eps, elementwise_affine=True) if cross_attn_norm else nn.Identity()

        # 3. Feed-forward
        self.ffn = FeedForward(dim, inner_dim=ffn_dim, activation_fn="gelu-approximate")
        self.norm3 = FP32LayerNorm(dim, eps, elementwise_affine=False)

        # Learned AdaLN shift/scale/gate table (6 chunks: 3 for attention, 3 for FFN).
        self.scale_shift_table = nn.Parameter(torch.randn(1, 6, dim) / dim**0.5)

    def forward(
        self,
        hidden_states: torch.Tensor,
        encoder_hidden_states: torch.Tensor,
        temb: torch.Tensor,
        rotary_emb: torch.Tensor,
    ) -> torch.Tensor:
        if temb.ndim == 4:
            # temb: batch_size, seq_len, 6, inner_dim (wan2.2 ti2v)
            shift_msa, scale_msa, gate_msa, c_shift_msa, c_scale_msa, c_gate_msa = (
                self.scale_shift_table.unsqueeze(0) + temb.float()
            ).chunk(6, dim=2)
            # batch_size, seq_len, 1, inner_dim
            shift_msa = shift_msa.squeeze(2)
            scale_msa = scale_msa.squeeze(2)
            gate_msa = gate_msa.squeeze(2)
            c_shift_msa = c_shift_msa.squeeze(2)
            c_scale_msa = c_scale_msa.squeeze(2)
            c_gate_msa = c_gate_msa.squeeze(2)
        else:
            # temb: batch_size, 6, inner_dim (wan2.1/wan2.2 14B)
            shift_msa, scale_msa, gate_msa, c_shift_msa, c_scale_msa, c_gate_msa = (
                self.scale_shift_table + temb.float()
            ).chunk(6, dim=1)

        # 1. Self-attention (modulation computed in fp32, then cast back)
        norm_hidden_states = (self.norm1(hidden_states.float()) * (1 + scale_msa) + shift_msa).type_as(hidden_states)
        attn_output = self.attn1(norm_hidden_states, None, None, rotary_emb)
        hidden_states = (hidden_states.float() + attn_output * gate_msa).type_as(hidden_states)

        # 2. Cross-attention
        norm_hidden_states = self.norm2(hidden_states.float()).type_as(hidden_states)
        attn_output = self.attn2(norm_hidden_states, encoder_hidden_states, None, None)
        hidden_states = hidden_states + attn_output

        # 3. Feed-forward
        norm_hidden_states = (self.norm3(hidden_states.float()) * (1 + c_scale_msa) + c_shift_msa).type_as(
            hidden_states
        )
        ff_output = self.ffn(norm_hidden_states)
        hidden_states = (hidden_states.float() + ff_output.float() * c_gate_msa).type_as(hidden_states)

        return hidden_states


class WanTransformer3DModel(
    ModelMixin, ConfigMixin, PeftAdapterMixin, FromOriginalModelMixin, CacheMixin, AttentionMixin
):
    r"""
    A Transformer model for video-like data used in the Wan model.

    Args:
        patch_size (`tuple[int]`, defaults to `(1, 2, 2)`):
            3D patch dimensions for video embedding (t_patch, h_patch, w_patch).
        num_attention_heads (`int`, defaults to `40`):
            Fixed length for text embeddings.
        attention_head_dim (`int`, defaults to `128`):
            The number of channels in each head.
        in_channels (`int`, defaults to `16`):
            The number of channels in the input.
        out_channels (`int`, defaults to `16`):
            The number of channels in the output.
        text_dim (`int`, defaults to `512`):
            Input dimension for text embeddings.
        freq_dim (`int`, defaults to `256`):
            Dimension for sinusoidal time embeddings.
        ffn_dim (`int`, defaults to `13824`):
            Intermediate dimension in feed-forward network.
        num_layers (`int`, defaults to `40`):
            The number of layers of transformer blocks to use.
        window_size (`tuple[int]`, defaults to `(-1, -1)`):
            Window size for local attention (-1 indicates global attention).
        cross_attn_norm (`bool`, defaults to `True`):
            Enable cross-attention normalization.
        qk_norm (`bool`, defaults to `True`):
            Enable query/key normalization.
        eps (`float`, defaults to `1e-6`):
            Epsilon value for normalization layers.
        add_img_emb (`bool`, defaults to `False`):
            Whether to use img_emb.
        added_kv_proj_dim (`int`, *optional*, defaults to `None`):
            The number of channels to use for the added key and value projections. If `None`, no projection is used.
    """

    _supports_gradient_checkpointing = True
    _skip_layerwise_casting_patterns = ["patch_embedding", "condition_embedder", "norm"]
    _no_split_modules = ["WanTransformerBlock"]
    _keep_in_fp32_modules = ["time_embedder", "scale_shift_table", "norm1", "norm2", "norm3"]
    _keys_to_ignore_on_load_unexpected = ["norm_added_q"]
    _repeated_blocks = ["WanTransformerBlock"]
    _cp_plan = {
        "rope": {
            0: ContextParallelInput(split_dim=1, expected_dims=4, split_output=True),
            1: ContextParallelInput(split_dim=1, expected_dims=4, split_output=True),
        },
        "blocks.0": {
            "hidden_states": ContextParallelInput(split_dim=1, expected_dims=3, split_output=False),
        },
        # Reference: https://github.com/huggingface/diffusers/pull/12909
        # We need to disable the splitting of encoder_hidden_states because the image_encoder
        # (Wan 2.1 I2V) consistently generates 257 tokens for image_embed. This causes the shape
        # of encoder_hidden_states—whose token count is always 769 (512 + 257) after concatenation
        # —to be indivisible by the number of devices in the CP.
        "proj_out": ContextParallelOutput(gather_dim=1, expected_dims=3),
        "": {
            "timestep": ContextParallelInput(split_dim=1, expected_dims=2, split_output=False),
        },
    }

    @register_to_config
    def __init__(
        self,
        patch_size: tuple[int, ...] = (1, 2, 2),
        num_attention_heads: int = 40,
        attention_head_dim: int = 128,
        in_channels: int = 16,
        out_channels: int = 16,
        text_dim: int = 4096,
        freq_dim: int = 256,
        ffn_dim: int = 13824,
        num_layers: int = 40,
        cross_attn_norm: bool = True,
        qk_norm: str | None = "rms_norm_across_heads",
        eps: float = 1e-6,
        image_dim: int | None = None,
        added_kv_proj_dim: int | None = None,
        rope_max_seq_len: int = 1024,
        pos_embed_seq_len: int | None = None,
    ) -> None:
        super().__init__()

        inner_dim = num_attention_heads * attention_head_dim
        out_channels = out_channels or in_channels

        # 1. Patch & position embedding
        self.rope = WanRotaryPosEmbed(attention_head_dim, patch_size, rope_max_seq_len)
        self.patch_embedding = nn.Conv3d(in_channels, inner_dim, kernel_size=patch_size, stride=patch_size)

        # 2. Condition embeddings
        # image_embedding_dim=1280 for I2V model
        self.condition_embedder = WanTimeTextImageEmbedding(
            dim=inner_dim,
            time_freq_dim=freq_dim,
            time_proj_dim=inner_dim * 6,
            text_embed_dim=text_dim,
            image_embed_dim=image_dim,
            pos_embed_seq_len=pos_embed_seq_len,
        )

        # 3. Transformer blocks
        self.blocks = nn.ModuleList(
            [
                WanTransformerBlock(
                    inner_dim, ffn_dim, num_attention_heads, qk_norm, cross_attn_norm, eps, added_kv_proj_dim
                )
                for _ in range(num_layers)
            ]
        )

        # 4. Output norm & projection
        self.norm_out = FP32LayerNorm(inner_dim, eps, elementwise_affine=False)
        self.proj_out = nn.Linear(inner_dim, out_channels * math.prod(patch_size))
        self.scale_shift_table = nn.Parameter(torch.randn(1, 2, inner_dim) / inner_dim**0.5)

        self.gradient_checkpointing = False

    @apply_lora_scale("attention_kwargs")
    def forward(
        self,
        hidden_states: torch.Tensor,
        timestep: torch.LongTensor,
        encoder_hidden_states: torch.Tensor,
        encoder_hidden_states_image: torch.Tensor | None = None,
        return_dict: bool = True,
        attention_kwargs: dict[str, Any] | None = None,
    ) -> torch.Tensor | dict[str, torch.Tensor]:
        batch_size, num_channels, num_frames, height, width = hidden_states.shape
        p_t, p_h, p_w = self.config.patch_size
        post_patch_num_frames = num_frames // p_t
        post_patch_height = height // p_h
        post_patch_width = width // p_w

        rotary_emb = self.rope(hidden_states)

        hidden_states = self.patch_embedding(hidden_states)
        hidden_states = hidden_states.flatten(2).transpose(1, 2)

        # timestep shape: batch_size, or batch_size, seq_len (wan 2.2 ti2v)
        if timestep.ndim == 2:
            ts_seq_len = timestep.shape[1]
            timestep = timestep.flatten()  # batch_size * seq_len
        else:
            ts_seq_len = None

        temb, timestep_proj, encoder_hidden_states, encoder_hidden_states_image = self.condition_embedder(
            timestep, encoder_hidden_states, encoder_hidden_states_image, timestep_seq_len=ts_seq_len
        )
        if ts_seq_len is not None:
            # batch_size, seq_len, 6, inner_dim
            timestep_proj = timestep_proj.unflatten(2, (6, -1))
        else:
            # batch_size, 6, inner_dim
            timestep_proj = timestep_proj.unflatten(1, (6, -1))

        if encoder_hidden_states_image is not None:
            encoder_hidden_states = torch.concat([encoder_hidden_states_image, encoder_hidden_states], dim=1)

        # 4. Transformer blocks
        if torch.is_grad_enabled() and self.gradient_checkpointing:
            for block in self.blocks:
                hidden_states = self._gradient_checkpointing_func(
                    block, hidden_states, encoder_hidden_states, timestep_proj, rotary_emb
                )
        else:
            for block in self.blocks:
                hidden_states = block(hidden_states, encoder_hidden_states, timestep_proj, rotary_emb)

        # 5. Output norm, projection & unpatchify
        if temb.ndim == 3:
            # batch_size, seq_len, inner_dim (wan 2.2 ti2v)
            shift, scale = (self.scale_shift_table.unsqueeze(0).to(temb.device) + temb.unsqueeze(2)).chunk(2, dim=2)
            shift = shift.squeeze(2)
            scale = scale.squeeze(2)
        else:
            # batch_size, inner_dim
            shift, scale = (self.scale_shift_table.to(temb.device) + temb.unsqueeze(1)).chunk(2, dim=1)

        # Move the shift and scale tensors to the same device as hidden_states.
        # When using multi-GPU inference via accelerate these will be on the
        # first device rather than the last device, which hidden_states ends up
        # on.
        shift = shift.to(hidden_states.device)
        scale = scale.to(hidden_states.device)

        hidden_states = (self.norm_out(hidden_states.float()) * (1 + scale) + shift).type_as(hidden_states)
        hidden_states = self.proj_out(hidden_states)

        hidden_states = hidden_states.reshape(
            batch_size, post_patch_num_frames, post_patch_height, post_patch_width, p_t, p_h, p_w, -1
        )
        hidden_states = hidden_states.permute(0, 7, 1, 4, 2, 5, 3, 6)
        output = hidden_states.flatten(6, 7).flatten(4, 5).flatten(2, 3)

        if not return_dict:
            return (output,)

        return Transformer2DModelOutput(sample=output)
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/models/transformers/transformer_wan.py", "license": "Apache License 2.0", "lines": 600, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:src/diffusers/pipelines/wan/pipeline_output.py
from dataclasses import dataclass

import torch

from diffusers.utils import BaseOutput


@dataclass
class WanPipelineOutput(BaseOutput):
    r"""
    Output class for Wan pipelines.

    Args:
        frames (`torch.Tensor`, `np.ndarray`, or `list[list[PIL.Image.Image]]`):
            List of video outputs. It can be a nested list of length `batch_size`, with each sub-list containing
            denoised PIL image sequences of length `num_frames`. It can also be a NumPy array or Torch tensor of shape
            `(batch_size, num_frames, channels, height, width)`.
    """

    # Decoded video frames; concrete container type depends on the pipeline's
    # `output_type` argument (tensor, ndarray, or nested PIL lists).
    frames: torch.Tensor
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/pipelines/wan/pipeline_output.py", "license": "Apache License 2.0", "lines": 14, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
documentation
huggingface/diffusers:src/diffusers/pipelines/wan/pipeline_wan.py
# Copyright 2025 The Wan Team and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import html from typing import Any, Callable import regex as re import torch from transformers import AutoTokenizer, UMT5EncoderModel from ...callbacks import MultiPipelineCallbacks, PipelineCallback from ...loaders import WanLoraLoaderMixin from ...models import AutoencoderKLWan, WanTransformer3DModel from ...schedulers import FlowMatchEulerDiscreteScheduler from ...utils import is_ftfy_available, is_torch_xla_available, logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ...video_processor import VideoProcessor from ..pipeline_utils import DiffusionPipeline from .pipeline_output import WanPipelineOutput if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) # pylint: disable=invalid-name if is_ftfy_available(): import ftfy EXAMPLE_DOC_STRING = """ Examples: ```python >>> import torch >>> from diffusers.utils import export_to_video >>> from diffusers import AutoencoderKLWan, WanPipeline >>> from diffusers.schedulers.scheduling_unipc_multistep import UniPCMultistepScheduler >>> # Available models: Wan-AI/Wan2.1-T2V-14B-Diffusers, Wan-AI/Wan2.1-T2V-1.3B-Diffusers >>> model_id = "Wan-AI/Wan2.1-T2V-14B-Diffusers" >>> vae = AutoencoderKLWan.from_pretrained(model_id, subfolder="vae", torch_dtype=torch.float32) >>> pipe = 
WanPipeline.from_pretrained(model_id, vae=vae, torch_dtype=torch.bfloat16) >>> flow_shift = 5.0 # 5.0 for 720P, 3.0 for 480P >>> pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config, flow_shift=flow_shift) >>> pipe.to("cuda") >>> prompt = "A cat and a dog baking a cake together in a kitchen. The cat is carefully measuring flour, while the dog is stirring the batter with a wooden spoon. The kitchen is cozy, with sunlight streaming through the window." >>> negative_prompt = "Bright tones, overexposed, static, blurred details, subtitles, style, works, paintings, images, static, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, misshapen limbs, fused fingers, still picture, messy background, three legs, many people in the background, walking backwards" >>> output = pipe( ... prompt=prompt, ... negative_prompt=negative_prompt, ... height=720, ... width=1280, ... num_frames=81, ... guidance_scale=5.0, ... ).frames[0] >>> export_to_video(output, "output.mp4", fps=16) ``` """ def basic_clean(text): if is_ftfy_available(): text = ftfy.fix_text(text) text = html.unescape(html.unescape(text)) return text.strip() def whitespace_clean(text): text = re.sub(r"\s+", " ", text) text = text.strip() return text def prompt_clean(text): text = whitespace_clean(basic_clean(text)) return text class WanPipeline(DiffusionPipeline, WanLoraLoaderMixin): r""" Pipeline for text-to-video generation using Wan. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). Args: tokenizer ([`T5Tokenizer`]): Tokenizer from [T5](https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5Tokenizer), specifically the [google/umt5-xxl](https://huggingface.co/google/umt5-xxl) variant. 
text_encoder ([`T5EncoderModel`]): [T5](https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5EncoderModel), specifically the [google/umt5-xxl](https://huggingface.co/google/umt5-xxl) variant. transformer ([`WanTransformer3DModel`]): Conditional Transformer to denoise the input latents. scheduler ([`UniPCMultistepScheduler`]): A scheduler to be used in combination with `transformer` to denoise the encoded image latents. vae ([`AutoencoderKLWan`]): Variational Auto-Encoder (VAE) Model to encode and decode videos to and from latent representations. transformer_2 ([`WanTransformer3DModel`], *optional*): Conditional Transformer to denoise the input latents during the low-noise stage. If provided, enables two-stage denoising where `transformer` handles high-noise stages and `transformer_2` handles low-noise stages. If not provided, only `transformer` is used. boundary_ratio (`float`, *optional*, defaults to `None`): Ratio of total timesteps to use as the boundary for switching between transformers in two-stage denoising. The actual boundary timestep is calculated as `boundary_ratio * num_train_timesteps`. When provided, `transformer` handles timesteps >= boundary_timestep and `transformer_2` handles timesteps < boundary_timestep. If `None`, only `transformer` is used for the entire denoising process. 
    """

    # Order in which sub-models are moved on/off the accelerator when CPU offload is enabled.
    model_cpu_offload_seq = "text_encoder->transformer->transformer_2->vae"
    # Tensors that step-end callbacks are allowed to read/replace via `callback_kwargs`.
    _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"]
    # Either transformer may be absent: single-stage pipelines use only `transformer`,
    # two-stage (Wan2.2) pipelines may load only one of the pair at a time.
    _optional_components = ["transformer", "transformer_2"]

    def __init__(
        self,
        tokenizer: AutoTokenizer,
        text_encoder: UMT5EncoderModel,
        vae: AutoencoderKLWan,
        scheduler: FlowMatchEulerDiscreteScheduler,
        transformer: WanTransformer3DModel | None = None,
        transformer_2: WanTransformer3DModel | None = None,
        boundary_ratio: float | None = None,
        expand_timesteps: bool = False,  # Wan2.2 ti2v
    ):
        """Register sub-models and derive VAE scale factors.

        `boundary_ratio` and `expand_timesteps` are persisted via `register_to_config`
        so they survive `save_pretrained` / `from_pretrained` round-trips.
        """
        super().__init__()

        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            transformer=transformer,
            scheduler=scheduler,
            transformer_2=transformer_2,
        )
        self.register_to_config(boundary_ratio=boundary_ratio)
        self.register_to_config(expand_timesteps=expand_timesteps)

        # Fall back to the Wan defaults (4x temporal, 8x spatial) when no VAE is registered.
        self.vae_scale_factor_temporal = self.vae.config.scale_factor_temporal if getattr(self, "vae", None) else 4
        self.vae_scale_factor_spatial = self.vae.config.scale_factor_spatial if getattr(self, "vae", None) else 8
        self.video_processor = VideoProcessor(vae_scale_factor=self.vae_scale_factor_spatial)

    def _get_t5_prompt_embeds(
        self,
        prompt: str | list[str] = None,
        num_videos_per_prompt: int = 1,
        max_sequence_length: int = 226,
        device: torch.device | None = None,
        dtype: torch.dtype | None = None,
    ):
        """Tokenize and encode `prompt` with the UMT5 text encoder.

        Returns embeddings of shape (batch_size * num_videos_per_prompt,
        max_sequence_length, hidden_dim): padding positions are re-zeroed after
        encoding (see below), and each prompt is repeated per requested video.
        """
        device = device or self._execution_device
        dtype = dtype or self.text_encoder.dtype

        prompt = [prompt] if isinstance(prompt, str) else prompt
        prompt = [prompt_clean(u) for u in prompt]
        batch_size = len(prompt)

        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=max_sequence_length,
            truncation=True,
            add_special_tokens=True,
            return_attention_mask=True,
            return_tensors="pt",
        )
        text_input_ids, mask = text_inputs.input_ids, text_inputs.attention_mask
        # Number of real (non-padding) tokens per prompt.
        seq_lens = mask.gt(0).sum(dim=1).long()

        prompt_embeds = self.text_encoder(text_input_ids.to(device), mask.to(device)).last_hidden_state
        prompt_embeds = prompt_embeds.to(dtype=dtype, device=device)
        # Truncate each sequence to its real length, then zero-pad back to
        # `max_sequence_length` so padding positions carry exact zeros rather than
        # whatever the encoder produced for pad tokens.
        prompt_embeds = [u[:v] for u, v in zip(prompt_embeds, seq_lens)]
        prompt_embeds = torch.stack(
            [torch.cat([u, u.new_zeros(max_sequence_length - u.size(0), u.size(1))]) for u in prompt_embeds], dim=0
        )

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        _, seq_len, _ = prompt_embeds.shape
        prompt_embeds = prompt_embeds.repeat(1, num_videos_per_prompt, 1)
        prompt_embeds = prompt_embeds.view(batch_size * num_videos_per_prompt, seq_len, -1)

        return prompt_embeds

    def encode_prompt(
        self,
        prompt: str | list[str],
        negative_prompt: str | list[str] | None = None,
        do_classifier_free_guidance: bool = True,
        num_videos_per_prompt: int = 1,
        prompt_embeds: torch.Tensor | None = None,
        negative_prompt_embeds: torch.Tensor | None = None,
        max_sequence_length: int = 226,
        device: torch.device | None = None,
        dtype: torch.dtype | None = None,
    ):
        r"""
        Encodes the prompt into text encoder hidden states.

        Args:
            prompt (`str` or `list[str]`, *optional*):
                prompt to be encoded
            negative_prompt (`str` or `list[str]`, *optional*):
                The prompt or prompts not to guide the image generation. If not defined, one has to pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale`
                is less than `1`).
            do_classifier_free_guidance (`bool`, *optional*, defaults to `True`):
                Whether to use classifier free guidance or not.
            num_videos_per_prompt (`int`, *optional*, defaults to 1):
                Number of videos that should be generated per prompt.
            prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If
                not provided, text embeddings will be generated from `prompt` input argument.
            negative_prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
                argument.
            device: (`torch.device`, *optional*):
                torch device
            dtype: (`torch.dtype`, *optional*):
                torch dtype
        """
        device = device or self._execution_device

        prompt = [prompt] if isinstance(prompt, str) else prompt
        if prompt is not None:
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        if prompt_embeds is None:
            prompt_embeds = self._get_t5_prompt_embeds(
                prompt=prompt,
                num_videos_per_prompt=num_videos_per_prompt,
                max_sequence_length=max_sequence_length,
                device=device,
                dtype=dtype,
            )

        if do_classifier_free_guidance and negative_prompt_embeds is None:
            # An unset negative prompt is encoded as the empty string, broadcast to the batch.
            negative_prompt = negative_prompt or ""
            negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt

            if prompt is not None and type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )

            negative_prompt_embeds = self._get_t5_prompt_embeds(
                prompt=negative_prompt,
                num_videos_per_prompt=num_videos_per_prompt,
                max_sequence_length=max_sequence_length,
                device=device,
                dtype=dtype,
            )

        return prompt_embeds, negative_prompt_embeds

    def check_inputs(
        self,
        prompt,
        negative_prompt,
        height,
        width,
        prompt_embeds=None,
        negative_prompt_embeds=None,
        callback_on_step_end_tensor_inputs=None,
        guidance_scale_2=None,
    ):
        """Validate `__call__` arguments; raises `ValueError`/`TypeError` on misuse."""
        if height % 16 != 0 or width % 16 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 16 but are {height} and {width}.")

        if callback_on_step_end_tensor_inputs is not None and not all(
            k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
        ):
            raise ValueError(
                f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
            )

        # Prompts and precomputed embeddings are mutually exclusive; exactly one must be given.
        if prompt is not None and prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
                " only forward one of the two."
            )
        elif negative_prompt is not None and negative_prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to"
                " only forward one of the two."
            )
        elif prompt is None and prompt_embeds is None:
            raise ValueError(
                "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
            )
        elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
        elif negative_prompt is not None and (
            not isinstance(negative_prompt, str) and not isinstance(negative_prompt, list)
        ):
            raise ValueError(f"`negative_prompt` has to be of type `str` or `list` but is {type(negative_prompt)}")

        # A second guidance scale only makes sense for two-stage (boundary_ratio) pipelines.
        if self.config.boundary_ratio is None and guidance_scale_2 is not None:
            raise ValueError("`guidance_scale_2` is only supported when the pipeline's `boundary_ratio` is not None.")

    def prepare_latents(
        self,
        batch_size: int,
        num_channels_latents: int = 16,
        height: int = 480,
        width: int = 832,
        num_frames: int = 81,
        dtype: torch.dtype | None = None,
        device: torch.device | None = None,
        generator: torch.Generator | list[torch.Generator] | None = None,
        latents: torch.Tensor | None = None,
    ) -> torch.Tensor:
        """Return initial noise latents of shape (B, C, T_latent, H/8, W/8).

        If `latents` is provided it is reused as-is (moved to `device`/`dtype`);
        otherwise fresh Gaussian noise is drawn with `generator`.
        """
        if latents is not None:
            return latents.to(device=device, dtype=dtype)

        # Temporal compression: (num_frames - 1) must be divisible by the temporal scale
        # factor (enforced upstream in __call__), plus one leading frame.
        num_latent_frames = (num_frames - 1) // self.vae_scale_factor_temporal + 1
        shape = (
            batch_size,
            num_channels_latents,
            num_latent_frames,
            int(height) // self.vae_scale_factor_spatial,
            int(width) // self.vae_scale_factor_spatial,
        )
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        return latents

    @property
    def guidance_scale(self):
        # High-noise-stage CFG scale for the current __call__ invocation.
        return self._guidance_scale

    @property
    def do_classifier_free_guidance(self):
        # CFG is active only for scales strictly above 1.
        return self._guidance_scale > 1.0

    @property
    def num_timesteps(self):
        # Number of scheduler timesteps in the current run.
        return self._num_timesteps

    @property
    def current_timestep(self):
        # Timestep being denoised right now (None outside the loop).
        return self._current_timestep

    @property
    def interrupt(self):
        # Set externally (e.g. by a callback) to skip remaining denoising steps.
        return self._interrupt

    @property
    def attention_kwargs(self):
        # Extra kwargs forwarded to the transformer's attention processors.
        return self._attention_kwargs

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        prompt: str | list[str] = None,
        negative_prompt: str | list[str] = None,
        height: int = 480,
        width: int = 832,
        num_frames: int = 81,
        num_inference_steps: int = 50,
        guidance_scale: float = 5.0,
        guidance_scale_2: float | None = None,
        num_videos_per_prompt: int | None = 1,
        generator: torch.Generator | list[torch.Generator] | None = None,
        latents: torch.Tensor | None = None,
        prompt_embeds: torch.Tensor | None = None,
        negative_prompt_embeds: torch.Tensor | None = None,
        output_type: str | None = "np",
        return_dict: bool = True,
        attention_kwargs: dict[str, Any] | None = None,
        callback_on_step_end: Callable[[int, int], None] | PipelineCallback | MultiPipelineCallbacks | None = None,
        callback_on_step_end_tensor_inputs: list[str] = ["latents"],
        max_sequence_length: int = 512,
    ):
        r"""
        The call function to the pipeline for generation.

        Args:
            prompt (`str` or `list[str]`, *optional*):
                The prompt or prompts to guide the image generation. If not defined, pass `prompt_embeds` instead.
            negative_prompt (`str` or `list[str]`, *optional*):
                The prompt or prompts to avoid during image generation. If not defined, pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale` < `1`).
            height (`int`, defaults to `480`):
                The height in pixels of the generated image.
            width (`int`, defaults to `832`):
                The width in pixels of the generated image.
            num_frames (`int`, defaults to `81`):
                The number of frames in the generated video.
            num_inference_steps (`int`, defaults to `50`):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            guidance_scale (`float`, defaults to `5.0`):
                Guidance scale as defined in [Classifier-Free Diffusion
                Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2.
                of [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting
                `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to
                the text `prompt`, usually at the expense of lower image quality.
            guidance_scale_2 (`float`, *optional*, defaults to `None`):
                Guidance scale for the low-noise stage transformer (`transformer_2`). If `None` and the pipeline's
                `boundary_ratio` is not None, uses the same value as `guidance_scale`. Only used when `transformer_2`
                and the pipeline's `boundary_ratio` are not None.
            num_videos_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            generator (`torch.Generator` or `list[torch.Generator]`, *optional*):
                A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
                generation deterministic.
            latents (`torch.Tensor`, *optional*):
                Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a
                latents tensor is generated by sampling using the supplied random `generator`.
            prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
                provided, text embeddings are generated from the `prompt` input argument.
            output_type (`str`, *optional*, defaults to `"np"`):
                The output format of the generated image. Choose between `PIL.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`WanPipelineOutput`] instead of a plain tuple.
            attention_kwargs (`dict`, *optional*):
                A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
                `self.processor` in
                [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
            callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*):
                A function or a subclass of `PipelineCallback` or `MultiPipelineCallbacks` that is called at the end
                of each denoising step during the inference. with the following arguments: `callback_on_step_end(self:
                DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a
                list of all tensors as specified by `callback_on_step_end_tensor_inputs`.
            callback_on_step_end_tensor_inputs (`list`, *optional*):
                The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
                will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
                `._callback_tensor_inputs` attribute of your pipeline class.
            max_sequence_length (`int`, defaults to `512`):
                The maximum sequence length of the text encoder. If the prompt is longer than this, it will be
                truncated. If the prompt is shorter, it will be padded to this length.

        Examples:

        Returns:
            [`~WanPipelineOutput`] or `tuple`:
                If `return_dict` is `True`, [`WanPipelineOutput`] is returned, otherwise a `tuple` is returned where
                the first element is a list with the generated frames.
        """

        if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)):
            callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs

        # 1. Check inputs. Raise error if not correct
        self.check_inputs(
            prompt,
            negative_prompt,
            height,
            width,
            prompt_embeds,
            negative_prompt_embeds,
            callback_on_step_end_tensor_inputs,
            guidance_scale_2,
        )

        # The VAE compresses time as (num_frames - 1) / scale + 1, so round num_frames
        # down to the nearest valid value instead of failing.
        if num_frames % self.vae_scale_factor_temporal != 1:
            logger.warning(
                f"`num_frames - 1` has to be divisible by {self.vae_scale_factor_temporal}. Rounding to the nearest number."
            )
            num_frames = num_frames // self.vae_scale_factor_temporal * self.vae_scale_factor_temporal + 1
        num_frames = max(num_frames, 1)

        # Snap spatial dims to multiples of (vae spatial scale x transformer patch size)
        # so the latent grid patchifies evenly.
        patch_size = (
            self.transformer.config.patch_size
            if self.transformer is not None
            else self.transformer_2.config.patch_size
        )
        h_multiple_of = self.vae_scale_factor_spatial * patch_size[1]
        w_multiple_of = self.vae_scale_factor_spatial * patch_size[2]
        calc_height = height // h_multiple_of * h_multiple_of
        calc_width = width // w_multiple_of * w_multiple_of
        if height != calc_height or width != calc_width:
            logger.warning(
                f"`height` and `width` must be multiples of ({h_multiple_of}, {w_multiple_of}) for proper patchification. "
                f"Adjusting ({height}, {width}) -> ({calc_height}, {calc_width})."
            )
            height, width = calc_height, calc_width

        # Two-stage pipelines default the low-noise guidance scale to the high-noise one.
        if self.config.boundary_ratio is not None and guidance_scale_2 is None:
            guidance_scale_2 = guidance_scale

        self._guidance_scale = guidance_scale
        self._guidance_scale_2 = guidance_scale_2
        self._attention_kwargs = attention_kwargs
        self._current_timestep = None
        self._interrupt = False

        device = self._execution_device

        # 2. Define call parameters
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        # 3. Encode input prompt
        prompt_embeds, negative_prompt_embeds = self.encode_prompt(
            prompt=prompt,
            negative_prompt=negative_prompt,
            do_classifier_free_guidance=self.do_classifier_free_guidance,
            num_videos_per_prompt=num_videos_per_prompt,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            max_sequence_length=max_sequence_length,
            device=device,
        )

        transformer_dtype = self.transformer.dtype if self.transformer is not None else self.transformer_2.dtype
        prompt_embeds = prompt_embeds.to(transformer_dtype)
        if negative_prompt_embeds is not None:
            negative_prompt_embeds = negative_prompt_embeds.to(transformer_dtype)

        # 4. Prepare timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        # 5. Prepare latent variables
        num_channels_latents = (
            self.transformer.config.in_channels
            if self.transformer is not None
            else self.transformer_2.config.in_channels
        )
        latents = self.prepare_latents(
            batch_size * num_videos_per_prompt,
            num_channels_latents,
            height,
            width,
            num_frames,
            torch.float32,
            device,
            generator,
            latents,
        )

        # All-ones mask over the latent grid; only used on the expand_timesteps
        # (Wan2.2 ti2v) path below to build a per-token timestep map.
        mask = torch.ones(latents.shape, dtype=torch.float32, device=device)

        # 6. Denoising loop
        num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
        self._num_timesteps = len(timesteps)

        # In two-stage mode, timesteps >= boundary use `transformer` (high noise)
        # and timesteps below it use `transformer_2` (low noise).
        if self.config.boundary_ratio is not None:
            boundary_timestep = self.config.boundary_ratio * self.scheduler.config.num_train_timesteps
        else:
            boundary_timestep = None

        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                if self.interrupt:
                    continue

                self._current_timestep = t

                if boundary_timestep is None or t >= boundary_timestep:
                    # wan2.1 or high-noise stage in wan2.2
                    current_model = self.transformer
                    current_guidance_scale = guidance_scale
                else:
                    # low-noise stage in wan2.2
                    current_model = self.transformer_2
                    current_guidance_scale = guidance_scale_2

                latent_model_input = latents.to(transformer_dtype)
                if self.config.expand_timesteps:
                    # seq_len: num_latent_frames * latent_height//2 * latent_width//2
                    temp_ts = (mask[0][0][:, ::2, ::2] * t).flatten()
                    # batch_size, seq_len
                    timestep = temp_ts.unsqueeze(0).expand(latents.shape[0], -1)
                else:
                    timestep = t.expand(latents.shape[0])

                # Conditional and unconditional passes run under separate cache
                # contexts so feature caching never mixes the two branches.
                with current_model.cache_context("cond"):
                    noise_pred = current_model(
                        hidden_states=latent_model_input,
                        timestep=timestep,
                        encoder_hidden_states=prompt_embeds,
                        attention_kwargs=attention_kwargs,
                        return_dict=False,
                    )[0]

                if self.do_classifier_free_guidance:
                    with current_model.cache_context("uncond"):
                        noise_uncond = current_model(
                            hidden_states=latent_model_input,
                            timestep=timestep,
                            encoder_hidden_states=negative_prompt_embeds,
                            attention_kwargs=attention_kwargs,
                            return_dict=False,
                        )[0]
                    noise_pred = noise_uncond + current_guidance_scale * (noise_pred - noise_uncond)

                # compute the previous noisy sample x_t -> x_t-1
                latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0]

                if callback_on_step_end is not None:
                    callback_kwargs = {}
                    # Only names validated against _callback_tensor_inputs reach here,
                    # so the locals() lookup is bounded and safe.
                    for k in callback_on_step_end_tensor_inputs:
                        callback_kwargs[k] = locals()[k]
                    callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)

                    latents = callback_outputs.pop("latents", latents)
                    prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
                    negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)

                # call the callback, if provided
                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                    progress_bar.update()

                if XLA_AVAILABLE:
                    xm.mark_step()

        self._current_timestep = None

        if not output_type == "latent":
            # Undo the VAE's per-channel latent normalization before decoding.
            latents = latents.to(self.vae.dtype)
            latents_mean = (
                torch.tensor(self.vae.config.latents_mean)
                .view(1, self.vae.config.z_dim, 1, 1, 1)
                .to(latents.device, latents.dtype)
            )
            latents_std = 1.0 / torch.tensor(self.vae.config.latents_std).view(1, self.vae.config.z_dim, 1, 1, 1).to(
                latents.device, latents.dtype
            )
            latents = latents / latents_std + latents_mean
            video = self.vae.decode(latents, return_dict=False)[0]
            video = self.video_processor.postprocess_video(video, output_type=output_type)
        else:
            video = latents

        # Offload all models
        self.maybe_free_model_hooks()

        if not return_dict:
            return (video,)

        return WanPipelineOutput(frames=video)
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/pipelines/wan/pipeline_wan.py", "license": "Apache License 2.0", "lines": 583, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:src/diffusers/pipelines/wan/pipeline_wan_i2v.py
# Copyright 2025 The Wan Team and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import html from typing import Any, Callable import PIL import regex as re import torch from transformers import AutoTokenizer, CLIPImageProcessor, CLIPVisionModel, UMT5EncoderModel from ...callbacks import MultiPipelineCallbacks, PipelineCallback from ...image_processor import PipelineImageInput from ...loaders import WanLoraLoaderMixin from ...models import AutoencoderKLWan, WanTransformer3DModel from ...schedulers import FlowMatchEulerDiscreteScheduler from ...utils import is_ftfy_available, is_torch_xla_available, logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ...video_processor import VideoProcessor from ..pipeline_utils import DiffusionPipeline from .pipeline_output import WanPipelineOutput if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) # pylint: disable=invalid-name if is_ftfy_available(): import ftfy EXAMPLE_DOC_STRING = """ Examples: ```python >>> import torch >>> import numpy as np >>> from diffusers import AutoencoderKLWan, WanImageToVideoPipeline >>> from diffusers.utils import export_to_video, load_image >>> from transformers import CLIPVisionModel >>> # Available models: Wan-AI/Wan2.1-I2V-14B-480P-Diffusers, Wan-AI/Wan2.1-I2V-14B-720P-Diffusers >>> model_id = "Wan-AI/Wan2.1-I2V-14B-480P-Diffusers" >>> 
image_encoder = CLIPVisionModel.from_pretrained( ... model_id, subfolder="image_encoder", torch_dtype=torch.float32 ... ) >>> vae = AutoencoderKLWan.from_pretrained(model_id, subfolder="vae", torch_dtype=torch.float32) >>> pipe = WanImageToVideoPipeline.from_pretrained( ... model_id, vae=vae, image_encoder=image_encoder, torch_dtype=torch.bfloat16 ... ) >>> pipe.to("cuda") >>> image = load_image( ... "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/astronaut.jpg" ... ) >>> max_area = 480 * 832 >>> aspect_ratio = image.height / image.width >>> mod_value = pipe.vae_scale_factor_spatial * pipe.transformer.config.patch_size[1] >>> height = round(np.sqrt(max_area * aspect_ratio)) // mod_value * mod_value >>> width = round(np.sqrt(max_area / aspect_ratio)) // mod_value * mod_value >>> image = image.resize((width, height)) >>> prompt = ( ... "An astronaut hatching from an egg, on the surface of the moon, the darkness and depth of space realised in " ... "the background. High quality, ultrarealistic detail and breath-taking movie-like camera shot." ... ) >>> negative_prompt = "Bright tones, overexposed, static, blurred details, subtitles, style, works, paintings, images, static, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, misshapen limbs, fused fingers, still picture, messy background, three legs, many people in the background, walking backwards" >>> output = pipe( ... image=image, ... prompt=prompt, ... negative_prompt=negative_prompt, ... height=height, ... width=width, ... num_frames=81, ... guidance_scale=5.0, ... 
).frames[0] >>> export_to_video(output, "output.mp4", fps=16) ``` """ def basic_clean(text): text = ftfy.fix_text(text) text = html.unescape(html.unescape(text)) return text.strip() def whitespace_clean(text): text = re.sub(r"\s+", " ", text) text = text.strip() return text def prompt_clean(text): text = whitespace_clean(basic_clean(text)) return text # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents def retrieve_latents( encoder_output: torch.Tensor, generator: torch.Generator | None = None, sample_mode: str = "sample" ): if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": return encoder_output.latent_dist.sample(generator) elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": return encoder_output.latent_dist.mode() elif hasattr(encoder_output, "latents"): return encoder_output.latents else: raise AttributeError("Could not access latents of provided encoder_output") class WanImageToVideoPipeline(DiffusionPipeline, WanLoraLoaderMixin): r""" Pipeline for image-to-video generation using Wan. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). Args: tokenizer ([`T5Tokenizer`]): Tokenizer from [T5](https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5Tokenizer), specifically the [google/umt5-xxl](https://huggingface.co/google/umt5-xxl) variant. text_encoder ([`T5EncoderModel`]): [T5](https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5EncoderModel), specifically the [google/umt5-xxl](https://huggingface.co/google/umt5-xxl) variant. 
image_encoder ([`CLIPVisionModel`]): [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPVisionModel), specifically the [clip-vit-huge-patch14](https://github.com/mlfoundations/open_clip/blob/main/docs/PRETRAINED.md#vit-h14-xlm-roberta-large) variant. transformer ([`WanTransformer3DModel`]): Conditional Transformer to denoise the input latents. scheduler ([`UniPCMultistepScheduler`]): A scheduler to be used in combination with `transformer` to denoise the encoded image latents. vae ([`AutoencoderKLWan`]): Variational Auto-Encoder (VAE) Model to encode and decode videos to and from latent representations. transformer_2 ([`WanTransformer3DModel`], *optional*): Conditional Transformer to denoise the input latents during the low-noise stage. In two-stage denoising, `transformer` handles high-noise stages and `transformer_2` handles low-noise stages. If not provided, only `transformer` is used. boundary_ratio (`float`, *optional*, defaults to `None`): Ratio of total timesteps to use as the boundary for switching between transformers in two-stage denoising. The actual boundary timestep is calculated as `boundary_ratio * num_train_timesteps`. When provided, `transformer` handles timesteps >= boundary_timestep and `transformer_2` handles timesteps < boundary_timestep. If `None`, only `transformer` is used for the entire denoising process. 
""" model_cpu_offload_seq = "text_encoder->image_encoder->transformer->transformer_2->vae" _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"] _optional_components = ["transformer", "transformer_2", "image_encoder", "image_processor"] def __init__( self, tokenizer: AutoTokenizer, text_encoder: UMT5EncoderModel, vae: AutoencoderKLWan, scheduler: FlowMatchEulerDiscreteScheduler, image_processor: CLIPImageProcessor = None, image_encoder: CLIPVisionModel = None, transformer: WanTransformer3DModel = None, transformer_2: WanTransformer3DModel = None, boundary_ratio: float | None = None, expand_timesteps: bool = False, ): super().__init__() self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, image_encoder=image_encoder, transformer=transformer, scheduler=scheduler, image_processor=image_processor, transformer_2=transformer_2, ) self.register_to_config(boundary_ratio=boundary_ratio, expand_timesteps=expand_timesteps) self.vae_scale_factor_temporal = self.vae.config.scale_factor_temporal if getattr(self, "vae", None) else 4 self.vae_scale_factor_spatial = self.vae.config.scale_factor_spatial if getattr(self, "vae", None) else 8 self.video_processor = VideoProcessor(vae_scale_factor=self.vae_scale_factor_spatial) self.image_processor = image_processor def _get_t5_prompt_embeds( self, prompt: str | list[str] = None, num_videos_per_prompt: int = 1, max_sequence_length: int = 512, device: torch.device | None = None, dtype: torch.dtype | None = None, ): device = device or self._execution_device dtype = dtype or self.text_encoder.dtype prompt = [prompt] if isinstance(prompt, str) else prompt prompt = [prompt_clean(u) for u in prompt] batch_size = len(prompt) text_inputs = self.tokenizer( prompt, padding="max_length", max_length=max_sequence_length, truncation=True, add_special_tokens=True, return_attention_mask=True, return_tensors="pt", ) text_input_ids, mask = text_inputs.input_ids, text_inputs.attention_mask seq_lens = 
mask.gt(0).sum(dim=1).long() prompt_embeds = self.text_encoder(text_input_ids.to(device), mask.to(device)).last_hidden_state prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) prompt_embeds = [u[:v] for u, v in zip(prompt_embeds, seq_lens)] prompt_embeds = torch.stack( [torch.cat([u, u.new_zeros(max_sequence_length - u.size(0), u.size(1))]) for u in prompt_embeds], dim=0 ) # duplicate text embeddings for each generation per prompt, using mps friendly method _, seq_len, _ = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_videos_per_prompt, 1) prompt_embeds = prompt_embeds.view(batch_size * num_videos_per_prompt, seq_len, -1) return prompt_embeds def encode_image( self, image: PipelineImageInput, device: torch.device | None = None, ): device = device or self._execution_device image = self.image_processor(images=image, return_tensors="pt").to(device) image_embeds = self.image_encoder(**image, output_hidden_states=True) return image_embeds.hidden_states[-2] # Copied from diffusers.pipelines.wan.pipeline_wan.WanPipeline.encode_prompt def encode_prompt( self, prompt: str | list[str], negative_prompt: str | list[str] | None = None, do_classifier_free_guidance: bool = True, num_videos_per_prompt: int = 1, prompt_embeds: torch.Tensor | None = None, negative_prompt_embeds: torch.Tensor | None = None, max_sequence_length: int = 226, device: torch.device | None = None, dtype: torch.dtype | None = None, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `list[str]`, *optional*): prompt to be encoded negative_prompt (`str` or `list[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). do_classifier_free_guidance (`bool`, *optional*, defaults to `True`): Whether to use classifier free guidance or not. 
num_videos_per_prompt (`int`, *optional*, defaults to 1): Number of videos that should be generated per prompt. torch device to place the resulting embeddings on prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. device: (`torch.device`, *optional*): torch device dtype: (`torch.dtype`, *optional*): torch dtype """ device = device or self._execution_device prompt = [prompt] if isinstance(prompt, str) else prompt if prompt is not None: batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: prompt_embeds = self._get_t5_prompt_embeds( prompt=prompt, num_videos_per_prompt=num_videos_per_prompt, max_sequence_length=max_sequence_length, device=device, dtype=dtype, ) if do_classifier_free_guidance and negative_prompt_embeds is None: negative_prompt = negative_prompt or "" negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt if prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." 
) negative_prompt_embeds = self._get_t5_prompt_embeds( prompt=negative_prompt, num_videos_per_prompt=num_videos_per_prompt, max_sequence_length=max_sequence_length, device=device, dtype=dtype, ) return prompt_embeds, negative_prompt_embeds def check_inputs( self, prompt, negative_prompt, image, height, width, prompt_embeds=None, negative_prompt_embeds=None, image_embeds=None, callback_on_step_end_tensor_inputs=None, guidance_scale_2=None, ): if image is not None and image_embeds is not None: raise ValueError( f"Cannot forward both `image`: {image} and `image_embeds`: {image_embeds}. Please make sure to" " only forward one of the two." ) if image is None and image_embeds is None: raise ValueError( "Provide either `image` or `prompt_embeds`. Cannot leave both `image` and `image_embeds` undefined." ) if image is not None and not isinstance(image, torch.Tensor) and not isinstance(image, PIL.Image.Image): raise ValueError(f"`image` has to be of type `torch.Tensor` or `PIL.Image.Image` but is {type(image)}") if height % 16 != 0 or width % 16 != 0: raise ValueError(f"`height` and `width` have to be divisible by 16 but are {height} and {width}.") if callback_on_step_end_tensor_inputs is not None and not all( k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs ): raise ValueError( f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" ) if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to" " only forward one of the two." 
) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." ) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") elif negative_prompt is not None and ( not isinstance(negative_prompt, str) and not isinstance(negative_prompt, list) ): raise ValueError(f"`negative_prompt` has to be of type `str` or `list` but is {type(negative_prompt)}") if self.config.boundary_ratio is None and guidance_scale_2 is not None: raise ValueError("`guidance_scale_2` is only supported when the pipeline's `boundary_ratio` is not None.") if self.config.boundary_ratio is not None and image_embeds is not None: raise ValueError("Cannot forward `image_embeds` when the pipeline's `boundary_ratio` is not configured.") def prepare_latents( self, image: PipelineImageInput, batch_size: int, num_channels_latents: int = 16, height: int = 480, width: int = 832, num_frames: int = 81, dtype: torch.dtype | None = None, device: torch.device | None = None, generator: torch.Generator | list[torch.Generator] | None = None, latents: torch.Tensor | None = None, last_image: torch.Tensor | None = None, ) -> tuple[torch.Tensor, torch.Tensor]: num_latent_frames = (num_frames - 1) // self.vae_scale_factor_temporal + 1 latent_height = height // self.vae_scale_factor_spatial latent_width = width // self.vae_scale_factor_spatial shape = (batch_size, num_channels_latents, num_latent_frames, latent_height, latent_width) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
) if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device=device, dtype=dtype) image = image.unsqueeze(2) # [batch_size, channels, 1, height, width] if self.config.expand_timesteps: video_condition = image elif last_image is None: video_condition = torch.cat( [image, image.new_zeros(image.shape[0], image.shape[1], num_frames - 1, height, width)], dim=2 ) else: last_image = last_image.unsqueeze(2) video_condition = torch.cat( [image, image.new_zeros(image.shape[0], image.shape[1], num_frames - 2, height, width), last_image], dim=2, ) video_condition = video_condition.to(device=device, dtype=self.vae.dtype) latents_mean = ( torch.tensor(self.vae.config.latents_mean) .view(1, self.vae.config.z_dim, 1, 1, 1) .to(latents.device, latents.dtype) ) latents_std = 1.0 / torch.tensor(self.vae.config.latents_std).view(1, self.vae.config.z_dim, 1, 1, 1).to( latents.device, latents.dtype ) if isinstance(generator, list): latent_condition = [ retrieve_latents(self.vae.encode(video_condition), sample_mode="argmax") for _ in generator ] latent_condition = torch.cat(latent_condition) else: latent_condition = retrieve_latents(self.vae.encode(video_condition), sample_mode="argmax") latent_condition = latent_condition.repeat(batch_size, 1, 1, 1, 1) latent_condition = latent_condition.to(dtype) latent_condition = (latent_condition - latents_mean) * latents_std if self.config.expand_timesteps: first_frame_mask = torch.ones( 1, 1, num_latent_frames, latent_height, latent_width, dtype=dtype, device=device ) first_frame_mask[:, :, 0] = 0 return latents, latent_condition, first_frame_mask mask_lat_size = torch.ones(batch_size, 1, num_frames, latent_height, latent_width) if last_image is None: mask_lat_size[:, :, list(range(1, num_frames))] = 0 else: mask_lat_size[:, :, list(range(1, num_frames - 1))] = 0 first_frame_mask = mask_lat_size[:, :, 0:1] first_frame_mask = torch.repeat_interleave(first_frame_mask, dim=2, 
repeats=self.vae_scale_factor_temporal) mask_lat_size = torch.concat([first_frame_mask, mask_lat_size[:, :, 1:, :]], dim=2) mask_lat_size = mask_lat_size.view(batch_size, -1, self.vae_scale_factor_temporal, latent_height, latent_width) mask_lat_size = mask_lat_size.transpose(1, 2) mask_lat_size = mask_lat_size.to(latent_condition.device) return latents, torch.concat([mask_lat_size, latent_condition], dim=1) @property def guidance_scale(self): return self._guidance_scale @property def do_classifier_free_guidance(self): return self._guidance_scale > 1 @property def num_timesteps(self): return self._num_timesteps @property def current_timestep(self): return self._current_timestep @property def interrupt(self): return self._interrupt @property def attention_kwargs(self): return self._attention_kwargs @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, image: PipelineImageInput, prompt: str | list[str] = None, negative_prompt: str | list[str] = None, height: int = 480, width: int = 832, num_frames: int = 81, num_inference_steps: int = 50, guidance_scale: float = 5.0, guidance_scale_2: float | None = None, num_videos_per_prompt: int | None = 1, generator: torch.Generator | list[torch.Generator] | None = None, latents: torch.Tensor | None = None, prompt_embeds: torch.Tensor | None = None, negative_prompt_embeds: torch.Tensor | None = None, image_embeds: torch.Tensor | None = None, last_image: torch.Tensor | None = None, output_type: str | None = "np", return_dict: bool = True, attention_kwargs: dict[str, Any] | None = None, callback_on_step_end: Callable[[int, int], None] | PipelineCallback | MultiPipelineCallbacks | None = None, callback_on_step_end_tensor_inputs: list[str] = ["latents"], max_sequence_length: int = 512, ): r""" The call function to the pipeline for generation. Args: image (`PipelineImageInput`): The input image to condition the generation on. Must be an image, a list of images or a `torch.Tensor`. 
prompt (`str` or `list[str]`, *optional*): The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. instead. negative_prompt (`str` or `list[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). height (`int`, defaults to `480`): The height of the generated video. width (`int`, defaults to `832`): The width of the generated video. num_frames (`int`, defaults to `81`): The number of frames in the generated video. num_inference_steps (`int`, defaults to `50`): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. guidance_scale (`float`, defaults to `5.0`): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. guidance_scale_2 (`float`, *optional*, defaults to `None`): Guidance scale for the low-noise stage transformer (`transformer_2`). If `None` and the pipeline's `boundary_ratio` is not None, uses the same value as `guidance_scale`. Only used when `transformer_2` and the pipeline's `boundary_ratio` are not None. num_videos_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. generator (`torch.Generator` or `list[torch.Generator]`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. 
latents (`torch.Tensor`, *optional*): Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor is generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, text embeddings are generated from the `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, text embeddings are generated from the `negative_prompt` input argument. image_embeds (`torch.Tensor`, *optional*): Pre-generated image embeddings. Can be used to easily tweak image inputs (weighting). If not provided, image embeddings are generated from the `image` input argument. output_type (`str`, *optional*, defaults to `"np"`): The output format of the generated image. Choose between `PIL.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`WanPipelineOutput`] instead of a plain tuple. attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*): A function or a subclass of `PipelineCallback` or `MultiPipelineCallbacks` that is called at the end of each denoising step during the inference. with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. 
`callback_kwargs` will include a list of all tensors as specified by `callback_on_step_end_tensor_inputs`. callback_on_step_end_tensor_inputs (`list`, *optional*): The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the `._callback_tensor_inputs` attribute of your pipeline class. max_sequence_length (`int`, defaults to `512`): The maximum sequence length of the text encoder. If the prompt is longer than this, it will be truncated. If the prompt is shorter, it will be padded to this length. Examples: Returns: [`~WanPipelineOutput`] or `tuple`: If `return_dict` is `True`, [`WanPipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated images and the second element is a list of `bool`s indicating whether the corresponding generated image contains "not-safe-for-work" (nsfw) content. """ if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs # 1. Check inputs. Raise error if not correct self.check_inputs( prompt, negative_prompt, image, height, width, prompt_embeds, negative_prompt_embeds, image_embeds, callback_on_step_end_tensor_inputs, guidance_scale_2, ) if num_frames % self.vae_scale_factor_temporal != 1: logger.warning( f"`num_frames - 1` has to be divisible by {self.vae_scale_factor_temporal}. Rounding to the nearest number." 
) num_frames = num_frames // self.vae_scale_factor_temporal * self.vae_scale_factor_temporal + 1 num_frames = max(num_frames, 1) patch_size = ( self.transformer.config.patch_size if self.transformer is not None else self.transformer_2.config.patch_size ) h_multiple_of = self.vae_scale_factor_spatial * patch_size[1] w_multiple_of = self.vae_scale_factor_spatial * patch_size[2] calc_height = height // h_multiple_of * h_multiple_of calc_width = width // w_multiple_of * w_multiple_of if height != calc_height or width != calc_width: logger.warning( f"`height` and `width` must be multiples of ({h_multiple_of}, {w_multiple_of}) for proper patchification. " f"Adjusting ({height}, {width}) -> ({calc_height}, {calc_width})." ) height, width = calc_height, calc_width if self.config.boundary_ratio is not None and guidance_scale_2 is None: guidance_scale_2 = guidance_scale self._guidance_scale = guidance_scale self._guidance_scale_2 = guidance_scale_2 self._attention_kwargs = attention_kwargs self._current_timestep = None self._interrupt = False device = self._execution_device # 2. Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] # 3. 
Encode input prompt prompt_embeds, negative_prompt_embeds = self.encode_prompt( prompt=prompt, negative_prompt=negative_prompt, do_classifier_free_guidance=self.do_classifier_free_guidance, num_videos_per_prompt=num_videos_per_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, max_sequence_length=max_sequence_length, device=device, ) # Encode image embedding transformer_dtype = self.transformer.dtype if self.transformer is not None else self.transformer_2.dtype prompt_embeds = prompt_embeds.to(transformer_dtype) if negative_prompt_embeds is not None: negative_prompt_embeds = negative_prompt_embeds.to(transformer_dtype) # only wan 2.1 i2v transformer accepts image_embeds if self.transformer is not None and self.transformer.config.image_dim is not None: if image_embeds is None: if last_image is None: image_embeds = self.encode_image(image, device) else: image_embeds = self.encode_image([image, last_image], device) image_embeds = image_embeds.repeat(batch_size, 1, 1) image_embeds = image_embeds.to(transformer_dtype) # 4. Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps # 5. Prepare latent variables num_channels_latents = self.vae.config.z_dim image = self.video_processor.preprocess(image, height=height, width=width).to(device, dtype=torch.float32) if last_image is not None: last_image = self.video_processor.preprocess(last_image, height=height, width=width).to( device, dtype=torch.float32 ) latents_outputs = self.prepare_latents( image, batch_size * num_videos_per_prompt, num_channels_latents, height, width, num_frames, torch.float32, device, generator, latents, last_image, ) if self.config.expand_timesteps: # wan 2.2 5b i2v use firt_frame_mask to mask timesteps latents, condition, first_frame_mask = latents_outputs else: latents, condition = latents_outputs # 6. 
Denoising loop num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order self._num_timesteps = len(timesteps) if self.config.boundary_ratio is not None: boundary_timestep = self.config.boundary_ratio * self.scheduler.config.num_train_timesteps else: boundary_timestep = None with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): if self.interrupt: continue self._current_timestep = t if boundary_timestep is None or t >= boundary_timestep: # wan2.1 or high-noise stage in wan2.2 current_model = self.transformer current_guidance_scale = guidance_scale else: # low-noise stage in wan2.2 current_model = self.transformer_2 current_guidance_scale = guidance_scale_2 if self.config.expand_timesteps: latent_model_input = (1 - first_frame_mask) * condition + first_frame_mask * latents latent_model_input = latent_model_input.to(transformer_dtype) # seq_len: num_latent_frames * (latent_height // patch_size) * (latent_width // patch_size) temp_ts = (first_frame_mask[0][0][:, ::2, ::2] * t).flatten() # batch_size, seq_len timestep = temp_ts.unsqueeze(0).expand(latents.shape[0], -1) else: latent_model_input = torch.cat([latents, condition], dim=1).to(transformer_dtype) timestep = t.expand(latents.shape[0]) with current_model.cache_context("cond"): noise_pred = current_model( hidden_states=latent_model_input, timestep=timestep, encoder_hidden_states=prompt_embeds, encoder_hidden_states_image=image_embeds, attention_kwargs=attention_kwargs, return_dict=False, )[0] if self.do_classifier_free_guidance: with current_model.cache_context("uncond"): noise_uncond = current_model( hidden_states=latent_model_input, timestep=timestep, encoder_hidden_states=negative_prompt_embeds, encoder_hidden_states_image=image_embeds, attention_kwargs=attention_kwargs, return_dict=False, )[0] noise_pred = noise_uncond + current_guidance_scale * (noise_pred - noise_uncond) # compute the previous noisy sample x_t -> x_t-1 latents = 
self.scheduler.step(noise_pred, t, latents, return_dict=False)[0] if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop("latents", latents) prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if XLA_AVAILABLE: xm.mark_step() self._current_timestep = None if self.config.expand_timesteps: latents = (1 - first_frame_mask) * condition + first_frame_mask * latents if not output_type == "latent": latents = latents.to(self.vae.dtype) latents_mean = ( torch.tensor(self.vae.config.latents_mean) .view(1, self.vae.config.z_dim, 1, 1, 1) .to(latents.device, latents.dtype) ) latents_std = 1.0 / torch.tensor(self.vae.config.latents_std).view(1, self.vae.config.z_dim, 1, 1, 1).to( latents.device, latents.dtype ) latents = latents / latents_std + latents_mean video = self.vae.decode(latents, return_dict=False)[0] video = self.video_processor.postprocess_video(video, output_type=output_type) else: video = latents # Offload all models self.maybe_free_model_hooks() if not return_dict: return (video,) return WanPipelineOutput(frames=video)
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/pipelines/wan/pipeline_wan_i2v.py", "license": "Apache License 2.0", "lines": 733, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:tests/models/autoencoders/test_models_autoencoder_wan.py
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

from diffusers import AutoencoderKLWan

from ...testing_utils import enable_full_determinism, floats_tensor, torch_device
from ..test_modeling_common import ModelTesterMixin
from .testing_utils import AutoencoderTesterMixin


enable_full_determinism()


class AutoencoderKLWanTests(ModelTesterMixin, AutoencoderTesterMixin, unittest.TestCase):
    """Fast tests for the Wan video VAE (`AutoencoderKLWan`).

    Inherits the shared model and autoencoder test suites from
    `ModelTesterMixin` / `AutoencoderTesterMixin`; this class only supplies a
    tiny model config and dummy inputs, and skips tests that the Wan VAE does
    not support.
    """

    # Model class under test and the keyword name of its primary forward input.
    model_class = AutoencoderKLWan
    main_input_name = "sample"
    # Tolerance used by the inherited precision checks.
    base_precision = 1e-2

    def get_autoencoder_kl_wan_config(self):
        """Return a deliberately tiny AutoencoderKLWan config so tests run fast on CPU."""
        return {
            "base_dim": 3,
            "z_dim": 16,
            "dim_mult": [1, 1, 1, 1],
            "num_res_blocks": 1,
            "temperal_downsample": [False, True, True],
        }

    @property
    def dummy_input(self):
        """Random video batch shaped (batch, channels, frames, height, width) on the test device."""
        batch_size = 2
        num_frames = 9
        num_channels = 3
        sizes = (16, 16)

        image = floats_tensor((batch_size, num_channels, num_frames) + sizes).to(torch_device)

        return {"sample": image}

    @property
    def dummy_input_tiling(self):
        # Same as `dummy_input` but with a larger 128x128 spatial size so that
        # tiled encode/decode paths are actually exercised.
        batch_size = 2
        num_frames = 9
        num_channels = 3
        sizes = (128, 128)

        image = floats_tensor((batch_size, num_channels, num_frames) + sizes).to(torch_device)

        return {"sample": image}

    @property
    def input_shape(self):
        # (channels, frames, height, width) — the batch dimension is excluded.
        return (3, 9, 16, 16)

    @property
    def output_shape(self):
        # Autoencoder reconstruction keeps the input shape.
        return (3, 9, 16, 16)

    def prepare_init_args_and_inputs_for_common(self):
        """Return the (init_dict, inputs_dict) pair consumed by the shared mixin tests."""
        init_dict = self.get_autoencoder_kl_wan_config()
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def prepare_init_args_and_inputs_for_tiling(self):
        """Like `prepare_init_args_and_inputs_for_common`, but with the larger tiling input."""
        init_dict = self.get_autoencoder_kl_wan_config()
        inputs_dict = self.dummy_input_tiling
        return init_dict, inputs_dict

    @unittest.skip("Gradient checkpointing has not been implemented yet")
    def test_gradient_checkpointing_is_applied(self):
        pass

    @unittest.skip("Test not supported")
    def test_forward_with_norm_groups(self):
        pass

    @unittest.skip("RuntimeError: fill_out not implemented for 'Float8_e4m3fn'")
    def test_layerwise_casting_inference(self):
        pass

    @unittest.skip("RuntimeError: fill_out not implemented for 'Float8_e4m3fn'")
    def test_layerwise_casting_training(self):
        pass
{ "repo_id": "huggingface/diffusers", "file_path": "tests/models/autoencoders/test_models_autoencoder_wan.py", "license": "Apache License 2.0", "lines": 74, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/diffusers:tests/models/transformers/test_models_transformer_wan.py
# Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import pytest import torch from diffusers import WanTransformer3DModel from diffusers.utils.torch_utils import randn_tensor from ...testing_utils import enable_full_determinism, torch_device from ..testing_utils import ( AttentionTesterMixin, BaseModelTesterConfig, BitsAndBytesTesterMixin, GGUFCompileTesterMixin, GGUFTesterMixin, MemoryTesterMixin, ModelTesterMixin, TorchAoTesterMixin, TorchCompileTesterMixin, TrainingTesterMixin, ) enable_full_determinism() class WanTransformer3DTesterConfig(BaseModelTesterConfig): @property def model_class(self): return WanTransformer3DModel @property def pretrained_model_name_or_path(self): return "hf-internal-testing/tiny-wan22-transformer" @property def output_shape(self) -> tuple[int, ...]: return (4, 2, 16, 16) @property def input_shape(self) -> tuple[int, ...]: return (4, 2, 16, 16) @property def main_input_name(self) -> str: return "hidden_states" @property def generator(self): return torch.Generator("cpu").manual_seed(0) def get_init_dict(self) -> dict[str, int | list[int] | tuple | str | bool]: return { "patch_size": (1, 2, 2), "num_attention_heads": 2, "attention_head_dim": 12, "in_channels": 4, "out_channels": 4, "text_dim": 16, "freq_dim": 256, "ffn_dim": 32, "num_layers": 2, "cross_attn_norm": True, "qk_norm": "rms_norm_across_heads", "rope_max_seq_len": 32, } def get_dummy_inputs(self) -> dict[str, torch.Tensor]: batch_size = 1 num_channels = 4 
num_frames = 2 height = 16 width = 16 text_encoder_embedding_dim = 16 sequence_length = 12 return { "hidden_states": randn_tensor( (batch_size, num_channels, num_frames, height, width), generator=self.generator, device=torch_device, ), "encoder_hidden_states": randn_tensor( (batch_size, sequence_length, text_encoder_embedding_dim), generator=self.generator, device=torch_device, ), "timestep": torch.randint(0, 1000, size=(batch_size,), generator=self.generator).to(torch_device), } class TestWanTransformer3D(WanTransformer3DTesterConfig, ModelTesterMixin): """Core model tests for Wan Transformer 3D.""" @pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16], ids=["fp16", "bf16"]) def test_from_save_pretrained_dtype_inference(self, tmp_path, dtype): # Skip: fp16/bf16 require very high atol to pass, providing little signal. # Dtype preservation is already tested by test_from_save_pretrained_dtype and test_keep_in_fp32_modules. pytest.skip("Tolerance requirements too high for meaningful test") class TestWanTransformer3DMemory(WanTransformer3DTesterConfig, MemoryTesterMixin): """Memory optimization tests for Wan Transformer 3D.""" class TestWanTransformer3DTraining(WanTransformer3DTesterConfig, TrainingTesterMixin): """Training tests for Wan Transformer 3D.""" def test_gradient_checkpointing_is_applied(self): expected_set = {"WanTransformer3DModel"} super().test_gradient_checkpointing_is_applied(expected_set=expected_set) class TestWanTransformer3DAttention(WanTransformer3DTesterConfig, AttentionTesterMixin): """Attention processor tests for Wan Transformer 3D.""" class TestWanTransformer3DCompile(WanTransformer3DTesterConfig, TorchCompileTesterMixin): """Torch compile tests for Wan Transformer 3D.""" class TestWanTransformer3DBitsAndBytes(WanTransformer3DTesterConfig, BitsAndBytesTesterMixin): """BitsAndBytes quantization tests for Wan Transformer 3D.""" @property def torch_dtype(self): return torch.float16 def get_dummy_inputs(self): """Override to provide 
inputs matching the tiny Wan model dimensions.""" return { "hidden_states": randn_tensor( (1, 36, 2, 64, 64), generator=self.generator, device=torch_device, dtype=self.torch_dtype ), "encoder_hidden_states": randn_tensor( (1, 512, 4096), generator=self.generator, device=torch_device, dtype=self.torch_dtype ), "timestep": torch.tensor([1.0]).to(torch_device, self.torch_dtype), } class TestWanTransformer3DTorchAo(WanTransformer3DTesterConfig, TorchAoTesterMixin): """TorchAO quantization tests for Wan Transformer 3D.""" @property def torch_dtype(self): return torch.bfloat16 def get_dummy_inputs(self): """Override to provide inputs matching the tiny Wan model dimensions.""" return { "hidden_states": randn_tensor( (1, 36, 2, 64, 64), generator=self.generator, device=torch_device, dtype=self.torch_dtype ), "encoder_hidden_states": randn_tensor( (1, 512, 4096), generator=self.generator, device=torch_device, dtype=self.torch_dtype ), "timestep": torch.tensor([1.0]).to(torch_device, self.torch_dtype), } class TestWanTransformer3DGGUF(WanTransformer3DTesterConfig, GGUFTesterMixin): """GGUF quantization tests for Wan Transformer 3D.""" @property def gguf_filename(self): return "https://huggingface.co/QuantStack/Wan2.2-I2V-A14B-GGUF/blob/main/LowNoise/Wan2.2-I2V-A14B-LowNoise-Q2_K.gguf" @property def torch_dtype(self): return torch.bfloat16 def _create_quantized_model(self, config_kwargs=None, **extra_kwargs): return super()._create_quantized_model( config_kwargs, config="Wan-AI/Wan2.2-I2V-A14B-Diffusers", subfolder="transformer", **extra_kwargs ) def get_dummy_inputs(self): """Override to provide inputs matching the real Wan I2V model dimensions. 
Wan 2.2 I2V: in_channels=36, text_dim=4096 """ return { "hidden_states": randn_tensor( (1, 36, 2, 64, 64), generator=self.generator, device=torch_device, dtype=self.torch_dtype ), "encoder_hidden_states": randn_tensor( (1, 512, 4096), generator=self.generator, device=torch_device, dtype=self.torch_dtype ), "timestep": torch.tensor([1.0]).to(torch_device, self.torch_dtype), } class TestWanTransformer3DGGUFCompile(WanTransformer3DTesterConfig, GGUFCompileTesterMixin): """GGUF + compile tests for Wan Transformer 3D.""" @property def gguf_filename(self): return "https://huggingface.co/QuantStack/Wan2.2-I2V-A14B-GGUF/blob/main/LowNoise/Wan2.2-I2V-A14B-LowNoise-Q2_K.gguf" @property def torch_dtype(self): return torch.bfloat16 def _create_quantized_model(self, config_kwargs=None, **extra_kwargs): return super()._create_quantized_model( config_kwargs, config="Wan-AI/Wan2.2-I2V-A14B-Diffusers", subfolder="transformer", **extra_kwargs ) def get_dummy_inputs(self): """Override to provide inputs matching the real Wan I2V model dimensions. Wan 2.2 I2V: in_channels=36, text_dim=4096 """ return { "hidden_states": randn_tensor( (1, 36, 2, 64, 64), generator=self.generator, device=torch_device, dtype=self.torch_dtype ), "encoder_hidden_states": randn_tensor( (1, 512, 4096), generator=self.generator, device=torch_device, dtype=self.torch_dtype ), "timestep": torch.tensor([1.0]).to(torch_device, self.torch_dtype), }
{ "repo_id": "huggingface/diffusers", "file_path": "tests/models/transformers/test_models_transformer_wan.py", "license": "Apache License 2.0", "lines": 186, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/diffusers:tests/pipelines/wan/test_wan.py
# Copyright 2025 The HuggingFace Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import gc
import tempfile
import unittest

import numpy as np
import torch
from transformers import AutoConfig, AutoTokenizer, T5EncoderModel

from diffusers import AutoencoderKLWan, FlowMatchEulerDiscreteScheduler, WanPipeline, WanTransformer3DModel

from ...testing_utils import (
    backend_empty_cache,
    enable_full_determinism,
    require_torch_accelerator,
    slow,
    torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


# Make cuDNN/cuBLAS deterministic so the hard-coded expected slices below are reproducible.
enable_full_determinism()


class WanPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests for the Wan 2.1 text-to-video pipeline using tiny dummy components."""

    pipeline_class = WanPipeline
    params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs"}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback_on_step_end",
            "callback_on_step_end_tensor_inputs",
        ]
    )
    test_xformers_attention = False
    supports_dduf = False

    def get_dummy_components(self):
        """Build minimal pipeline components.

        NOTE: `torch.manual_seed(0)` is re-applied before each randomly
        initialized module so that weights (and therefore the expected
        output slices in `test_inference`) stay reproducible regardless of
        how much randomness the previous module consumed.
        """
        torch.manual_seed(0)
        vae = AutoencoderKLWan(
            base_dim=3,
            z_dim=16,
            dim_mult=[1, 1, 1, 1],
            num_res_blocks=1,
            temperal_downsample=[False, True, True],
        )

        torch.manual_seed(0)
        # TODO: impl FlowDPMSolverMultistepScheduler
        scheduler = FlowMatchEulerDiscreteScheduler(shift=7.0)
        config = AutoConfig.from_pretrained("hf-internal-testing/tiny-random-t5")
        text_encoder = T5EncoderModel(config)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        transformer = WanTransformer3DModel(
            patch_size=(1, 2, 2),
            num_attention_heads=2,
            attention_head_dim=12,
            in_channels=16,
            out_channels=16,
            text_dim=32,
            freq_dim=256,
            ffn_dim=32,
            num_layers=2,
            cross_attn_norm=True,
            qk_norm="rms_norm_across_heads",
            rope_max_seq_len=32,
        )

        components = {
            "transformer": transformer,
            "vae": vae,
            "scheduler": scheduler,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            # Optional second transformer (unused by this Wan 2.1 T2V config).
            "transformer_2": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Return a small, seeded call-kwargs dict for the pipeline."""
        # MPS does not support device-bound generators; fall back to the global one.
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "prompt": "dance monkey",
            "negative_prompt": "negative",  # TODO
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "height": 16,
            "width": 16,
            "num_frames": 9,
            "max_sequence_length": 16,
            "output_type": "pt",
        }
        return inputs

    def test_inference(self):
        """Run the pipeline end-to-end on CPU and compare a 16-value slice against pinned outputs."""
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        video = pipe(**inputs).frames
        generated_video = video[0]
        # (num_frames, channels, height, width)
        self.assertEqual(generated_video.shape, (9, 3, 16, 16))

        # fmt: off
        expected_slice = torch.tensor([0.4525, 0.452, 0.4485, 0.4534, 0.4524, 0.4529, 0.454, 0.453, 0.5127, 0.5326, 0.5204, 0.5253, 0.5439, 0.5424, 0.5133, 0.5078])
        # fmt: on

        # Compare only the first and last 8 flattened values to keep the fixture small.
        generated_slice = generated_video.flatten()
        generated_slice = torch.cat([generated_slice[:8], generated_slice[-8:]])
        self.assertTrue(torch.allclose(generated_slice, expected_slice, atol=1e-3))

    @unittest.skip("Test not supported")
    def test_attention_slicing_forward_pass(self):
        pass

    # _optional_components include transformer, transformer_2, but only transformer_2 is optional for this wan2.1 t2v pipeline
    def test_save_load_optional_components(self, expected_max_difference=1e-4):
        """Save/reload the pipeline with `transformer_2=None` and check outputs match."""
        optional_component = "transformer_2"

        components = self.get_dummy_components()
        components[optional_component] = None
        pipe = self.pipeline_class(**components)
        for component in pipe.components.values():
            if hasattr(component, "set_default_attn_processor"):
                component.set_default_attn_processor()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator_device = "cpu"
        inputs = self.get_dummy_inputs(generator_device)
        torch.manual_seed(0)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir, safe_serialization=False)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            for component in pipe_loaded.components.values():
                if hasattr(component, "set_default_attn_processor"):
                    component.set_default_attn_processor()
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        # The optional component must still be None after the save/load round trip.
        self.assertTrue(
            getattr(pipe_loaded, optional_component) is None,
            f"`{optional_component}` did not stay set to None after loading.",
        )

        inputs = self.get_dummy_inputs(generator_device)
        torch.manual_seed(0)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(output.detach().cpu().numpy() - output_loaded.detach().cpu().numpy()).max()
        self.assertLess(max_diff, expected_max_difference)


@slow
@require_torch_accelerator
class WanPipelineIntegrationTests(unittest.TestCase):
    """Accelerator-only integration tests for the Wan pipeline (currently a placeholder)."""

    prompt = "A painting of a squirrel eating a burger."

    def setUp(self):
        # Free accelerator memory left over from previous tests.
        super().setUp()
        gc.collect()
        backend_empty_cache(torch_device)

    def tearDown(self):
        super().tearDown()
        gc.collect()
        backend_empty_cache(torch_device)

    @unittest.skip("TODO: test needs to be implemented")
    def test_Wanx(self):
        pass
{ "repo_id": "huggingface/diffusers", "file_path": "tests/pipelines/wan/test_wan.py", "license": "Apache License 2.0", "lines": 171, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/diffusers:tests/pipelines/wan/test_wan_image_to_video.py
# Copyright 2025 The HuggingFace Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import tempfile
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import (
    AutoConfig,
    AutoTokenizer,
    CLIPImageProcessor,
    CLIPVisionConfig,
    CLIPVisionModelWithProjection,
    T5EncoderModel,
)

from diffusers import AutoencoderKLWan, FlowMatchEulerDiscreteScheduler, WanImageToVideoPipeline, WanTransformer3DModel

from ...testing_utils import enable_full_determinism, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


# Make kernels deterministic so the pinned expected slices below are reproducible.
enable_full_determinism()


class WanImageToVideoPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests for the Wan 2.1 image-to-video pipeline using tiny dummy components."""

    pipeline_class = WanImageToVideoPipeline
    params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs", "height", "width"}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback_on_step_end",
            "callback_on_step_end_tensor_inputs",
        ]
    )
    test_xformers_attention = False
    supports_dduf = False

    def get_dummy_components(self):
        """Build minimal pipeline components.

        NOTE: `torch.manual_seed(0)` is re-applied before each randomly
        initialized module so that weights (and therefore the expected
        output slices in `test_inference`) stay reproducible regardless of
        how much randomness the previous module consumed.
        """
        torch.manual_seed(0)
        vae = AutoencoderKLWan(
            base_dim=3,
            z_dim=16,
            dim_mult=[1, 1, 1, 1],
            num_res_blocks=1,
            temperal_downsample=[False, True, True],
        )

        torch.manual_seed(0)
        # TODO: impl FlowDPMSolverMultistepScheduler
        scheduler = FlowMatchEulerDiscreteScheduler(shift=7.0)
        config = AutoConfig.from_pretrained("hf-internal-testing/tiny-random-t5")
        text_encoder = T5EncoderModel(config)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        transformer = WanTransformer3DModel(
            patch_size=(1, 2, 2),
            num_attention_heads=2,
            attention_head_dim=12,
            # 36 input channels: video latents are concatenated with image-conditioning latents.
            in_channels=36,
            out_channels=16,
            text_dim=32,
            freq_dim=256,
            ffn_dim=32,
            num_layers=2,
            cross_attn_norm=True,
            qk_norm="rms_norm_across_heads",
            rope_max_seq_len=32,
            image_dim=4,
        )

        torch.manual_seed(0)
        image_encoder_config = CLIPVisionConfig(
            hidden_size=4,
            projection_dim=4,
            num_hidden_layers=2,
            num_attention_heads=2,
            image_size=32,
            intermediate_size=16,
            patch_size=1,
        )
        image_encoder = CLIPVisionModelWithProjection(image_encoder_config)

        torch.manual_seed(0)
        image_processor = CLIPImageProcessor(crop_size=32, size=32)

        components = {
            "transformer": transformer,
            "vae": vae,
            "scheduler": scheduler,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "image_encoder": image_encoder,
            "image_processor": image_processor,
            # Optional second transformer (unused by this Wan 2.1 I2V config).
            "transformer_2": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Return a small, seeded call-kwargs dict including a blank conditioning image."""
        # MPS does not support device-bound generators; fall back to the global one.
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image_height = 16
        image_width = 16
        image = Image.new("RGB", (image_width, image_height))
        inputs = {
            "image": image,
            "prompt": "dance monkey",
            "negative_prompt": "negative",  # TODO
            "height": image_height,
            "width": image_width,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "num_frames": 9,
            "max_sequence_length": 16,
            "output_type": "pt",
        }
        return inputs

    def test_inference(self):
        """Run the pipeline end-to-end on CPU and compare a 16-value slice against pinned outputs."""
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        video = pipe(**inputs).frames
        generated_video = video[0]
        # (num_frames, channels, height, width)
        self.assertEqual(generated_video.shape, (9, 3, 16, 16))

        # fmt: off
        expected_slice = torch.tensor([0.4525, 0.4525, 0.4497, 0.4536, 0.452, 0.4529, 0.454, 0.4535, 0.5072, 0.5527, 0.5165, 0.5244, 0.5481, 0.5282, 0.5208, 0.5214])
        # fmt: on

        # Compare only the first and last 8 flattened values to keep the fixture small.
        generated_slice = generated_video.flatten()
        generated_slice = torch.cat([generated_slice[:8], generated_slice[-8:]])
        self.assertTrue(torch.allclose(generated_slice, expected_slice, atol=1e-3))

    @unittest.skip("Test not supported")
    def test_attention_slicing_forward_pass(self):
        pass

    @unittest.skip("TODO: revisit failing as it requires a very high threshold to pass")
    def test_inference_batch_single_identical(self):
        pass

    # _optional_components include transformer, transformer_2 and image_encoder, image_processor, but only transformer_2 is optional for wan2.1 i2v pipeline
    def test_save_load_optional_components(self, expected_max_difference=1e-4):
        """Save/reload the pipeline with `transformer_2=None` and check outputs match."""
        optional_component = "transformer_2"

        components = self.get_dummy_components()
        components[optional_component] = None
        pipe = self.pipeline_class(**components)
        for component in pipe.components.values():
            if hasattr(component, "set_default_attn_processor"):
                component.set_default_attn_processor()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator_device = "cpu"
        inputs = self.get_dummy_inputs(generator_device)
        torch.manual_seed(0)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir, safe_serialization=False)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            for component in pipe_loaded.components.values():
                if hasattr(component, "set_default_attn_processor"):
                    component.set_default_attn_processor()
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        # The optional component must still be None after the save/load round trip.
        self.assertTrue(
            getattr(pipe_loaded, optional_component) is None,
            f"`{optional_component}` did not stay set to None after loading.",
        )

        inputs = self.get_dummy_inputs(generator_device)
        torch.manual_seed(0)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(output.detach().cpu().numpy() - output_loaded.detach().cpu().numpy()).max()
        self.assertLess(max_diff, expected_max_difference)


class WanFLFToVideoPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests for the Wan first/last-frame-to-video (FLF2V) variant of the I2V pipeline."""

    pipeline_class = WanImageToVideoPipeline
    params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs", "height", "width"}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback_on_step_end",
            "callback_on_step_end_tensor_inputs",
        ]
    )
    test_xformers_attention = False
    supports_dduf = False

    def get_dummy_components(self):
        """Build minimal pipeline components for the FLF2V variant.

        Differs from the plain I2V fixture in that the transformer takes
        `pos_embed_seq_len` (conditioning on two frames) and the CLIP image
        encoder/processor use a 4x4 image size.
        """
        torch.manual_seed(0)
        vae = AutoencoderKLWan(
            base_dim=3,
            z_dim=16,
            dim_mult=[1, 1, 1, 1],
            num_res_blocks=1,
            temperal_downsample=[False, True, True],
        )

        torch.manual_seed(0)
        # TODO: impl FlowDPMSolverMultistepScheduler
        scheduler = FlowMatchEulerDiscreteScheduler(shift=7.0)
        config = AutoConfig.from_pretrained("hf-internal-testing/tiny-random-t5")
        text_encoder = T5EncoderModel(config)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        transformer = WanTransformer3DModel(
            patch_size=(1, 2, 2),
            num_attention_heads=2,
            attention_head_dim=12,
            in_channels=36,
            out_channels=16,
            text_dim=32,
            freq_dim=256,
            ffn_dim=32,
            num_layers=2,
            cross_attn_norm=True,
            qk_norm="rms_norm_across_heads",
            rope_max_seq_len=32,
            image_dim=4,
            pos_embed_seq_len=2 * (4 * 4 + 1),
        )

        torch.manual_seed(0)
        image_encoder_config = CLIPVisionConfig(
            hidden_size=4,
            projection_dim=4,
            num_hidden_layers=2,
            num_attention_heads=2,
            image_size=4,
            intermediate_size=16,
            patch_size=1,
        )
        image_encoder = CLIPVisionModelWithProjection(image_encoder_config)

        torch.manual_seed(0)
        image_processor = CLIPImageProcessor(crop_size=4, size=4)

        components = {
            "transformer": transformer,
            "vae": vae,
            "scheduler": scheduler,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "image_encoder": image_encoder,
            "image_processor": image_processor,
            # Optional second transformer (unused by this FLF2V config).
            "transformer_2": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Return a small, seeded call-kwargs dict with both first (`image`) and `last_image` frames."""
        # MPS does not support device-bound generators; fall back to the global one.
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image_height = 16
        image_width = 16
        image = Image.new("RGB", (image_width, image_height))
        last_image = Image.new("RGB", (image_width, image_height))
        inputs = {
            "image": image,
            "last_image": last_image,
            "prompt": "dance monkey",
            "negative_prompt": "negative",
            "height": image_height,
            "width": image_width,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "num_frames": 9,
            "max_sequence_length": 16,
            "output_type": "pt",
        }
        return inputs

    def test_inference(self):
        """Run the pipeline end-to-end on CPU and compare a 16-value slice against pinned outputs."""
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        video = pipe(**inputs).frames
        generated_video = video[0]
        # (num_frames, channels, height, width)
        self.assertEqual(generated_video.shape, (9, 3, 16, 16))

        # fmt: off
        expected_slice = torch.tensor([0.4531, 0.4527, 0.4498, 0.4542, 0.4526, 0.4527, 0.4534, 0.4534, 0.5061, 0.5185, 0.5283, 0.5181, 0.5309, 0.5365, 0.5113, 0.5244])
        # fmt: on

        # Compare only the first and last 8 flattened values to keep the fixture small.
        generated_slice = generated_video.flatten()
        generated_slice = torch.cat([generated_slice[:8], generated_slice[-8:]])
        self.assertTrue(torch.allclose(generated_slice, expected_slice, atol=1e-3))

    @unittest.skip("Test not supported")
    def test_attention_slicing_forward_pass(self):
        pass

    @unittest.skip("TODO: revisit failing as it requires a very high threshold to pass")
    def test_inference_batch_single_identical(self):
        pass

    # _optional_components include transformer, transformer_2 and image_encoder, image_processor, but only transformer_2 is optional for wan2.1 FLFT2V pipeline
    def test_save_load_optional_components(self, expected_max_difference=1e-4):
        """Save/reload the pipeline with `transformer_2=None` and check outputs match."""
        optional_component = "transformer_2"

        components = self.get_dummy_components()
        components[optional_component] = None
        pipe = self.pipeline_class(**components)
        for component in pipe.components.values():
            if hasattr(component, "set_default_attn_processor"):
                component.set_default_attn_processor()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator_device = "cpu"
        inputs = self.get_dummy_inputs(generator_device)
        torch.manual_seed(0)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir, safe_serialization=False)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            for component in pipe_loaded.components.values():
                if hasattr(component, "set_default_attn_processor"):
                    component.set_default_attn_processor()
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        # The optional component must still be None after the save/load round trip.
        self.assertTrue(
            getattr(pipe_loaded, optional_component) is None,
            f"`{optional_component}` did not stay set to None after loading.",
        )

        inputs = self.get_dummy_inputs(generator_device)
        torch.manual_seed(0)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(output.detach().cpu().numpy() - output_loaded.detach().cpu().numpy()).max()
        self.assertLess(max_diff, expected_max_difference)
{ "repo_id": "huggingface/diffusers", "file_path": "tests/pipelines/wan/test_wan_image_to_video.py", "license": "Apache License 2.0", "lines": 332, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/diffusers:src/diffusers/pipelines/marigold/pipeline_marigold_intrinsics.py
# Copyright 2023-2025 Marigold Team, ETH Zürich. All rights reserved. # Copyright 2024-2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # -------------------------------------------------------------------------- # More information and citation instructions are available on the # Marigold project website: https://marigoldcomputervision.github.io # -------------------------------------------------------------------------- from dataclasses import dataclass from typing import Any import numpy as np import torch from PIL import Image from tqdm.auto import tqdm from transformers import CLIPTextModel, CLIPTokenizer from ...image_processor import PipelineImageInput from ...models import ( AutoencoderKL, UNet2DConditionModel, ) from ...schedulers import ( DDIMScheduler, LCMScheduler, ) from ...utils import ( BaseOutput, is_torch_xla_available, logging, replace_example_docstring, ) from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline from .marigold_image_processing import MarigoldImageProcessor if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py >>> import diffusers >>> import torch >>> pipe = diffusers.MarigoldIntrinsicsPipeline.from_pretrained( ... 
"prs-eth/marigold-iid-appearance-v1-1", variant="fp16", torch_dtype=torch.float16 ... ).to("cuda") >>> image = diffusers.utils.load_image("https://marigoldmonodepth.github.io/images/einstein.jpg") >>> intrinsics = pipe(image) >>> vis = pipe.image_processor.visualize_intrinsics(intrinsics.prediction, pipe.target_properties) >>> vis[0]["albedo"].save("einstein_albedo.png") >>> vis[0]["roughness"].save("einstein_roughness.png") >>> vis[0]["metallicity"].save("einstein_metallicity.png") ``` ```py >>> import diffusers >>> import torch >>> pipe = diffusers.MarigoldIntrinsicsPipeline.from_pretrained( ... "prs-eth/marigold-iid-lighting-v1-1", variant="fp16", torch_dtype=torch.float16 ... ).to("cuda") >>> image = diffusers.utils.load_image("https://marigoldmonodepth.github.io/images/einstein.jpg") >>> intrinsics = pipe(image) >>> vis = pipe.image_processor.visualize_intrinsics(intrinsics.prediction, pipe.target_properties) >>> vis[0]["albedo"].save("einstein_albedo.png") >>> vis[0]["shading"].save("einstein_shading.png") >>> vis[0]["residual"].save("einstein_residual.png") ``` """ @dataclass class MarigoldIntrinsicsOutput(BaseOutput): """ Output class for Marigold Intrinsic Image Decomposition pipeline. Args: prediction (`np.ndarray`, `torch.Tensor`): Predicted image intrinsics with values in the range [0, 1]. The shape is `(numimages * numtargets) × 3 × height × width` for `torch.Tensor` or `(numimages * numtargets) × height × width × 3` for `np.ndarray`, where `numtargets` corresponds to the number of predicted target modalities of the intrinsic image decomposition. uncertainty (`None`, `np.ndarray`, `torch.Tensor`): Uncertainty maps computed from the ensemble, with values in the range [0, 1]. The shape is `(numimages * numtargets) × 3 × height × width` for `torch.Tensor` or `(numimages * numtargets) × height × width × 3` for `np.ndarray`. 
latent (`None`, `torch.Tensor`): Latent features corresponding to the predictions, compatible with the `latents` argument of the pipeline. The shape is `(numimages * numensemble) × (numtargets * 4) × latentheight × latentwidth`. """ prediction: np.ndarray | torch.Tensor uncertainty: None | np.ndarray | torch.Tensor latent: None | torch.Tensor class MarigoldIntrinsicsPipeline(DiffusionPipeline): """ Pipeline for Intrinsic Image Decomposition (IID) using the Marigold method: https://marigoldcomputervision.github.io. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) Args: unet (`UNet2DConditionModel`): Conditional U-Net to denoise the targets latent, conditioned on image latent. vae (`AutoencoderKL`): Variational Auto-Encoder (VAE) Model to encode and decode images and predictions to and from latent representations. scheduler (`DDIMScheduler` or `LCMScheduler`): A scheduler to be used in combination with `unet` to denoise the encoded image latents. text_encoder (`CLIPTextModel`): Text-encoder, for empty text embedding. tokenizer (`CLIPTokenizer`): CLIP tokenizer. prediction_type (`str`, *optional*): Type of predictions made by the model. target_properties (`dict[str, Any]`, *optional*): Properties of the predicted modalities, such as `target_names`, a `list[str]` used to define the number, order and names of the predicted modalities, and any other metadata that may be required to interpret the predictions. default_denoising_steps (`int`, *optional*): The minimum number of denoising diffusion steps that are required to produce a prediction of reasonable quality with the given model. This value must be set in the model config. When the pipeline is called without explicitly setting `num_inference_steps`, the default value is used. 
This is required to ensure reasonable results with various model flavors compatible with the pipeline, such as those relying on very short denoising schedules (`LCMScheduler`) and those with full diffusion schedules (`DDIMScheduler`). default_processing_resolution (`int`, *optional*): The recommended value of the `processing_resolution` parameter of the pipeline. This value must be set in the model config. When the pipeline is called without explicitly setting `processing_resolution`, the default value is used. This is required to ensure reasonable results with various model flavors trained with varying optimal processing resolution values. """ model_cpu_offload_seq = "text_encoder->unet->vae" supported_prediction_types = ("intrinsics",) def __init__( self, unet: UNet2DConditionModel, vae: AutoencoderKL, scheduler: DDIMScheduler | LCMScheduler, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, prediction_type: str | None = None, target_properties: dict[str, Any] | None = None, default_denoising_steps: int | None = None, default_processing_resolution: int | None = None, ): super().__init__() if prediction_type not in self.supported_prediction_types: logger.warning( f"Potentially unsupported `prediction_type='{prediction_type}'`; values supported by the pipeline: " f"{self.supported_prediction_types}." 
) self.register_modules( unet=unet, vae=vae, scheduler=scheduler, text_encoder=text_encoder, tokenizer=tokenizer, ) self.register_to_config( prediction_type=prediction_type, target_properties=target_properties, default_denoising_steps=default_denoising_steps, default_processing_resolution=default_processing_resolution, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8 self.target_properties = target_properties self.default_denoising_steps = default_denoising_steps self.default_processing_resolution = default_processing_resolution self.empty_text_embedding = None self.image_processor = MarigoldImageProcessor(vae_scale_factor=self.vae_scale_factor) @property def n_targets(self): return self.unet.config.out_channels // self.vae.config.latent_channels def check_inputs( self, image: PipelineImageInput, num_inference_steps: int, ensemble_size: int, processing_resolution: int, resample_method_input: str, resample_method_output: str, batch_size: int, ensembling_kwargs: dict[str, Any] | None, latents: torch.Tensor | None, generator: torch.Generator | list[torch.Generator] | None, output_type: str, output_uncertainty: bool, ) -> int: actual_vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if actual_vae_scale_factor != self.vae_scale_factor: raise ValueError( f"`vae_scale_factor` computed at initialization ({self.vae_scale_factor}) differs from the actual one ({actual_vae_scale_factor})." ) if num_inference_steps is None: raise ValueError("`num_inference_steps` is not specified and could not be resolved from the model config.") if num_inference_steps < 1: raise ValueError("`num_inference_steps` must be positive.") if ensemble_size < 1: raise ValueError("`ensemble_size` must be positive.") if ensemble_size == 2: logger.warning( "`ensemble_size` == 2 results are similar to no ensembling (1); " "consider increasing the value to at least 3." 
) if ensemble_size == 1 and output_uncertainty: raise ValueError( "Computing uncertainty by setting `output_uncertainty=True` also requires setting `ensemble_size` " "greater than 1." ) if processing_resolution is None: raise ValueError( "`processing_resolution` is not specified and could not be resolved from the model config." ) if processing_resolution < 0: raise ValueError( "`processing_resolution` must be non-negative: 0 for native resolution, or any positive value for " "downsampled processing." ) if processing_resolution % self.vae_scale_factor != 0: raise ValueError(f"`processing_resolution` must be a multiple of {self.vae_scale_factor}.") if resample_method_input not in ("nearest", "nearest-exact", "bilinear", "bicubic", "area"): raise ValueError( "`resample_method_input` takes string values compatible with PIL library: " "nearest, nearest-exact, bilinear, bicubic, area." ) if resample_method_output not in ("nearest", "nearest-exact", "bilinear", "bicubic", "area"): raise ValueError( "`resample_method_output` takes string values compatible with PIL library: " "nearest, nearest-exact, bilinear, bicubic, area." 
) if batch_size < 1: raise ValueError("`batch_size` must be positive.") if output_type not in ["pt", "np"]: raise ValueError("`output_type` must be one of `pt` or `np`.") if latents is not None and generator is not None: raise ValueError("`latents` and `generator` cannot be used together.") if ensembling_kwargs is not None: if not isinstance(ensembling_kwargs, dict): raise ValueError("`ensembling_kwargs` must be a dictionary.") if "reduction" in ensembling_kwargs and ensembling_kwargs["reduction"] not in ("median", "mean"): raise ValueError("`ensembling_kwargs['reduction']` can be either `'median'` or `'mean'`.") # image checks num_images = 0 W, H = None, None if not isinstance(image, list): image = [image] for i, img in enumerate(image): if isinstance(img, np.ndarray) or torch.is_tensor(img): if img.ndim not in (2, 3, 4): raise ValueError(f"`image[{i}]` has unsupported dimensions or shape: {img.shape}.") H_i, W_i = img.shape[-2:] N_i = 1 if img.ndim == 4: N_i = img.shape[0] elif isinstance(img, Image.Image): W_i, H_i = img.size N_i = 1 else: raise ValueError(f"Unsupported `image[{i}]` type: {type(img)}.") if W is None: W, H = W_i, H_i elif (W, H) != (W_i, H_i): raise ValueError( f"Input `image[{i}]` has incompatible dimensions {(W_i, H_i)} with the previous images {(W, H)}" ) num_images += N_i # latents checks if latents is not None: if not torch.is_tensor(latents): raise ValueError("`latents` must be a torch.Tensor.") if latents.dim() != 4: raise ValueError(f"`latents` has unsupported dimensions or shape: {latents.shape}.") if processing_resolution > 0: max_orig = max(H, W) new_H = H * processing_resolution // max_orig new_W = W * processing_resolution // max_orig if new_H == 0 or new_W == 0: raise ValueError(f"Extreme aspect ratio of the input image: [{W} x {H}]") W, H = new_W, new_H w = (W + self.vae_scale_factor - 1) // self.vae_scale_factor h = (H + self.vae_scale_factor - 1) // self.vae_scale_factor shape_expected = (num_images * ensemble_size, 
self.unet.config.out_channels, h, w) if latents.shape != shape_expected: raise ValueError(f"`latents` has unexpected shape={latents.shape} expected={shape_expected}.") # generator checks if generator is not None: if isinstance(generator, list): if len(generator) != num_images * ensemble_size: raise ValueError( "The number of generators must match the total number of ensemble members for all input images." ) if not all(g.device.type == generator[0].device.type for g in generator): raise ValueError("`generator` device placement is not consistent in the list.") elif not isinstance(generator, torch.Generator): raise ValueError(f"Unsupported generator type: {type(generator)}.") return num_images @torch.compiler.disable def progress_bar(self, iterable=None, total=None, desc=None, leave=True): if not hasattr(self, "_progress_bar_config"): self._progress_bar_config = {} elif not isinstance(self._progress_bar_config, dict): raise ValueError( f"`self._progress_bar_config` should be of type `dict`, but is {type(self._progress_bar_config)}." 
) progress_bar_config = dict(**self._progress_bar_config) progress_bar_config["desc"] = progress_bar_config.get("desc", desc) progress_bar_config["leave"] = progress_bar_config.get("leave", leave) if iterable is not None: return tqdm(iterable, **progress_bar_config) elif total is not None: return tqdm(total=total, **progress_bar_config) else: raise ValueError("Either `total` or `iterable` has to be defined.") @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, image: PipelineImageInput, num_inference_steps: int | None = None, ensemble_size: int = 1, processing_resolution: int | None = None, match_input_resolution: bool = True, resample_method_input: str = "bilinear", resample_method_output: str = "bilinear", batch_size: int = 1, ensembling_kwargs: dict[str, Any] | None = None, latents: torch.Tensor | list[torch.Tensor] | None = None, generator: torch.Generator | list[torch.Generator] | None = None, output_type: str = "np", output_uncertainty: bool = False, output_latent: bool = False, return_dict: bool = True, ): """ Function invoked when calling the pipeline. Args: image (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`), `list[torch.Tensor]`: An input image or images used as an input for the intrinsic decomposition task. For arrays and tensors, the expected value range is between `[0, 1]`. Passing a batch of images is possible by providing a four-dimensional array or a tensor. Additionally, a list of images of two- or three-dimensional arrays or tensors can be passed. In the latter case, all list elements must have the same width and height. num_inference_steps (`int`, *optional*, defaults to `None`): Number of denoising diffusion steps during inference. The default value `None` results in automatic selection. ensemble_size (`int`, defaults to `1`): Number of ensemble predictions. Higher values result in measurable improvements and visual degradation. 
processing_resolution (`int`, *optional*, defaults to `None`): Effective processing resolution. When set to `0`, matches the larger input image dimension. This produces crisper predictions, but may also lead to the overall loss of global context. The default value `None` resolves to the optimal value from the model config. match_input_resolution (`bool`, *optional*, defaults to `True`): When enabled, the output prediction is resized to match the input dimensions. When disabled, the longer side of the output will equal to `processing_resolution`. resample_method_input (`str`, *optional*, defaults to `"bilinear"`): Resampling method used to resize input images to `processing_resolution`. The accepted values are: `"nearest"`, `"nearest-exact"`, `"bilinear"`, `"bicubic"`, or `"area"`. resample_method_output (`str`, *optional*, defaults to `"bilinear"`): Resampling method used to resize output predictions to match the input resolution. The accepted values are `"nearest"`, `"nearest-exact"`, `"bilinear"`, `"bicubic"`, or `"area"`. batch_size (`int`, *optional*, defaults to `1`): Batch size; only matters when setting `ensemble_size` or passing a tensor of images. ensembling_kwargs (`dict`, *optional*, defaults to `None`) Extra dictionary with arguments for precise ensembling control. The following options are available: - reduction (`str`, *optional*, defaults to `"median"`): Defines the ensembling function applied in every pixel location, can be either `"median"` or `"mean"`. latents (`torch.Tensor`, *optional*, defaults to `None`): Latent noise tensors to replace the random initialization. These can be taken from the previous function call's output. generator (`torch.Generator`, or `list[torch.Generator]`, *optional*, defaults to `None`): Random number generator object to ensure reproducibility. output_type (`str`, *optional*, defaults to `"np"`): Preferred format of the output's `prediction` and the optional `uncertainty` fields. 
The accepted values are: `"np"` (numpy array) or `"pt"` (torch tensor). output_uncertainty (`bool`, *optional*, defaults to `False`): When enabled, the output's `uncertainty` field contains the predictive uncertainty map, provided that the `ensemble_size` argument is set to a value above 2. output_latent (`bool`, *optional*, defaults to `False`): When enabled, the output's `latent` field contains the latent codes corresponding to the predictions within the ensemble. These codes can be saved, modified, and used for subsequent calls with the `latents` argument. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.marigold.MarigoldIntrinsicsOutput`] instead of a plain tuple. Examples: Returns: [`~pipelines.marigold.MarigoldIntrinsicsOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.marigold.MarigoldIntrinsicsOutput`] is returned, otherwise a `tuple` is returned where the first element is the prediction, the second element is the uncertainty (or `None`), and the third is the latent (or `None`). """ # 0. Resolving variables. device = self._execution_device dtype = self.dtype # Model-specific optimal default values leading to fast and reasonable results. if num_inference_steps is None: num_inference_steps = self.default_denoising_steps if processing_resolution is None: processing_resolution = self.default_processing_resolution # 1. Check inputs. num_images = self.check_inputs( image, num_inference_steps, ensemble_size, processing_resolution, resample_method_input, resample_method_output, batch_size, ensembling_kwargs, latents, generator, output_type, output_uncertainty, ) # 2. Prepare empty text conditioning. # Model invocation: self.tokenizer, self.text_encoder. 
if self.empty_text_embedding is None: prompt = "" text_inputs = self.tokenizer( prompt, padding="do_not_pad", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids.to(device) self.empty_text_embedding = self.text_encoder(text_input_ids)[0] # [1,2,1024] # 3. Preprocess input images. This function loads input image or images of compatible dimensions `(H, W)`, # optionally downsamples them to the `processing_resolution` `(PH, PW)`, where # `max(PH, PW) == processing_resolution`, and pads the dimensions to `(PPH, PPW)` such that these values are # divisible by the latent space downscaling factor (typically 8 in Stable Diffusion). The default value `None` # of `processing_resolution` resolves to the optimal value from the model config. It is a recommended mode of # operation and leads to the most reasonable results. Using the native image resolution or any other processing # resolution can lead to loss of either fine details or global context in the output predictions. image, padding, original_resolution = self.image_processor.preprocess( image, processing_resolution, resample_method_input, device, dtype ) # [N,3,PPH,PPW] # 4. Encode input image into latent space. At this step, each of the `N` input images is represented with `E` # ensemble members. Each ensemble member is an independent diffused prediction, just initialized independently. # Latents of each such predictions across all input images and all ensemble members are represented in the # `pred_latent` variable. The variable `image_latent` contains each input image encoded into latent space and # replicated `E` times. The variable `pred_latent` contains latents initialization, where the latent space is # replicated `T` times relative to the single latent space of `image_latent`, where `T` is the number of the # predicted targets. 
The latents can be either generated (see `generator` to ensure reproducibility), or passed # explicitly via the `latents` argument. The latter can be set outside the pipeline code. This behavior can be # achieved by setting the `output_latent` argument to `True`. The latent space dimensions are `(h, w)`. Encoding # into latent space happens in batches of size `batch_size`. # Model invocation: self.vae.encoder. image_latent, pred_latent = self.prepare_latents( image, latents, generator, ensemble_size, batch_size ) # [N*E,4,h,w], [N*E,T*4,h,w] del image batch_empty_text_embedding = self.empty_text_embedding.to(device=device, dtype=dtype).repeat( batch_size, 1, 1 ) # [B,1024,2] # 5. Process the denoising loop. All `N * E` latents are processed sequentially in batches of size `batch_size`. # The unet model takes concatenated latent spaces of the input image and the predicted modality as an input, and # outputs noise for the predicted modality's latent space. The number of denoising diffusion steps is defined by # `num_inference_steps`. It is either set directly, or resolves to the optimal value specific to the loaded # model. # Model invocation: self.unet. pred_latents = [] for i in self.progress_bar( range(0, num_images * ensemble_size, batch_size), leave=True, desc="Marigold predictions..." 
): batch_image_latent = image_latent[i : i + batch_size] # [B,4,h,w] batch_pred_latent = pred_latent[i : i + batch_size] # [B,T*4,h,w] effective_batch_size = batch_image_latent.shape[0] text = batch_empty_text_embedding[:effective_batch_size] # [B,2,1024] self.scheduler.set_timesteps(num_inference_steps, device=device) for t in self.progress_bar(self.scheduler.timesteps, leave=False, desc="Diffusion steps..."): batch_latent = torch.cat([batch_image_latent, batch_pred_latent], dim=1) # [B,(1+T)*4,h,w] noise = self.unet(batch_latent, t, encoder_hidden_states=text, return_dict=False)[0] # [B,T*4,h,w] batch_pred_latent = self.scheduler.step( noise, t, batch_pred_latent, generator=generator ).prev_sample # [B,T*4,h,w] if XLA_AVAILABLE: xm.mark_step() pred_latents.append(batch_pred_latent) pred_latent = torch.cat(pred_latents, dim=0) # [N*E,T*4,h,w] del ( pred_latents, image_latent, batch_empty_text_embedding, batch_image_latent, batch_pred_latent, text, batch_latent, noise, ) # 6. Decode predictions from latent into pixel space. The resulting `N * E` predictions have shape `(PPH, PPW)`, # which requires slight postprocessing. Decoding into pixel space happens in batches of size `batch_size`. # Model invocation: self.vae.decoder. pred_latent_for_decoding = pred_latent.reshape( num_images * ensemble_size * self.n_targets, self.vae.config.latent_channels, *pred_latent.shape[2:] ) # [N*E*T,4,PPH,PPW] prediction = torch.cat( [ self.decode_prediction(pred_latent_for_decoding[i : i + batch_size]) for i in range(0, pred_latent_for_decoding.shape[0], batch_size) ], dim=0, ) # [N*E*T,3,PPH,PPW] del pred_latent_for_decoding if not output_latent: pred_latent = None # 7. Remove padding. The output shape is (PH, PW). prediction = self.image_processor.unpad_image(prediction, padding) # [N*E*T,3,PH,PW] # 8. Ensemble and compute uncertainty (when `output_uncertainty` is set). This code treats each of the `N*T` # groups of `E` ensemble predictions independently. 
For each group it computes an ensembled prediction of shape # `(PH, PW)` and an optional uncertainty map of the same dimensions. After computing this pair of outputs for # each group independently, it stacks them respectively into batches of `N*T` almost final predictions and # uncertainty maps. uncertainty = None if ensemble_size > 1: prediction = prediction.reshape( num_images, ensemble_size, self.n_targets, *prediction.shape[1:] ) # [N,E,T,3,PH,PW] prediction = [ self.ensemble_intrinsics(prediction[i], output_uncertainty, **(ensembling_kwargs or {})) for i in range(num_images) ] # [ [[T,3,PH,PW], [T,3,PH,PW]], ... ] prediction, uncertainty = zip(*prediction) # [[T,3,PH,PW], ... ], [[T,3,PH,PW], ... ] prediction = torch.cat(prediction, dim=0) # [N*T,3,PH,PW] if output_uncertainty: uncertainty = torch.cat(uncertainty, dim=0) # [N*T,3,PH,PW] else: uncertainty = None # 9. If `match_input_resolution` is set, the output prediction and the uncertainty are upsampled to match the # input resolution `(H, W)`. This step may introduce upsampling artifacts, and therefore can be disabled. # Depending on the downstream use-case, upsampling can be also chosen based on the tolerated artifacts by # setting the `resample_method_output` parameter (e.g., to `"nearest"`). if match_input_resolution: prediction = self.image_processor.resize_antialias( prediction, original_resolution, resample_method_output, is_aa=False ) # [N*T,3,H,W] if uncertainty is not None and output_uncertainty: uncertainty = self.image_processor.resize_antialias( uncertainty, original_resolution, resample_method_output, is_aa=False ) # [N*T,1,H,W] # 10. Prepare the final outputs. if output_type == "np": prediction = self.image_processor.pt_to_numpy(prediction) # [N*T,H,W,3] if uncertainty is not None and output_uncertainty: uncertainty = self.image_processor.pt_to_numpy(uncertainty) # [N*T,H,W,3] # 11. 
Offload all models self.maybe_free_model_hooks() if not return_dict: return (prediction, uncertainty, pred_latent) return MarigoldIntrinsicsOutput( prediction=prediction, uncertainty=uncertainty, latent=pred_latent, ) def prepare_latents( self, image: torch.Tensor, latents: torch.Tensor | None, generator: torch.Generator | None, ensemble_size: int, batch_size: int, ) -> tuple[torch.Tensor, torch.Tensor]: def retrieve_latents(encoder_output): if hasattr(encoder_output, "latent_dist"): return encoder_output.latent_dist.mode() elif hasattr(encoder_output, "latents"): return encoder_output.latents else: raise AttributeError("Could not access latents of provided encoder_output") image_latent = torch.cat( [ retrieve_latents(self.vae.encode(image[i : i + batch_size])) for i in range(0, image.shape[0], batch_size) ], dim=0, ) # [N,4,h,w] image_latent = image_latent * self.vae.config.scaling_factor image_latent = image_latent.repeat_interleave(ensemble_size, dim=0) # [N*E,4,h,w] N_E, C, H, W = image_latent.shape pred_latent = latents if pred_latent is None: pred_latent = randn_tensor( (N_E, self.n_targets * C, H, W), generator=generator, device=image_latent.device, dtype=image_latent.dtype, ) # [N*E,T*4,h,w] return image_latent, pred_latent def decode_prediction(self, pred_latent: torch.Tensor) -> torch.Tensor: if pred_latent.dim() != 4 or pred_latent.shape[1] != self.vae.config.latent_channels: raise ValueError( f"Expecting 4D tensor of shape [B,{self.vae.config.latent_channels},H,W]; got {pred_latent.shape}." 
) prediction = self.vae.decode(pred_latent / self.vae.config.scaling_factor, return_dict=False)[0] # [B,3,H,W] prediction = torch.clip(prediction, -1.0, 1.0) # [B,3,H,W] prediction = (prediction + 1.0) / 2.0 return prediction # [B,3,H,W] @staticmethod def ensemble_intrinsics( targets: torch.Tensor, output_uncertainty: bool = False, reduction: str = "median", ) -> tuple[torch.Tensor, torch.Tensor | None]: """ Ensembles the intrinsic decomposition represented by the `targets` tensor with expected shape `(B, T, 3, H, W)`, where B is the number of ensemble members for a given prediction of size `(H x W)`, and T is the number of predicted targets. Args: targets (`torch.Tensor`): Input ensemble of intrinsic image decomposition maps. output_uncertainty (`bool`, *optional*, defaults to `False`): Whether to output uncertainty map. reduction (`str`, *optional*, defaults to `"mean"`): Reduction method used to ensemble aligned predictions. The accepted values are: `"median"` and `"mean"`. Returns: A tensor of aligned and ensembled intrinsic decomposition maps with shape `(T, 3, H, W)` and optionally a tensor of uncertainties of shape `(T, 3, H, W)`. """ if targets.dim() != 5 or targets.shape[2] != 3: raise ValueError(f"Expecting 4D tensor of shape [B,T,3,H,W]; got {targets.shape}.") if reduction not in ("median", "mean"): raise ValueError(f"Unrecognized reduction method: {reduction}.") B, T, _, H, W = targets.shape uncertainty = None if reduction == "mean": prediction = torch.mean(targets, dim=0) # [T,3,H,W] if output_uncertainty: uncertainty = torch.std(targets, dim=0) # [T,3,H,W] elif reduction == "median": prediction = torch.median(targets, dim=0, keepdim=True).values # [1,T,3,H,W] if output_uncertainty: uncertainty = torch.abs(targets - prediction) # [B,T,3,H,W] uncertainty = torch.median(uncertainty, dim=0).values # [T,3,H,W] prediction = prediction.squeeze(0) # [T,3,H,W] else: raise ValueError(f"Unrecognized reduction method: {reduction}.") return prediction, uncertainty
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/pipelines/marigold/pipeline_marigold_intrinsics.py", "license": "Apache License 2.0", "lines": 641, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:tests/pipelines/marigold/test_marigold_intrinsics.py
# Copyright 2023-2025 Marigold Team, ETH Zürich. All rights reserved. # Copyright 2024-2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # -------------------------------------------------------------------------- # More information and citation instructions are available on the # Marigold project website: https://marigoldcomputervision.github.io # -------------------------------------------------------------------------- import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer import diffusers from diffusers import ( AutoencoderKL, AutoencoderTiny, DDIMScheduler, MarigoldIntrinsicsPipeline, UNet2DConditionModel, ) from ...testing_utils import ( Expectations, backend_empty_cache, enable_full_determinism, floats_tensor, load_image, require_torch_accelerator, slow, torch_device, ) from ..test_pipelines_common import PipelineTesterMixin, to_np enable_full_determinism() class MarigoldIntrinsicsPipelineTesterMixin(PipelineTesterMixin): def _test_inference_batch_single_identical( self, batch_size=2, expected_max_diff=1e-4, additional_params_copy_to_batched_inputs=["num_inference_steps"], ): components = self.get_dummy_components() pipe = self.pipeline_class(**components) for components in pipe.components.values(): if hasattr(components, "set_default_attn_processor"): components.set_default_attn_processor() pipe.to(torch_device) 
pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) # Reset generator in case it is has been used in self.get_dummy_inputs inputs["generator"] = self.get_generator(0) logger = diffusers.logging.get_logger(pipe.__module__) logger.setLevel(level=diffusers.logging.FATAL) # batchify inputs batched_inputs = {} batched_inputs.update(inputs) for name in self.batch_params: if name not in inputs: continue value = inputs[name] if name == "prompt": len_prompt = len(value) batched_inputs[name] = [value[: len_prompt // i] for i in range(1, batch_size + 1)] batched_inputs[name][-1] = 100 * "very long" else: batched_inputs[name] = batch_size * [value] if "generator" in inputs: batched_inputs["generator"] = [self.get_generator(i) for i in range(batch_size)] if "batch_size" in inputs: batched_inputs["batch_size"] = batch_size for arg in additional_params_copy_to_batched_inputs: batched_inputs[arg] = inputs[arg] output = pipe(**inputs) output_batch = pipe(**batched_inputs) assert output_batch[0].shape[0] == batch_size * output[0].shape[0] # only changed here max_diff = np.abs(to_np(output_batch[0][0]) - to_np(output[0][0])).max() assert max_diff < expected_max_diff def _test_inference_batch_consistent( self, batch_sizes=[2], additional_params_copy_to_batched_inputs=["num_inference_steps"], batch_generator=True ): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) inputs["generator"] = self.get_generator(0) logger = diffusers.logging.get_logger(pipe.__module__) logger.setLevel(level=diffusers.logging.FATAL) # prepare batched inputs batched_inputs = [] for batch_size in batch_sizes: batched_input = {} batched_input.update(inputs) for name in self.batch_params: if name not in inputs: continue value = inputs[name] if name == "prompt": len_prompt = len(value) # make unequal batch sizes batched_input[name] = 
[value[: len_prompt // i] for i in range(1, batch_size + 1)] # make last batch super long batched_input[name][-1] = 100 * "very long" else: batched_input[name] = batch_size * [value] if batch_generator and "generator" in inputs: batched_input["generator"] = [self.get_generator(i) for i in range(batch_size)] if "batch_size" in inputs: batched_input["batch_size"] = batch_size batched_inputs.append(batched_input) logger.setLevel(level=diffusers.logging.WARNING) for batch_size, batched_input in zip(batch_sizes, batched_inputs): output = pipe(**batched_input) assert len(output[0]) == batch_size * pipe.n_targets # only changed here class MarigoldIntrinsicsPipelineFastTests(MarigoldIntrinsicsPipelineTesterMixin, unittest.TestCase): pipeline_class = MarigoldIntrinsicsPipeline params = frozenset(["image"]) batch_params = frozenset(["image"]) image_params = frozenset(["image"]) image_latents_params = frozenset(["latents"]) callback_cfg_params = frozenset([]) test_xformers_attention = False required_optional_params = frozenset( [ "num_inference_steps", "generator", "output_type", ] ) def get_dummy_components(self, time_cond_proj_dim=None): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, time_cond_proj_dim=time_cond_proj_dim, sample_size=32, in_channels=12, out_channels=8, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, ) torch.manual_seed(0) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, prediction_type="v_prediction", set_alpha_to_one=False, steps_offset=1, beta_schedule="scaled_linear", clip_sample=False, thresholding=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( 
bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "prediction_type": "intrinsics", } return components def get_dummy_tiny_autoencoder(self): return AutoencoderTiny(in_channels=3, out_channels=3, latent_channels=4) def get_dummy_inputs(self, device, seed=0): image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) image = image / 2 + 0.5 if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "image": image, "num_inference_steps": 1, "processing_resolution": 0, "generator": generator, "output_type": "np", } return inputs def _test_marigold_intrinsics( self, generator_seed: int = 0, expected_slice: np.ndarray = None, atol: float = 1e-4, **pipe_kwargs, ): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(device) pipe.set_progress_bar_config(disable=None) pipe_inputs = self.get_dummy_inputs(device, seed=generator_seed) pipe_inputs.update(**pipe_kwargs) prediction = pipe(**pipe_inputs).prediction prediction_slice = prediction[0, -3:, -3:, -1].flatten() if pipe_inputs.get("match_input_resolution", True): self.assertEqual(prediction.shape, (2, 32, 32, 3), "Unexpected output resolution") else: self.assertTrue(prediction.shape[0] == 2 and prediction.shape[3] == 3, "Unexpected output dimensions") self.assertEqual( max(prediction.shape[1:3]), pipe_inputs.get("processing_resolution", 768), "Unexpected output resolution", ) np.set_printoptions(precision=5, suppress=True) msg = f"{prediction_slice}" self.assertTrue(np.allclose(prediction_slice, 
expected_slice, atol=atol), msg) # self.assertTrue(np.allclose(prediction_slice, expected_slice, atol=atol)) def test_marigold_depth_dummy_defaults(self): self._test_marigold_intrinsics( expected_slice=np.array([0.6423, 0.40664, 0.41185, 0.65832, 0.63935, 0.43971, 0.51786, 0.55216, 0.47683]), ) def test_marigold_depth_dummy_G0_S1_P32_E1_B1_M1(self): self._test_marigold_intrinsics( generator_seed=0, expected_slice=np.array([0.6423, 0.40664, 0.41185, 0.65832, 0.63935, 0.43971, 0.51786, 0.55216, 0.47683]), num_inference_steps=1, processing_resolution=32, ensemble_size=1, batch_size=1, match_input_resolution=True, ) def test_marigold_depth_dummy_G0_S1_P16_E1_B1_M1(self): self._test_marigold_intrinsics( generator_seed=0, expected_slice=np.array([0.53132, 0.44487, 0.40164, 0.5326, 0.49073, 0.46979, 0.53324, 0.51366, 0.50387]), num_inference_steps=1, processing_resolution=16, ensemble_size=1, batch_size=1, match_input_resolution=True, ) def test_marigold_depth_dummy_G2024_S1_P32_E1_B1_M1(self): self._test_marigold_intrinsics( generator_seed=2024, expected_slice=np.array([0.40257, 0.39468, 0.51373, 0.4161, 0.40162, 0.58535, 0.43581, 0.47834, 0.48951]), num_inference_steps=1, processing_resolution=32, ensemble_size=1, batch_size=1, match_input_resolution=True, ) def test_marigold_depth_dummy_G0_S2_P32_E1_B1_M1(self): self._test_marigold_intrinsics( generator_seed=0, expected_slice=np.array([0.49636, 0.4518, 0.42722, 0.59044, 0.6362, 0.39011, 0.53522, 0.55153, 0.48699]), num_inference_steps=2, processing_resolution=32, ensemble_size=1, batch_size=1, match_input_resolution=True, ) def test_marigold_depth_dummy_G0_S1_P64_E1_B1_M1(self): self._test_marigold_intrinsics( generator_seed=0, expected_slice=np.array([0.55547, 0.43511, 0.4887, 0.56399, 0.63867, 0.56337, 0.47889, 0.52925, 0.49235]), num_inference_steps=1, processing_resolution=64, ensemble_size=1, batch_size=1, match_input_resolution=True, ) def test_marigold_depth_dummy_G0_S1_P32_E3_B1_M1(self): 
self._test_marigold_intrinsics( generator_seed=0, expected_slice=np.array([0.57249, 0.49824, 0.54438, 0.57733, 0.52404, 0.5255, 0.56493, 0.56336, 0.48579]), num_inference_steps=1, processing_resolution=32, ensemble_size=3, ensembling_kwargs={"reduction": "mean"}, batch_size=1, match_input_resolution=True, ) def test_marigold_depth_dummy_G0_S1_P32_E4_B2_M1(self): self._test_marigold_intrinsics( generator_seed=0, expected_slice=np.array([0.6294, 0.5575, 0.53414, 0.61077, 0.57156, 0.53974, 0.52956, 0.55467, 0.48751]), num_inference_steps=1, processing_resolution=32, ensemble_size=4, ensembling_kwargs={"reduction": "mean"}, batch_size=2, match_input_resolution=True, ) def test_marigold_depth_dummy_G0_S1_P16_E1_B1_M0(self): self._test_marigold_intrinsics( generator_seed=0, expected_slice=np.array([0.63511, 0.68137, 0.48783, 0.46689, 0.58505, 0.36757, 0.58465, 0.54302, 0.50387]), num_inference_steps=1, processing_resolution=16, ensemble_size=1, batch_size=1, match_input_resolution=False, ) def test_marigold_depth_dummy_no_num_inference_steps(self): with self.assertRaises(ValueError) as e: self._test_marigold_intrinsics( num_inference_steps=None, expected_slice=np.array([0.0]), ) self.assertIn("num_inference_steps", str(e)) def test_marigold_depth_dummy_no_processing_resolution(self): with self.assertRaises(ValueError) as e: self._test_marigold_intrinsics( processing_resolution=None, expected_slice=np.array([0.0]), ) self.assertIn("processing_resolution", str(e)) @slow @require_torch_accelerator class MarigoldIntrinsicsPipelineIntegrationTests(unittest.TestCase): def setUp(self): super().setUp() gc.collect() backend_empty_cache(torch_device) def tearDown(self): super().tearDown() gc.collect() backend_empty_cache(torch_device) def _test_marigold_intrinsics( self, is_fp16: bool = True, device: str = "cuda", generator_seed: int = 0, expected_slice: np.ndarray = None, model_id: str = "prs-eth/marigold-iid-appearance-v1-1", image_url: str = 
"https://marigoldmonodepth.github.io/images/einstein.jpg", atol: float = 1e-3, **pipe_kwargs, ): from_pretrained_kwargs = {} if is_fp16: from_pretrained_kwargs["variant"] = "fp16" from_pretrained_kwargs["torch_dtype"] = torch.float16 pipe = MarigoldIntrinsicsPipeline.from_pretrained(model_id, **from_pretrained_kwargs) if device in ["cuda", "xpu"]: pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device=device).manual_seed(generator_seed) image = load_image(image_url) width, height = image.size prediction = pipe(image, generator=generator, **pipe_kwargs).prediction prediction_slice = prediction[0, -3:, -3:, -1].flatten() if pipe_kwargs.get("match_input_resolution", True): self.assertEqual(prediction.shape, (2, height, width, 3), "Unexpected output resolution") else: self.assertTrue(prediction.shape[0] == 2 and prediction.shape[3] == 3, "Unexpected output dimensions") self.assertEqual( max(prediction.shape[1:3]), pipe_kwargs.get("processing_resolution", 768), "Unexpected output resolution", ) msg = f"{prediction_slice}" self.assertTrue(np.allclose(prediction_slice, expected_slice, atol=atol), msg) # self.assertTrue(np.allclose(prediction_slice, expected_slice, atol=atol)) def test_marigold_intrinsics_einstein_f32_cpu_G0_S1_P32_E1_B1_M1(self): self._test_marigold_intrinsics( is_fp16=False, device="cpu", generator_seed=0, expected_slice=np.array([0.9162, 0.9162, 0.9162, 0.9162, 0.9162, 0.9162, 0.9162, 0.9162, 0.9162]), num_inference_steps=1, processing_resolution=32, ensemble_size=1, batch_size=1, match_input_resolution=True, ) def test_marigold_intrinsics_einstein_f32_accelerator_G0_S1_P768_E1_B1_M1(self): self._test_marigold_intrinsics( is_fp16=False, device=torch_device, generator_seed=0, expected_slice=np.array([0.62127, 0.61906, 0.61687, 0.61946, 0.61903, 0.61961, 0.61808, 0.62099, 0.62894]), num_inference_steps=1, processing_resolution=768, ensemble_size=1, batch_size=1, match_input_resolution=True, ) def 
test_marigold_intrinsics_einstein_f16_accelerator_G0_S1_P768_E1_B1_M1(self): self._test_marigold_intrinsics( is_fp16=True, device=torch_device, generator_seed=0, expected_slice=np.array([0.62109, 0.61914, 0.61719, 0.61963, 0.61914, 0.61963, 0.61816, 0.62109, 0.62891]), num_inference_steps=1, processing_resolution=768, ensemble_size=1, batch_size=1, match_input_resolution=True, ) def test_marigold_intrinsics_einstein_f16_accelerator_G2024_S1_P768_E1_B1_M1(self): self._test_marigold_intrinsics( is_fp16=True, device=torch_device, generator_seed=2024, expected_slice=np.array([0.64111, 0.63916, 0.63623, 0.63965, 0.63916, 0.63965, 0.6377, 0.64062, 0.64941]), num_inference_steps=1, processing_resolution=768, ensemble_size=1, batch_size=1, match_input_resolution=True, ) def test_marigold_intrinsics_einstein_f16_accelerator_G0_S2_P768_E1_B1_M1(self): self._test_marigold_intrinsics( is_fp16=True, device=torch_device, generator_seed=0, expected_slice=np.array([0.60254, 0.60059, 0.59961, 0.60156, 0.60107, 0.60205, 0.60254, 0.60449, 0.61133]), num_inference_steps=2, processing_resolution=768, ensemble_size=1, batch_size=1, match_input_resolution=True, ) def test_marigold_intrinsics_einstein_f16_accelerator_G0_S1_P512_E1_B1_M1(self): self._test_marigold_intrinsics( is_fp16=True, device=torch_device, generator_seed=0, expected_slice=np.array([0.64551, 0.64453, 0.64404, 0.64502, 0.64844, 0.65039, 0.64502, 0.65039, 0.65332]), num_inference_steps=1, processing_resolution=512, ensemble_size=1, batch_size=1, match_input_resolution=True, ) def test_marigold_intrinsics_einstein_f16_accelerator_G0_S1_P768_E3_B1_M1(self): expected_slices = Expectations( { ("xpu", 3): np.array( [ 0.62655, 0.62477, 0.62161, 0.62452, 0.62454, 0.62454, 0.62255, 0.62647, 0.63379, ] ), ("cuda", 7): np.array( [ 0.61572, 0.1377, 0.61182, 0.61426, 0.61377, 0.61426, 0.61279, 0.61572, 0.62354, ] ), } ) self._test_marigold_intrinsics( is_fp16=True, device=torch_device, generator_seed=0, 
expected_slice=expected_slices.get_expectation(), num_inference_steps=1, processing_resolution=768, ensemble_size=3, ensembling_kwargs={"reduction": "mean"}, batch_size=1, match_input_resolution=True, ) def test_marigold_intrinsics_einstein_f16_accelerator_G0_S1_P768_E4_B2_M1(self): expected_slices = Expectations( { ("xpu", 3): np.array( [ 0.62988, 0.62792, 0.62548, 0.62841, 0.62792, 0.62792, 0.62646, 0.62939, 0.63721, ] ), ("cuda", 7): np.array( [ 0.61914, 0.6167, 0.61475, 0.61719, 0.61719, 0.61768, 0.61572, 0.61914, 0.62695, ] ), } ) self._test_marigold_intrinsics( is_fp16=True, device=torch_device, generator_seed=0, expected_slice=expected_slices.get_expectation(), num_inference_steps=1, processing_resolution=768, ensemble_size=4, ensembling_kwargs={"reduction": "mean"}, batch_size=2, match_input_resolution=True, ) def test_marigold_intrinsics_einstein_f16_accelerator_G0_S1_P512_E1_B1_M0(self): self._test_marigold_intrinsics( is_fp16=True, device=torch_device, generator_seed=0, expected_slice=np.array([0.65332, 0.64697, 0.64648, 0.64844, 0.64697, 0.64111, 0.64941, 0.64209, 0.65332]), num_inference_steps=1, processing_resolution=512, ensemble_size=1, batch_size=1, match_input_resolution=False, )
{ "repo_id": "huggingface/diffusers", "file_path": "tests/pipelines/marigold/test_marigold_intrinsics.py", "license": "Apache License 2.0", "lines": 558, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/diffusers:src/diffusers/utils/typing_utils.py
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Typing utilities: Utilities related to type checking and validation """ from typing import Any, Dict, List, Set, Tuple, Type, Union, get_args, get_origin def _is_valid_type(obj: Any, class_or_tuple: Type | tuple[Type, ...]) -> bool: """ Checks if an object is an instance of any of the provided types. For collections, it checks if every element is of the correct type as well. """ if not isinstance(class_or_tuple, tuple): class_or_tuple = (class_or_tuple,) # Unpack unions unpacked_class_or_tuple = [] for t in class_or_tuple: if get_origin(t) is Union: unpacked_class_or_tuple.extend(get_args(t)) else: unpacked_class_or_tuple.append(t) class_or_tuple = tuple(unpacked_class_or_tuple) if Any in class_or_tuple: return True obj_type = type(obj) # Classes with obj's type class_or_tuple = {t for t in class_or_tuple if isinstance(obj, get_origin(t) or t)} # Singular types (e.g. int, ControlNet, ...) # Untyped collections (e.g. List, but not List[int]) elem_class_or_tuple = {get_args(t) for t in class_or_tuple} if () in elem_class_or_tuple: return True # Typed lists or sets elif obj_type in (list, set): return any(all(_is_valid_type(x, t) for x in obj) for t in elem_class_or_tuple) # Typed tuples elif obj_type is tuple: return any( # Tuples with any length and single type (e.g. 
Tuple[int, ...]) (len(t) == 2 and t[-1] is Ellipsis and all(_is_valid_type(x, t[0]) for x in obj)) or # Tuples with fixed length and any types (e.g. Tuple[int, str]) (len(obj) == len(t) and all(_is_valid_type(x, tt) for x, tt in zip(obj, t))) for t in elem_class_or_tuple ) # Typed dicts elif obj_type is dict: return any( all(_is_valid_type(k, kt) and _is_valid_type(v, vt) for k, v in obj.items()) for kt, vt in elem_class_or_tuple ) else: return False def _get_detailed_type(obj: Any) -> Type: """ Gets a detailed type for an object, including nested types for collections. """ obj_type = type(obj) if obj_type in (list, set): obj_origin_type = List if obj_type is list else Set elems_type = Union[tuple({_get_detailed_type(x) for x in obj})] return obj_origin_type[elems_type] elif obj_type is tuple: return Tuple[tuple(_get_detailed_type(x) for x in obj)] elif obj_type is dict: keys_type = Union[tuple({_get_detailed_type(k) for k in obj.keys()})] values_type = Union[tuple({_get_detailed_type(k) for k in obj.values()})] return Dict[keys_type, values_type] else: return obj_type
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/utils/typing_utils.py", "license": "Apache License 2.0", "lines": 80, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:utils/extract_tests_from_mixin.py
import argparse import inspect import sys from pathlib import Path from typing import List, Type root_dir = Path(__file__).parent.parent.absolute() sys.path.insert(0, str(root_dir)) parser = argparse.ArgumentParser() parser.add_argument("--type", type=str, default=None) args = parser.parse_args() def get_test_methods_from_class(cls: Type) -> List[str]: """ Get all test method names from a given class. Only returns methods that start with 'test_'. """ test_methods = [] for name, obj in inspect.getmembers(cls): if name.startswith("test_") and inspect.isfunction(obj): test_methods.append(name) return sorted(test_methods) def generate_pytest_pattern(test_methods: List[str]) -> str: """Generate pytest pattern string for the -k flag.""" return " or ".join(test_methods) def generate_pattern_for_mixin(mixin_class: Type) -> str: """ Generate pytest pattern for a specific mixin class. """ if mixin_cls is None: return "" test_methods = get_test_methods_from_class(mixin_class) return generate_pytest_pattern(test_methods) if __name__ == "__main__": mixin_cls = None if args.type == "pipeline": from tests.pipelines.test_pipelines_common import PipelineTesterMixin mixin_cls = PipelineTesterMixin elif args.type == "models": from tests.models.test_modeling_common import ModelTesterMixin mixin_cls = ModelTesterMixin elif args.type == "lora": from tests.lora.utils import PeftLoraLoaderMixinTests mixin_cls = PeftLoraLoaderMixinTests pattern = generate_pattern_for_mixin(mixin_cls) print(pattern)
{ "repo_id": "huggingface/diffusers", "file_path": "utils/extract_tests_from_mixin.py", "license": "Apache License 2.0", "lines": 44, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
huggingface/diffusers:src/diffusers/pipelines/hunyuan_video/pipeline_hunyuan_skyreels_image2video.py
# Copyright 2025 The HunyuanVideo Team and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import inspect
from typing import Any, Callable

import numpy as np
import torch
from transformers import CLIPTextModel, CLIPTokenizer, LlamaModel, LlamaTokenizerFast

from ...callbacks import MultiPipelineCallbacks, PipelineCallback
from ...image_processor import PipelineImageInput
from ...loaders import HunyuanVideoLoraLoaderMixin
from ...models import AutoencoderKLHunyuanVideo, HunyuanVideoTransformer3DModel
from ...schedulers import FlowMatchEulerDiscreteScheduler
from ...utils import deprecate, is_torch_xla_available, logging, replace_example_docstring
from ...utils.torch_utils import randn_tensor
from ...video_processor import VideoProcessor
from ..pipeline_utils import DiffusionPipeline
from .pipeline_output import HunyuanVideoPipelineOutput


if is_torch_xla_available():
    import torch_xla.core.xla_model as xm

    XLA_AVAILABLE = True
else:
    XLA_AVAILABLE = False

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
    Examples:
        ```python
        >>> import torch
        >>> from diffusers import HunyuanSkyreelsImageToVideoPipeline, HunyuanVideoTransformer3DModel
        >>> from diffusers.utils import load_image, export_to_video

        >>> model_id = "hunyuanvideo-community/HunyuanVideo"
        >>> transformer_model_id = "Skywork/SkyReels-V1-Hunyuan-I2V"
        >>> transformer = HunyuanVideoTransformer3DModel.from_pretrained(
        ...     transformer_model_id, torch_dtype=torch.bfloat16
        ... )
        >>> pipe = HunyuanSkyreelsImageToVideoPipeline.from_pretrained(
        ...     model_id, transformer=transformer, torch_dtype=torch.float16
        ... )
        >>> pipe.vae.enable_tiling()
        >>> pipe.to("cuda")

        >>> prompt = "An astronaut hatching from an egg, on the surface of the moon, the darkness and depth of space realised in the background. High quality, ultrarealistic detail and breath-taking movie-like camera shot."
        >>> negative_prompt = "Aerial view, aerial view, overexposed, low quality, deformation, a poor composition, bad hands, bad teeth, bad eyes, bad limbs, distortion"
        >>> image = load_image(
        ...     "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/astronaut.jpg"
        ... )

        >>> output = pipe(
        ...     image=image,
        ...     prompt=prompt,
        ...     negative_prompt=negative_prompt,
        ...     num_inference_steps=30,
        ...     true_cfg_scale=6.0,
        ...     guidance_scale=1.0,
        ... ).frames[0]
        >>> export_to_video(output, "output.mp4", fps=15)
        ```
"""


# Llama-3 chat-style template used to wrap the user prompt for the text encoder;
# `crop_start` is the number of leading template tokens stripped from the
# resulting embeddings so only the user prompt's tokens remain.
DEFAULT_PROMPT_TEMPLATE = {
    "template": (
        "<|start_header_id|>system<|end_header_id|>\n\nDescribe the video by detailing the following aspects: "
        "1. The main content and theme of the video."
        "2. The color, shape, size, texture, quantity, text, and spatial relationships of the objects."
        "3. Actions, events, behaviors temporal relationships, physical movement changes of the objects."
        "4. background environment, light, style and atmosphere."
        "5. camera angles, movements, and transitions used in the video:<|eot_id|>"
        "<|start_header_id|>user<|end_header_id|>\n\n{}<|eot_id|>"
    ),
    "crop_start": 95,
}


# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps
def retrieve_timesteps(
    scheduler,
    num_inference_steps: int | None = None,
    device: str | torch.device | None = None,
    timesteps: list[int] | None = None,
    sigmas: list[float] | None = None,
    **kwargs,
):
    r"""
    Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
    custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.

    Args:
        scheduler (`SchedulerMixin`):
            The scheduler to get timesteps from.
        num_inference_steps (`int`):
            The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps`
            must be `None`.
        device (`str` or `torch.device`, *optional*):
            The device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
        timesteps (`list[int]`, *optional*):
            Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed,
            `num_inference_steps` and `sigmas` must be `None`.
        sigmas (`list[float]`, *optional*):
            Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed,
            `num_inference_steps` and `timesteps` must be `None`.

    Returns:
        `tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and
        the second element is the number of inference steps.
    """
    if timesteps is not None and sigmas is not None:
        raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values")
    if timesteps is not None:
        # Not every scheduler supports explicit timestep schedules; check first.
        accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
        if not accepts_timesteps:
            raise ValueError(
                f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
                f" timestep schedules. Please check whether you are using the correct scheduler."
            )
        scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
        timesteps = scheduler.timesteps
        num_inference_steps = len(timesteps)
    elif sigmas is not None:
        accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
        if not accept_sigmas:
            raise ValueError(
                f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
                f" sigmas schedules. Please check whether you are using the correct scheduler."
            )
        scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs)
        timesteps = scheduler.timesteps
        num_inference_steps = len(timesteps)
    else:
        scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
        timesteps = scheduler.timesteps
    return timesteps, num_inference_steps


# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents
def retrieve_latents(
    encoder_output: torch.Tensor, generator: torch.Generator | None = None, sample_mode: str = "sample"
):
    if hasattr(encoder_output, "latent_dist") and sample_mode == "sample":
        return encoder_output.latent_dist.sample(generator)
    elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax":
        return encoder_output.latent_dist.mode()
    elif hasattr(encoder_output, "latents"):
        return encoder_output.latents
    else:
        raise AttributeError("Could not access latents of provided encoder_output")


class HunyuanSkyreelsImageToVideoPipeline(DiffusionPipeline, HunyuanVideoLoraLoaderMixin):
    r"""
    Pipeline for image-to-video generation using HunyuanVideo.

    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
    implemented for all pipelines (downloading, saving, running on a particular device, etc.).

    Args:
        text_encoder ([`LlamaModel`]):
            [Llava Llama3-8B](https://huggingface.co/xtuner/llava-llama-3-8b-v1_1-transformers).
        tokenizer (`LlamaTokenizer`):
            Tokenizer from [Llava Llama3-8B](https://huggingface.co/xtuner/llava-llama-3-8b-v1_1-transformers).
        transformer ([`HunyuanVideoTransformer3DModel`]):
            Conditional Transformer to denoise the encoded image latents.
        scheduler ([`FlowMatchEulerDiscreteScheduler`]):
            A scheduler to be used in combination with `transformer` to denoise the encoded image latents.
        vae ([`AutoencoderKLHunyuanVideo`]):
            Variational Auto-Encoder (VAE) Model to encode and decode videos to and from latent representations.
        text_encoder_2 ([`CLIPTextModel`]):
            [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
            the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
        tokenizer_2 (`CLIPTokenizer`):
            Tokenizer of class
            [CLIPTokenizer](https://huggingface.co/docs/transformers/en/model_doc/clip#transformers.CLIPTokenizer).
    """

    model_cpu_offload_seq = "text_encoder->text_encoder_2->transformer->vae"
    _callback_tensor_inputs = ["latents", "prompt_embeds"]

    def __init__(
        self,
        text_encoder: LlamaModel,
        tokenizer: LlamaTokenizerFast,
        transformer: HunyuanVideoTransformer3DModel,
        vae: AutoencoderKLHunyuanVideo,
        scheduler: FlowMatchEulerDiscreteScheduler,
        text_encoder_2: CLIPTextModel,
        tokenizer_2: CLIPTokenizer,
    ):
        super().__init__()

        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            transformer=transformer,
            scheduler=scheduler,
            text_encoder_2=text_encoder_2,
            tokenizer_2=tokenizer_2,
        )

        # Fallbacks match the released SkyReels/HunyuanVideo VAE configuration.
        self.vae_scale_factor_temporal = self.vae.temporal_compression_ratio if getattr(self, "vae", None) else 4
        self.vae_scale_factor_spatial = self.vae.spatial_compression_ratio if getattr(self, "vae", None) else 8
        self.vae_scaling_factor = self.vae.config.scaling_factor if getattr(self, "vae", None) else 0.476986
        self.video_processor = VideoProcessor(vae_scale_factor=self.vae_scale_factor_spatial)

    # Copied from diffusers.pipelines.hunyuan_video.pipeline_hunyuan_video.HunyuanVideoPipeline._get_llama_prompt_embeds
    def _get_llama_prompt_embeds(
        self,
        prompt: str | list[str],
        prompt_template: dict[str, Any],
        num_videos_per_prompt: int = 1,
        device: torch.device | None = None,
        dtype: torch.dtype | None = None,
        max_sequence_length: int = 256,
        num_hidden_layers_to_skip: int = 2,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        device = device or self._execution_device
        dtype = dtype or self.text_encoder.dtype

        prompt = [prompt] if isinstance(prompt, str) else prompt
        batch_size = len(prompt)

        prompt = [prompt_template["template"].format(p) for p in prompt]

        crop_start = prompt_template.get("crop_start", None)
        if crop_start is None:
            # Derive the template token count on the fly when not provided.
            prompt_template_input = self.tokenizer(
                prompt_template["template"],
                padding="max_length",
                return_tensors="pt",
                return_length=False,
                return_overflowing_tokens=False,
                return_attention_mask=False,
            )
            crop_start = prompt_template_input["input_ids"].shape[-1]
            # Remove <|eot_id|> token and placeholder {}
            crop_start -= 2

        max_sequence_length += crop_start
        text_inputs = self.tokenizer(
            prompt,
            max_length=max_sequence_length,
            padding="max_length",
            truncation=True,
            return_tensors="pt",
            return_length=False,
            return_overflowing_tokens=False,
            return_attention_mask=True,
        )
        text_input_ids = text_inputs.input_ids.to(device=device)
        prompt_attention_mask = text_inputs.attention_mask.to(device=device)

        prompt_embeds = self.text_encoder(
            input_ids=text_input_ids,
            attention_mask=prompt_attention_mask,
            output_hidden_states=True,
        ).hidden_states[-(num_hidden_layers_to_skip + 1)]
        prompt_embeds = prompt_embeds.to(dtype=dtype)

        if crop_start is not None and crop_start > 0:
            # Drop the template prefix so embeddings cover only the user prompt.
            prompt_embeds = prompt_embeds[:, crop_start:]
            prompt_attention_mask = prompt_attention_mask[:, crop_start:]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        _, seq_len, _ = prompt_embeds.shape
        prompt_embeds = prompt_embeds.repeat(1, num_videos_per_prompt, 1)
        prompt_embeds = prompt_embeds.view(batch_size * num_videos_per_prompt, seq_len, -1)
        prompt_attention_mask = prompt_attention_mask.repeat(1, num_videos_per_prompt)
        prompt_attention_mask = prompt_attention_mask.view(batch_size * num_videos_per_prompt, seq_len)

        return prompt_embeds, prompt_attention_mask

    # Copied from diffusers.pipelines.hunyuan_video.pipeline_hunyuan_video.HunyuanVideoPipeline._get_clip_prompt_embeds
    def _get_clip_prompt_embeds(
        self,
        prompt: str | list[str],
        num_videos_per_prompt: int = 1,
        device: torch.device | None = None,
        dtype: torch.dtype | None = None,
        max_sequence_length: int = 77,
    ) -> torch.Tensor:
        device = device or self._execution_device
        dtype = dtype or self.text_encoder_2.dtype

        prompt = [prompt] if isinstance(prompt, str) else prompt
        batch_size = len(prompt)

        text_inputs = self.tokenizer_2(
            prompt,
            padding="max_length",
            max_length=max_sequence_length,
            truncation=True,
            return_tensors="pt",
        )

        text_input_ids = text_inputs.input_ids
        untruncated_ids = self.tokenizer_2(prompt, padding="longest", return_tensors="pt").input_ids
        if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
            removed_text = self.tokenizer_2.batch_decode(untruncated_ids[:, max_sequence_length - 1 : -1])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {max_sequence_length} tokens: {removed_text}"
            )

        prompt_embeds = self.text_encoder_2(text_input_ids.to(device), output_hidden_states=False).pooler_output

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        prompt_embeds = prompt_embeds.repeat(1, num_videos_per_prompt)
        prompt_embeds = prompt_embeds.view(batch_size * num_videos_per_prompt, -1)

        return prompt_embeds

    # Adapted from diffusers.pipelines.hunyuan_video.pipeline_hunyuan_video.HunyuanVideoPipeline.encode_prompt
    # with a fix: the pooled CLIP embeddings are computed from `prompt_2` (falling back to `prompt`), as
    # documented, instead of always using `prompt`.
    def encode_prompt(
        self,
        prompt: str | list[str],
        prompt_2: str | list[str] = None,
        prompt_template: dict[str, Any] = DEFAULT_PROMPT_TEMPLATE,
        num_videos_per_prompt: int = 1,
        prompt_embeds: torch.Tensor | None = None,
        pooled_prompt_embeds: torch.Tensor | None = None,
        prompt_attention_mask: torch.Tensor | None = None,
        device: torch.device | None = None,
        dtype: torch.dtype | None = None,
        max_sequence_length: int = 256,
    ):
        if prompt_embeds is None:
            prompt_embeds, prompt_attention_mask = self._get_llama_prompt_embeds(
                prompt,
                prompt_template,
                num_videos_per_prompt,
                device=device,
                dtype=dtype,
                max_sequence_length=max_sequence_length,
            )

        if pooled_prompt_embeds is None:
            if prompt_2 is None:
                prompt_2 = prompt
            # Bug fix: previously `prompt` was passed here, silently ignoring a
            # user-supplied `prompt_2`.
            pooled_prompt_embeds = self._get_clip_prompt_embeds(
                prompt_2,
                num_videos_per_prompt,
                device=device,
                dtype=dtype,
                max_sequence_length=77,
            )

        return prompt_embeds, pooled_prompt_embeds, prompt_attention_mask

    # Copied from diffusers.pipelines.hunyuan_video.pipeline_hunyuan_video.HunyuanVideoPipeline.check_inputs
    def check_inputs(
        self,
        prompt,
        prompt_2,
        height,
        width,
        prompt_embeds=None,
        callback_on_step_end_tensor_inputs=None,
        prompt_template=None,
    ):
        if height % 16 != 0 or width % 16 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 16 but are {height} and {width}.")

        if callback_on_step_end_tensor_inputs is not None and not all(
            k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
        ):
            raise ValueError(
                f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
            )

        if prompt is not None and prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
                " only forward one of the two."
            )
        elif prompt_2 is not None and prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
                " only forward one of the two."
            )
        elif prompt is None and prompt_embeds is None:
            raise ValueError(
                "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
            )
        elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
        elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)):
            raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}")

        if prompt_template is not None:
            if not isinstance(prompt_template, dict):
                raise ValueError(f"`prompt_template` has to be of type `dict` but is {type(prompt_template)}")
            if "template" not in prompt_template:
                raise ValueError(
                    f"`prompt_template` has to contain a key `template` but only found {prompt_template.keys()}"
                )

    def prepare_latents(
        self,
        image: torch.Tensor,
        batch_size: int,
        num_channels_latents: int = 32,
        height: int = 544,
        width: int = 960,
        num_frames: int = 97,
        dtype: torch.dtype | None = None,
        device: torch.device | None = None,
        generator: torch.Generator | list[torch.Generator] | None = None,
        latents: torch.Tensor | None = None,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        # Note: return annotation fixed — this returns (noise latents, image condition latents).
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image = image.unsqueeze(2)  # [B, C, 1, H, W]
        if isinstance(generator, list):
            image_latents = [
                retrieve_latents(self.vae.encode(image[i].unsqueeze(0)), generator[i]) for i in range(batch_size)
            ]
        else:
            image_latents = [retrieve_latents(self.vae.encode(img.unsqueeze(0)), generator) for img in image]

        image_latents = torch.cat(image_latents, dim=0).to(dtype) * self.vae_scaling_factor

        num_latent_frames = (num_frames - 1) // self.vae_scale_factor_temporal + 1
        latent_height, latent_width = height // self.vae_scale_factor_spatial, width // self.vae_scale_factor_spatial
        shape = (batch_size, num_channels_latents, num_latent_frames, latent_height, latent_width)

        # The image conditions only the first latent frame; pad the remaining
        # frames with zeros so the condition tensor matches the latent shape.
        padding_shape = (batch_size, num_channels_latents, num_latent_frames - 1, latent_height, latent_width)
        latents_padding = torch.zeros(padding_shape, dtype=dtype, device=device)
        image_latents = torch.cat([image_latents, latents_padding], dim=2)

        if latents is None:
            latents = randn_tensor(shape, generator=generator, dtype=dtype, device=device)
        else:
            latents = latents.to(dtype=dtype, device=device)

        return latents, image_latents

    def enable_vae_slicing(self):
        r"""
        Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
        compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
        """
        depr_message = f"Calling `enable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_slicing()`."
        deprecate(
            "enable_vae_slicing",
            "0.40.0",
            depr_message,
        )
        self.vae.enable_slicing()

    def disable_vae_slicing(self):
        r"""
        Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to
        computing decoding in one step.
        """
        depr_message = f"Calling `disable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_slicing()`."
        deprecate(
            "disable_vae_slicing",
            "0.40.0",
            depr_message,
        )
        self.vae.disable_slicing()

    def enable_vae_tiling(self):
        r"""
        Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
        compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to
        allow processing larger images.
        """
        depr_message = f"Calling `enable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_tiling()`."
        deprecate(
            "enable_vae_tiling",
            "0.40.0",
            depr_message,
        )
        self.vae.enable_tiling()

    def disable_vae_tiling(self):
        r"""
        Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to
        computing decoding in one step.
        """
        depr_message = f"Calling `disable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_tiling()`."
        deprecate(
            "disable_vae_tiling",
            "0.40.0",
            depr_message,
        )
        self.vae.disable_tiling()

    @property
    def guidance_scale(self):
        return self._guidance_scale

    @property
    def num_timesteps(self):
        return self._num_timesteps

    @property
    def attention_kwargs(self):
        return self._attention_kwargs

    @property
    def current_timestep(self):
        return self._current_timestep

    @property
    def interrupt(self):
        return self._interrupt

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image: PipelineImageInput,
        prompt: str | list[str] = None,
        prompt_2: str | list[str] = None,
        negative_prompt: str | list[str] = None,
        negative_prompt_2: str | list[str] = None,
        height: int = 544,
        width: int = 960,
        num_frames: int = 97,
        num_inference_steps: int = 50,
        sigmas: list[float] = None,
        true_cfg_scale: float = 6.0,
        guidance_scale: float = 1.0,
        num_videos_per_prompt: int | None = 1,
        generator: torch.Generator | list[torch.Generator] | None = None,
        latents: torch.Tensor | None = None,
        prompt_embeds: torch.Tensor | None = None,
        pooled_prompt_embeds: torch.Tensor | None = None,
        prompt_attention_mask: torch.Tensor | None = None,
        negative_prompt_embeds: torch.Tensor | None = None,
        negative_pooled_prompt_embeds: torch.Tensor | None = None,
        negative_prompt_attention_mask: torch.Tensor | None = None,
        output_type: str | None = "pil",
        return_dict: bool = True,
        attention_kwargs: dict[str, Any] | None = None,
        callback_on_step_end: Callable[[int, int], None] | PipelineCallback | MultiPipelineCallbacks | None = None,
        callback_on_step_end_tensor_inputs: list[str] = ["latents"],
        prompt_template: dict[str, Any] = DEFAULT_PROMPT_TEMPLATE,
        max_sequence_length: int = 256,
    ):
        r"""
        The call function to the pipeline for generation.

        Args:
            prompt (`str` or `list[str]`, *optional*):
                The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`.
                instead.
            prompt_2 (`str` or `list[str]`, *optional*):
                The prompt or prompts to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
                will be used instead.
            negative_prompt (`str` or `list[str]`, *optional*):
                The prompt or prompts not to guide the image generation. If not defined, one has to pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `true_cfg_scale`
                is not greater than `1`).
            negative_prompt_2 (`str` or `list[str]`, *optional*):
                The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
                `text_encoder_2`. If not defined, `negative_prompt` is used in all the text-encoders.
            height (`int`, defaults to `544`):
                The height in pixels of the generated image.
            width (`int`, defaults to `960`):
                The width in pixels of the generated image.
            num_frames (`int`, defaults to `97`):
                The number of frames in the generated video.
            num_inference_steps (`int`, defaults to `50`):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            sigmas (`list[float]`, *optional*):
                Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in
                their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
                passed will be used.
            true_cfg_scale (`float`, *optional*, defaults to 6.0):
                When > 1.0 and a provided `negative_prompt`, enables true classifier-free guidance.
            guidance_scale (`float`, defaults to `1.0`):
                Guidance scale as defined in [Classifier-Free Diffusion
                Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2.
                of [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting
                `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to
                the text `prompt`, usually at the expense of lower image quality. Note that the only available
                HunyuanVideo model is CFG-distilled, which means that traditional guidance between unconditional and
                conditional latent is not applied.
            num_videos_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            generator (`torch.Generator` or `list[torch.Generator]`, *optional*):
                A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
                generation deterministic.
            latents (`torch.Tensor`, *optional*):
                Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a
                latents tensor is generated by sampling using the supplied random `generator`.
            prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
                provided, text embeddings are generated from the `prompt` input argument.
            pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, pooled text embeddings will be generated from `prompt` input argument.
            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
                argument.
            negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
                input argument.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between `PIL.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`HunyuanVideoPipelineOutput`] instead of a plain tuple.
            attention_kwargs (`dict`, *optional*):
                A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
                `self.processor` in
                [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
            callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*):
                A function or a subclass of `PipelineCallback` or `MultiPipelineCallbacks` that is called at the end of
                each denoising step during the inference. with the following arguments: `callback_on_step_end(self:
                DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a
                list of all tensors as specified by `callback_on_step_end_tensor_inputs`.
            callback_on_step_end_tensor_inputs (`list`, *optional*):
                The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
                will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
                `._callback_tensor_inputs` attribute of your pipeline class.

        Examples:

        Returns:
            [`~HunyuanVideoPipelineOutput`] or `tuple`:
                If `return_dict` is `True`, [`HunyuanVideoPipelineOutput`] is returned, otherwise a `tuple` is
                returned where the first element is a list with the generated images and the second element is a list
                of `bool`s indicating whether the corresponding generated image contains "not-safe-for-work" (nsfw)
                content.
        """

        if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)):
            callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs

        # 1. Check inputs. Raise error if not correct
        self.check_inputs(
            prompt,
            prompt_2,
            height,
            width,
            prompt_embeds,
            callback_on_step_end_tensor_inputs,
            prompt_template,
        )

        has_neg_prompt = negative_prompt is not None or (
            negative_prompt_embeds is not None and negative_pooled_prompt_embeds is not None
        )
        do_true_cfg = true_cfg_scale > 1 and has_neg_prompt

        self._guidance_scale = guidance_scale
        self._attention_kwargs = attention_kwargs
        self._current_timestep = None
        self._interrupt = False

        device = self._execution_device

        # 2. Define call parameters
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        # 3. Encode input prompt
        transformer_dtype = self.transformer.dtype
        prompt_embeds, pooled_prompt_embeds, prompt_attention_mask = self.encode_prompt(
            prompt=prompt,
            prompt_2=prompt_2,
            prompt_template=prompt_template,
            num_videos_per_prompt=num_videos_per_prompt,
            prompt_embeds=prompt_embeds,
            pooled_prompt_embeds=pooled_prompt_embeds,
            prompt_attention_mask=prompt_attention_mask,
            device=device,
            max_sequence_length=max_sequence_length,
        )
        prompt_embeds = prompt_embeds.to(transformer_dtype)
        prompt_attention_mask = prompt_attention_mask.to(transformer_dtype)
        pooled_prompt_embeds = pooled_prompt_embeds.to(transformer_dtype)

        if do_true_cfg:
            negative_prompt_embeds, negative_pooled_prompt_embeds, negative_prompt_attention_mask = self.encode_prompt(
                prompt=negative_prompt,
                prompt_2=negative_prompt_2,
                prompt_template=prompt_template,
                num_videos_per_prompt=num_videos_per_prompt,
                prompt_embeds=negative_prompt_embeds,
                pooled_prompt_embeds=negative_pooled_prompt_embeds,
                prompt_attention_mask=negative_prompt_attention_mask,
                device=device,
                max_sequence_length=max_sequence_length,
            )
            negative_prompt_embeds = negative_prompt_embeds.to(transformer_dtype)
            negative_prompt_attention_mask = negative_prompt_attention_mask.to(transformer_dtype)
            negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.to(transformer_dtype)

        # 4. Prepare timesteps
        sigmas = np.linspace(1.0, 0.0, num_inference_steps + 1)[:-1] if sigmas is None else sigmas
        if XLA_AVAILABLE:
            # Keep the timestep schedule on CPU under XLA to avoid graph recompilation.
            timestep_device = "cpu"
        else:
            timestep_device = device
        timesteps, num_inference_steps = retrieve_timesteps(
            self.scheduler, num_inference_steps, timestep_device, sigmas=sigmas
        )

        # 5. Prepare latent variables
        vae_dtype = self.vae.dtype
        image = self.video_processor.preprocess(image, height=height, width=width).to(device, vae_dtype)

        # The transformer consumes noise latents concatenated with image-condition
        # latents along the channel axis, hence the // 2.
        num_channels_latents = self.transformer.config.in_channels // 2
        latents, image_latents = self.prepare_latents(
            image,
            batch_size * num_videos_per_prompt,
            num_channels_latents,
            height,
            width,
            num_frames,
            torch.float32,
            device,
            generator,
            latents,
        )
        latent_image_input = image_latents.to(transformer_dtype)

        # 6. Prepare guidance condition (the distilled model expects guidance * 1000)
        guidance = torch.tensor([guidance_scale] * latents.shape[0], dtype=transformer_dtype, device=device) * 1000.0

        # 7. Denoising loop
        num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
        self._num_timesteps = len(timesteps)

        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                if self.interrupt:
                    continue

                self._current_timestep = t
                latent_model_input = latents.to(transformer_dtype)
                latent_model_input = torch.cat([latent_model_input, latent_image_input], dim=1)
                # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
                timestep = t.expand(latents.shape[0]).to(latents.dtype)

                noise_pred = self.transformer(
                    hidden_states=latent_model_input,
                    timestep=timestep,
                    encoder_hidden_states=prompt_embeds,
                    encoder_attention_mask=prompt_attention_mask,
                    pooled_projections=pooled_prompt_embeds,
                    guidance=guidance,
                    attention_kwargs=attention_kwargs,
                    return_dict=False,
                )[0]

                if do_true_cfg:
                    neg_noise_pred = self.transformer(
                        hidden_states=latent_model_input,
                        timestep=timestep,
                        encoder_hidden_states=negative_prompt_embeds,
                        encoder_attention_mask=negative_prompt_attention_mask,
                        pooled_projections=negative_pooled_prompt_embeds,
                        guidance=guidance,
                        attention_kwargs=attention_kwargs,
                        return_dict=False,
                    )[0]
                    noise_pred = neg_noise_pred + true_cfg_scale * (noise_pred - neg_noise_pred)

                # compute the previous noisy sample x_t -> x_t-1
                latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0]

                if callback_on_step_end is not None:
                    callback_kwargs = {}
                    for k in callback_on_step_end_tensor_inputs:
                        callback_kwargs[k] = locals()[k]
                    callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)

                    latents = callback_outputs.pop("latents", latents)
                    prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)

                # call the callback, if provided
                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                    progress_bar.update()

                if XLA_AVAILABLE:
                    xm.mark_step()

        self._current_timestep = None

        if not output_type == "latent":
            latents = latents.to(self.vae.dtype) / self.vae.config.scaling_factor
            video = self.vae.decode(latents, return_dict=False)[0]
            video = self.video_processor.postprocess_video(video, output_type=output_type)
        else:
            video = latents

        # Offload all models
        self.maybe_free_model_hooks()

        if not return_dict:
            return (video,)

        return HunyuanVideoPipelineOutput(frames=video)
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/pipelines/hunyuan_video/pipeline_hunyuan_skyreels_image2video.py", "license": "Apache License 2.0", "lines": 734, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:tests/pipelines/hunyuan_video/test_hunyuan_skyreels_image2video.py
# Copyright 2025 The HuggingFace Team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer, LlamaConfig, LlamaModel, LlamaTokenizer from diffusers import ( AutoencoderKLHunyuanVideo, FlowMatchEulerDiscreteScheduler, HunyuanSkyreelsImageToVideoPipeline, HunyuanVideoTransformer3DModel, ) from ...testing_utils import enable_full_determinism, torch_device from ..test_pipelines_common import PipelineTesterMixin, PyramidAttentionBroadcastTesterMixin, to_np enable_full_determinism() class HunyuanSkyreelsImageToVideoPipelineFastTests( PipelineTesterMixin, PyramidAttentionBroadcastTesterMixin, unittest.TestCase ): pipeline_class = HunyuanSkyreelsImageToVideoPipeline params = frozenset( ["image", "prompt", "height", "width", "guidance_scale", "prompt_embeds", "pooled_prompt_embeds"] ) batch_params = frozenset(["prompt", "image"]) required_optional_params = frozenset( [ "num_inference_steps", "generator", "latents", "return_dict", "callback_on_step_end", "callback_on_step_end_tensor_inputs", ] ) supports_dduf = False # there is no xformers processor for Flux test_xformers_attention = False test_layerwise_casting = True test_group_offloading = True def get_dummy_components(self, num_layers: int = 1, num_single_layers: int = 1): torch.manual_seed(0) transformer = HunyuanVideoTransformer3DModel( in_channels=8, out_channels=4, 
num_attention_heads=2, attention_head_dim=10, num_layers=num_layers, num_single_layers=num_single_layers, num_refiner_layers=1, patch_size=1, patch_size_t=1, guidance_embeds=True, text_embed_dim=16, pooled_projection_dim=8, rope_axes_dim=(2, 4, 4), ) torch.manual_seed(0) vae = AutoencoderKLHunyuanVideo( in_channels=3, out_channels=3, latent_channels=4, down_block_types=( "HunyuanVideoDownBlock3D", "HunyuanVideoDownBlock3D", "HunyuanVideoDownBlock3D", "HunyuanVideoDownBlock3D", ), up_block_types=( "HunyuanVideoUpBlock3D", "HunyuanVideoUpBlock3D", "HunyuanVideoUpBlock3D", "HunyuanVideoUpBlock3D", ), block_out_channels=(8, 8, 8, 8), layers_per_block=1, act_fn="silu", norm_num_groups=4, scaling_factor=0.476986, spatial_compression_ratio=8, temporal_compression_ratio=4, mid_block_add_attention=True, ) torch.manual_seed(0) scheduler = FlowMatchEulerDiscreteScheduler(shift=7.0) llama_text_encoder_config = LlamaConfig( bos_token_id=0, eos_token_id=2, hidden_size=16, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=2, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=32, ) clip_text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=8, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=2, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=32, ) torch.manual_seed(0) text_encoder = LlamaModel(llama_text_encoder_config) tokenizer = LlamaTokenizer.from_pretrained("finetrainers/dummy-hunyaunvideo", subfolder="tokenizer") torch.manual_seed(0) text_encoder_2 = CLIPTextModel(clip_text_encoder_config) tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "transformer": transformer, "vae": vae, "scheduler": scheduler, "text_encoder": text_encoder, "text_encoder_2": text_encoder_2, "tokenizer": tokenizer, "tokenizer_2": tokenizer_2, } return components def get_dummy_inputs(self, device, seed=0): if 
str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) image_height = 16 image_width = 16 image = Image.new("RGB", (image_width, image_height)) inputs = { "image": image, "prompt": "dance monkey", "prompt_template": { "template": "{}", "crop_start": 0, }, "generator": generator, "num_inference_steps": 2, "guidance_scale": 4.5, "height": 16, "width": 16, # 4 * k + 1 is the recommendation "num_frames": 9, "max_sequence_length": 16, "output_type": "pt", } return inputs def test_inference(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) video = pipe(**inputs).frames generated_video = video[0] self.assertEqual(generated_video.shape, (9, 3, 16, 16)) # fmt: off expected_slice = torch.tensor([0.5832, 0.5498, 0.4839, 0.4744, 0.4515, 0.4832, 0.496, 0.563, 0.5918, 0.5979, 0.5101, 0.6168, 0.6613, 0.536, 0.55, 0.5775]) # fmt: on generated_slice = generated_video.flatten() generated_slice = torch.cat([generated_slice[:8], generated_slice[-8:]]) self.assertTrue( torch.allclose(generated_slice, expected_slice, atol=1e-3), "The generated video does not match the expected slice.", ) def test_callback_inputs(self): sig = inspect.signature(self.pipeline_class.__call__) has_callback_tensor_inputs = "callback_on_step_end_tensor_inputs" in sig.parameters has_callback_step_end = "callback_on_step_end" in sig.parameters if not (has_callback_tensor_inputs and has_callback_step_end): return components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) self.assertTrue( hasattr(pipe, "_callback_tensor_inputs"), f" {self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs", ) def 
callback_inputs_subset(pipe, i, t, callback_kwargs): # iterate over callback args for tensor_name, tensor_value in callback_kwargs.items(): # check that we're only passing in allowed tensor inputs assert tensor_name in pipe._callback_tensor_inputs return callback_kwargs def callback_inputs_all(pipe, i, t, callback_kwargs): for tensor_name in pipe._callback_tensor_inputs: assert tensor_name in callback_kwargs # iterate over callback args for tensor_name, tensor_value in callback_kwargs.items(): # check that we're only passing in allowed tensor inputs assert tensor_name in pipe._callback_tensor_inputs return callback_kwargs inputs = self.get_dummy_inputs(torch_device) # Test passing in a subset inputs["callback_on_step_end"] = callback_inputs_subset inputs["callback_on_step_end_tensor_inputs"] = ["latents"] output = pipe(**inputs)[0] # Test passing in a everything inputs["callback_on_step_end"] = callback_inputs_all inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs output = pipe(**inputs)[0] def callback_inputs_change_tensor(pipe, i, t, callback_kwargs): is_last = i == (pipe.num_timesteps - 1) if is_last: callback_kwargs["latents"] = torch.zeros_like(callback_kwargs["latents"]) return callback_kwargs inputs["callback_on_step_end"] = callback_inputs_change_tensor inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs output = pipe(**inputs)[0] assert output.abs().sum() < 1e10 def test_attention_slicing_forward_pass( self, test_max_difference=True, test_mean_pixel_difference=True, expected_max_diff=1e-3 ): if not self.test_attention_slicing: return components = self.get_dummy_components() pipe = self.pipeline_class(**components) for component in pipe.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) generator_device = "cpu" inputs = self.get_dummy_inputs(generator_device) output_without_slicing = 
pipe(**inputs)[0] pipe.enable_attention_slicing(slice_size=1) inputs = self.get_dummy_inputs(generator_device) output_with_slicing1 = pipe(**inputs)[0] pipe.enable_attention_slicing(slice_size=2) inputs = self.get_dummy_inputs(generator_device) output_with_slicing2 = pipe(**inputs)[0] if test_max_difference: max_diff1 = np.abs(to_np(output_with_slicing1) - to_np(output_without_slicing)).max() max_diff2 = np.abs(to_np(output_with_slicing2) - to_np(output_without_slicing)).max() self.assertLess( max(max_diff1, max_diff2), expected_max_diff, "Attention slicing should not affect the inference results", ) def test_vae_tiling(self, expected_diff_max: float = 0.2): # Seems to require higher tolerance than the other tests expected_diff_max = 0.6 generator_device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to("cpu") pipe.set_progress_bar_config(disable=None) # Without tiling inputs = self.get_dummy_inputs(generator_device) inputs["height"] = inputs["width"] = 128 output_without_tiling = pipe(**inputs)[0] # With tiling pipe.vae.enable_tiling( tile_sample_min_height=96, tile_sample_min_width=96, tile_sample_stride_height=64, tile_sample_stride_width=64, ) inputs = self.get_dummy_inputs(generator_device) inputs["height"] = inputs["width"] = 128 output_with_tiling = pipe(**inputs)[0] self.assertLess( (to_np(output_without_tiling) - to_np(output_with_tiling)).max(), expected_diff_max, "VAE tiling should not affect the inference results", ) # TODO(aryan): Create a dummy gemma model with smol vocab size @unittest.skip( "A very small vocab size is used for fast tests. So, Any kind of prompt other than the empty default used in other tests will lead to a embedding lookup error. This test uses a long prompt that causes the error." ) def test_inference_batch_consistent(self): pass @unittest.skip( "A very small vocab size is used for fast tests. 
So, Any kind of prompt other than the empty default used in other tests will lead to a embedding lookup error. This test uses a long prompt that causes the error." ) def test_inference_batch_single_identical(self): pass
{ "repo_id": "huggingface/diffusers", "file_path": "tests/pipelines/hunyuan_video/test_hunyuan_skyreels_image2video.py", "license": "Apache License 2.0", "lines": 296, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/diffusers:src/diffusers/utils/source_code_parsing_utils.py
import ast import importlib import inspect import textwrap class ReturnNameVisitor(ast.NodeVisitor): """Thanks to ChatGPT for pairing.""" def __init__(self): self.return_names = [] def visit_Return(self, node): # Check if the return value is a tuple. if isinstance(node.value, ast.Tuple): for elt in node.value.elts: if isinstance(elt, ast.Name): self.return_names.append(elt.id) else: try: self.return_names.append(ast.unparse(elt)) except Exception: self.return_names.append(str(elt)) else: if isinstance(node.value, ast.Name): self.return_names.append(node.value.id) else: try: self.return_names.append(ast.unparse(node.value)) except Exception: self.return_names.append(str(node.value)) self.generic_visit(node) def _determine_parent_module(self, cls): from diffusers import DiffusionPipeline from diffusers.models.modeling_utils import ModelMixin if issubclass(cls, DiffusionPipeline): return "pipelines" elif issubclass(cls, ModelMixin): return "models" else: raise NotImplementedError def get_ast_tree(self, cls, attribute_name="encode_prompt"): parent_module_name = self._determine_parent_module(cls) main_module = importlib.import_module(f"diffusers.{parent_module_name}") current_cls_module = getattr(main_module, cls.__name__) source_code = inspect.getsource(getattr(current_cls_module, attribute_name)) source_code = textwrap.dedent(source_code) tree = ast.parse(source_code) return tree
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/utils/source_code_parsing_utils.py", "license": "Apache License 2.0", "lines": 45, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
huggingface/diffusers:tests/others/test_check_support_list.py
import os import sys import unittest from unittest.mock import mock_open, patch git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, "utils")) from check_support_list import check_documentation # noqa: E402 class TestCheckSupportList(unittest.TestCase): def setUp(self): # Mock doc and source contents that we can reuse self.doc_content = """# Documentation ## FooProcessor [[autodoc]] module.FooProcessor ## BarProcessor [[autodoc]] module.BarProcessor """ self.source_content = """ class FooProcessor(nn.Module): pass class BarProcessor(nn.Module): pass """ def test_check_documentation_all_documented(self): # In this test, both FooProcessor and BarProcessor are documented with patch("builtins.open", mock_open(read_data=self.doc_content)) as doc_file: doc_file.side_effect = [ mock_open(read_data=self.doc_content).return_value, mock_open(read_data=self.source_content).return_value, ] undocumented = check_documentation( doc_path="fake_doc.md", src_path="fake_source.py", doc_regex=r"\[\[autodoc\]\]\s([^\n]+)", src_regex=r"class\s+(\w+Processor)\(.*?nn\.Module.*?\):", ) self.assertEqual(len(undocumented), 0, f"Expected no undocumented classes, got {undocumented}") def test_check_documentation_missing_class(self): # In this test, only FooProcessor is documented, but BarProcessor is missing from the docs doc_content_missing = """# Documentation ## FooProcessor [[autodoc]] module.FooProcessor """ with patch("builtins.open", mock_open(read_data=doc_content_missing)) as doc_file: doc_file.side_effect = [ mock_open(read_data=doc_content_missing).return_value, mock_open(read_data=self.source_content).return_value, ] undocumented = check_documentation( doc_path="fake_doc.md", src_path="fake_source.py", doc_regex=r"\[\[autodoc\]\]\s([^\n]+)", src_regex=r"class\s+(\w+Processor)\(.*?nn\.Module.*?\):", ) self.assertIn("BarProcessor", undocumented, f"BarProcessor should be undocumented, got 
{undocumented}")
{ "repo_id": "huggingface/diffusers", "file_path": "tests/others/test_check_support_list.py", "license": "Apache License 2.0", "lines": 54, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/diffusers:utils/check_support_list.py
# coding=utf-8 # Copyright 2025 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # """ Utility that checks that modules like attention processors are listed in the documentation file. ```bash python utils/check_support_list.py ``` It has no auto-fix mode. """ import os import re # All paths are set with the intent that you run this script from the root of the repo REPO_PATH = "." def read_documented_classes(doc_path, autodoc_regex=r"\[\[autodoc\]\]\s([^\n]+)"): """ Reads documented classes from a doc file using a regex to find lines like [[autodoc]] my.module.Class. Returns a list of documented class names (just the class name portion). """ with open(os.path.join(REPO_PATH, doc_path), "r") as f: doctext = f.read() matches = re.findall(autodoc_regex, doctext) return [match.split(".")[-1] for match in matches] def read_source_classes(src_path, class_regex, exclude_conditions=None): """ Reads class names from a source file using a regex that captures class definitions. Optionally exclude classes based on a list of conditions (functions that take class name and return bool). """ if exclude_conditions is None: exclude_conditions = [] with open(os.path.join(REPO_PATH, src_path), "r") as f: doctext = f.read() classes = re.findall(class_regex, doctext) # Filter out classes that meet any of the exclude conditions filtered_classes = [c for c in classes if not any(cond(c) for cond in exclude_conditions)] return filtered_classes def check_documentation(doc_path, src_path, doc_regex, src_regex, exclude_conditions=None): """ Generic function to check if all classes defined in `src_path` are documented in `doc_path`. Returns a set of undocumented class names. 
""" documented = set(read_documented_classes(doc_path, doc_regex)) source_classes = set(read_source_classes(src_path, src_regex, exclude_conditions=exclude_conditions)) # Find which classes in source are not documented in a deterministic way. undocumented = sorted(source_classes - documented) return undocumented if __name__ == "__main__": # Define the checks we need to perform checks = { "Attention Processors": { "doc_path": "docs/source/en/api/attnprocessor.md", "src_path": "src/diffusers/models/attention_processor.py", "doc_regex": r"\[\[autodoc\]\]\s([^\n]+)", "src_regex": r"class\s+(\w+Processor(?:\d*_?\d*))[:(]", "exclude_conditions": [lambda c: "LoRA" in c, lambda c: c == "Attention"], }, "Image Processors": { "doc_path": "docs/source/en/api/image_processor.md", "src_path": "src/diffusers/image_processor.py", "doc_regex": r"\[\[autodoc\]\]\s([^\n]+)", "src_regex": r"class\s+(\w+Processor(?:\d*_?\d*))[:(]", }, "Activations": { "doc_path": "docs/source/en/api/activations.md", "src_path": "src/diffusers/models/activations.py", "doc_regex": r"\[\[autodoc\]\]\s([^\n]+)", "src_regex": r"class\s+(\w+)\s*\(.*?nn\.Module.*?\):", }, "Normalizations": { "doc_path": "docs/source/en/api/normalization.md", "src_path": "src/diffusers/models/normalization.py", "doc_regex": r"\[\[autodoc\]\]\s([^\n]+)", "src_regex": r"class\s+(\w+)\s*\(.*?nn\.Module.*?\):", "exclude_conditions": [ # Exclude LayerNorm as it's an intentional exception lambda c: c == "LayerNorm" ], }, "LoRA Mixins": { "doc_path": "docs/source/en/api/loaders/lora.md", "src_path": "src/diffusers/loaders/lora_pipeline.py", "doc_regex": r"\[\[autodoc\]\]\s([^\n]+)", "src_regex": r"class\s+(\w+LoraLoaderMixin(?:\d*_?\d*))[:(]", }, } missing_items = {} for category, params in checks.items(): undocumented = check_documentation( doc_path=params["doc_path"], src_path=params["src_path"], doc_regex=params["doc_regex"], src_regex=params["src_regex"], exclude_conditions=params.get("exclude_conditions"), ) if undocumented: 
missing_items[category] = undocumented # If we have any missing items, raise a single combined error if missing_items: error_msg = ["Some classes are not documented properly:\n"] for category, classes in missing_items.items(): error_msg.append(f"- {category}: {', '.join(sorted(classes))}") raise ValueError("\n".join(error_msg))
{ "repo_id": "huggingface/diffusers", "file_path": "utils/check_support_list.py", "license": "Apache License 2.0", "lines": 108, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:examples/dreambooth/test_dreambooth_lora_lumina2.py
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import os import sys import tempfile import safetensors sys.path.append("..") from test_examples_utils import ExamplesTestsAccelerate, run_command # noqa: E402 logging.basicConfig(level=logging.DEBUG) logger = logging.getLogger() stream_handler = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class DreamBoothLoRAlumina2(ExamplesTestsAccelerate): instance_data_dir = "docs/source/en/imgs" pretrained_model_name_or_path = "hf-internal-testing/tiny-lumina2-pipe" script_path = "examples/dreambooth/train_dreambooth_lora_lumina2.py" transformer_layer_type = "layers.0.attn.to_k" def test_dreambooth_lora_lumina2(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" {self.script_path} --pretrained_model_name_or_path {self.pretrained_model_name_or_path} --instance_data_dir {self.instance_data_dir} --resolution 32 --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 2 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} --max_sequence_length 16 """.split() test_args.extend(["--instance_prompt", ""]) run_command(self._launch_args + test_args) # save_pretrained smoke test self.assertTrue(os.path.isfile(os.path.join(tmpdir, "pytorch_lora_weights.safetensors"))) # make sure the state_dict has the correct naming in the parameters. 
lora_state_dict = safetensors.torch.load_file(os.path.join(tmpdir, "pytorch_lora_weights.safetensors")) is_lora = all("lora" in k for k in lora_state_dict.keys()) self.assertTrue(is_lora) # when not training the text encoder, all the parameters in the state dict should start # with `"transformer"` in their names. starts_with_transformer = all(key.startswith("transformer") for key in lora_state_dict.keys()) self.assertTrue(starts_with_transformer) def test_dreambooth_lora_latent_caching(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" {self.script_path} --pretrained_model_name_or_path {self.pretrained_model_name_or_path} --instance_data_dir {self.instance_data_dir} --resolution 32 --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 2 --cache_latents --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} --max_sequence_length 16 """.split() test_args.extend(["--instance_prompt", ""]) run_command(self._launch_args + test_args) # save_pretrained smoke test self.assertTrue(os.path.isfile(os.path.join(tmpdir, "pytorch_lora_weights.safetensors"))) # make sure the state_dict has the correct naming in the parameters. lora_state_dict = safetensors.torch.load_file(os.path.join(tmpdir, "pytorch_lora_weights.safetensors")) is_lora = all("lora" in k for k in lora_state_dict.keys()) self.assertTrue(is_lora) # when not training the text encoder, all the parameters in the state dict should start # with `"transformer"` in their names. 
starts_with_transformer = all(key.startswith("transformer") for key in lora_state_dict.keys()) self.assertTrue(starts_with_transformer) def test_dreambooth_lora_layers(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" {self.script_path} --pretrained_model_name_or_path {self.pretrained_model_name_or_path} --instance_data_dir {self.instance_data_dir} --resolution 32 --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 2 --cache_latents --learning_rate 5.0e-04 --scale_lr --lora_layers {self.transformer_layer_type} --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} --max_sequence_length 16 """.split() test_args.extend(["--instance_prompt", ""]) run_command(self._launch_args + test_args) # save_pretrained smoke test self.assertTrue(os.path.isfile(os.path.join(tmpdir, "pytorch_lora_weights.safetensors"))) # make sure the state_dict has the correct naming in the parameters. lora_state_dict = safetensors.torch.load_file(os.path.join(tmpdir, "pytorch_lora_weights.safetensors")) is_lora = all("lora" in k for k in lora_state_dict.keys()) self.assertTrue(is_lora) # when not training the text encoder, all the parameters in the state dict should start # with `"transformer"` in their names. In this test, we only params of # `self.transformer_layer_type` should be in the state dict. 
starts_with_transformer = all(self.transformer_layer_type in key for key in lora_state_dict) self.assertTrue(starts_with_transformer) def test_dreambooth_lora_lumina2_checkpointing_checkpoints_total_limit(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" {self.script_path} --pretrained_model_name_or_path={self.pretrained_model_name_or_path} --instance_data_dir={self.instance_data_dir} --output_dir={tmpdir} --resolution=32 --train_batch_size=1 --gradient_accumulation_steps=1 --max_train_steps=6 --checkpoints_total_limit=2 --checkpointing_steps=2 --max_sequence_length 16 """.split() test_args.extend(["--instance_prompt", ""]) run_command(self._launch_args + test_args) self.assertEqual( {x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-4", "checkpoint-6"}, ) def test_dreambooth_lora_lumina2_checkpointing_checkpoints_total_limit_removes_multiple_checkpoints(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" {self.script_path} --pretrained_model_name_or_path={self.pretrained_model_name_or_path} --instance_data_dir={self.instance_data_dir} --output_dir={tmpdir} --resolution=32 --train_batch_size=1 --gradient_accumulation_steps=1 --max_train_steps=4 --checkpointing_steps=2 --max_sequence_length 166 """.split() test_args.extend(["--instance_prompt", ""]) run_command(self._launch_args + test_args) self.assertEqual({x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-2", "checkpoint-4"}) resume_run_args = f""" {self.script_path} --pretrained_model_name_or_path={self.pretrained_model_name_or_path} --instance_data_dir={self.instance_data_dir} --output_dir={tmpdir} --resolution=32 --train_batch_size=1 --gradient_accumulation_steps=1 --max_train_steps=8 --checkpointing_steps=2 --resume_from_checkpoint=checkpoint-4 --checkpoints_total_limit=2 --max_sequence_length 16 """.split() resume_run_args.extend(["--instance_prompt", ""]) run_command(self._launch_args + resume_run_args) self.assertEqual({x for x in 
os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-6", "checkpoint-8"})
{ "repo_id": "huggingface/diffusers", "file_path": "examples/dreambooth/test_dreambooth_lora_lumina2.py", "license": "Apache License 2.0", "lines": 176, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/diffusers:examples/dreambooth/train_dreambooth_lora_lumina2.py
#!/usr/bin/env python # coding=utf-8 # Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import copy import itertools import logging import math import os import random import shutil import warnings from contextlib import nullcontext from pathlib import Path import numpy as np import torch import torch.utils.checkpoint import transformers from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import DistributedDataParallelKwargs, ProjectConfiguration, set_seed from huggingface_hub import create_repo, upload_folder from huggingface_hub.utils import insecure_hashlib from peft import LoraConfig, set_peft_model_state_dict from peft.utils import get_peft_model_state_dict from PIL import Image from PIL.ImageOps import exif_transpose from torch.utils.data import Dataset from torchvision import transforms from torchvision.transforms.functional import crop from tqdm.auto import tqdm from transformers import AutoTokenizer, Gemma2Model import diffusers from diffusers import ( AutoencoderKL, FlowMatchEulerDiscreteScheduler, Lumina2Pipeline, Lumina2Transformer2DModel, ) from diffusers.optimization import get_scheduler from diffusers.training_utils import ( cast_training_params, compute_density_for_timestep_sampling, compute_loss_weighting_for_sd3, free_memory, ) from diffusers.utils import ( check_min_version, convert_unet_state_dict_to_peft, is_wandb_available, ) from 
diffusers.utils.hub_utils import load_or_create_model_card, populate_model_card from diffusers.utils.import_utils import is_torch_npu_available from diffusers.utils.torch_utils import is_compiled_module if is_wandb_available(): import wandb # Will error if the minimal version of diffusers is not installed. Remove at your own risks. check_min_version("0.37.0.dev0") logger = get_logger(__name__) if is_torch_npu_available(): torch.npu.config.allow_internal_format = False def save_model_card( repo_id: str, images=None, base_model: str = None, instance_prompt=None, system_prompt=None, validation_prompt=None, repo_folder=None, ): widget_dict = [] if images is not None: for i, image in enumerate(images): image.save(os.path.join(repo_folder, f"image_{i}.png")) widget_dict.append( {"text": validation_prompt if validation_prompt else " ", "output": {"url": f"image_{i}.png"}} ) model_description = f""" # Lumina2 DreamBooth LoRA - {repo_id} <Gallery /> ## Model description These are {repo_id} DreamBooth LoRA weights for {base_model}. The weights were trained using [DreamBooth](https://dreambooth.github.io/) with the [Lumina2 diffusers trainer](https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/README_lumina2.md). ## Trigger words You should use `{instance_prompt}` to trigger the image generation. The following `system_prompt` was also used used during training (ignore if `None`): {system_prompt}. ## Download model [Download the *.safetensors LoRA]({repo_id}/tree/main) in the Files & versions tab. 
## Use it with the [🧨 diffusers library](https://github.com/huggingface/diffusers) ```py TODO ``` For more details, including weighting, merging and fusing LoRAs, check the [documentation on loading LoRAs in diffusers](https://huggingface.co/docs/diffusers/main/en/using-diffusers/loading_adapters) """ model_card = load_or_create_model_card( repo_id_or_path=repo_id, from_training=True, license="apache-2.0", base_model=base_model, prompt=instance_prompt, model_description=model_description, widget=widget_dict, ) tags = [ "text-to-image", "diffusers-training", "diffusers", "lora", "lumina2", "lumina2-diffusers", "template:sd-lora", ] model_card = populate_model_card(model_card, tags=tags) model_card.save(os.path.join(repo_folder, "README.md")) def log_validation( pipeline, args, accelerator, pipeline_args, epoch, is_final_validation=False, ): logger.info( f"Running validation... \n Generating {args.num_validation_images} images with prompt:" f" {args.validation_prompt}." ) pipeline = pipeline.to(accelerator.device) pipeline.set_progress_bar_config(disable=True) # run inference generator = torch.Generator(device=accelerator.device).manual_seed(args.seed) if args.seed is not None else None autocast_ctx = torch.autocast(accelerator.device.type) if not is_final_validation else nullcontext() with autocast_ctx: images = [pipeline(**pipeline_args, generator=generator).images[0] for _ in range(args.num_validation_images)] for tracker in accelerator.trackers: phase_name = "test" if is_final_validation else "validation" if tracker.name == "tensorboard": np_images = np.stack([np.asarray(img) for img in images]) tracker.writer.add_images(phase_name, np_images, epoch, dataformats="NHWC") if tracker.name == "wandb": tracker.log( { phase_name: [ wandb.Image(image, caption=f"{i}: {pipeline_args['prompt']}") for i, image in enumerate(images) ] } ) del pipeline if torch.cuda.is_available(): torch.cuda.empty_cache() return images def parse_args(input_args=None): parser = 
argparse.ArgumentParser(description="Simple example of a training script.")
    # --- Model / data sources ---
    parser.add_argument(
        "--pretrained_model_name_or_path",
        type=str,
        default=None,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "--revision",
        type=str,
        default=None,
        required=False,
        help="Revision of pretrained model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "--variant",
        type=str,
        default=None,
        help="Variant of the model files of the pretrained model identifier from huggingface.co/models, 'e.g.' fp16",
    )
    parser.add_argument(
        "--dataset_name",
        type=str,
        default=None,
        help=(
            "The name of the Dataset (from the HuggingFace hub) containing the training data of instance images (could be your own, possibly private,"
            " dataset). It can also be a path pointing to a local copy of a dataset in your filesystem,"
            " or to a folder containing files that 🤗 Datasets can understand."
        ),
    )
    parser.add_argument(
        "--dataset_config_name",
        type=str,
        default=None,
        help="The config of the Dataset, leave as None if there's only one config.",
    )
    parser.add_argument(
        "--instance_data_dir",
        type=str,
        default=None,
        help=("A folder containing the training data. "),
    )
    parser.add_argument(
        "--cache_dir",
        type=str,
        default=None,
        help="The directory where the downloaded models and datasets will be stored.",
    )
    parser.add_argument(
        "--image_column",
        type=str,
        default="image",
        help="The column of the dataset containing the target image. By "
        "default, the standard Image Dataset maps out 'file_name' "
        "to 'image'.",
    )
    parser.add_argument(
        "--caption_column",
        type=str,
        default=None,
        help="The column of the dataset containing the instance prompt for each image",
    )
    parser.add_argument("--repeats", type=int, default=1, help="How many times to repeat the training data.")
    # --- Prior preservation / prompts ---
    parser.add_argument(
        "--class_data_dir",
        type=str,
        default=None,
        required=False,
        help="A folder containing the training data of class images.",
    )
    parser.add_argument(
        "--instance_prompt",
        type=str,
        default=None,
        required=True,
        help="The prompt with identifier specifying the instance, e.g. 'photo of a TOK dog', 'in the style of TOK'",
    )
    parser.add_argument(
        "--class_prompt",
        type=str,
        default=None,
        help="The prompt to specify images in the same class as provided instance images.",
    )
    parser.add_argument(
        "--max_sequence_length",
        type=int,
        default=256,
        help="Maximum sequence length to use with with the Gemma2 model",
    )
    parser.add_argument(
        "--system_prompt",
        type=str,
        default=None,
        help="System prompt to use during inference to give the Gemma2 model certain characteristics.",
    )
    # --- Validation ---
    parser.add_argument(
        "--validation_prompt",
        type=str,
        default=None,
        help="A prompt that is used during validation to verify that the model is learning.",
    )
    parser.add_argument(
        "--final_validation_prompt",
        type=str,
        default=None,
        help="A prompt that is used during a final validation to verify that the model is learning. Ignored if `--validation_prompt` is provided.",
    )
    parser.add_argument(
        "--num_validation_images",
        type=int,
        default=4,
        help="Number of images that should be generated during validation with `validation_prompt`.",
    )
    parser.add_argument(
        "--validation_epochs",
        type=int,
        default=50,
        help=(
            "Run dreambooth validation every X epochs. Dreambooth validation consists of running the prompt"
            " `args.validation_prompt` multiple times: `args.num_validation_images`."
        ),
    )
    # --- LoRA / prior-preservation hyperparameters ---
    parser.add_argument(
        "--rank",
        type=int,
        default=4,
        help=("The dimension of the LoRA update matrices."),
    )
    parser.add_argument("--lora_dropout", type=float, default=0.0, help="Dropout probability for LoRA layers")
    parser.add_argument(
        "--with_prior_preservation",
        default=False,
        action="store_true",
        help="Flag to add prior preservation loss.",
    )
    parser.add_argument("--prior_loss_weight", type=float, default=1.0, help="The weight of prior preservation loss.")
    parser.add_argument(
        "--num_class_images",
        type=int,
        default=100,
        help=(
            "Minimal class images for prior preservation loss. If there are not enough images already present in"
            " class_data_dir, additional images will be sampled with class_prompt."
        ),
    )
    # --- Output / reproducibility / image preprocessing ---
    parser.add_argument(
        "--output_dir",
        type=str,
        default="lumina2-dreambooth-lora",
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
    parser.add_argument(
        "--resolution",
        type=int,
        default=512,
        help=(
            "The resolution for input images, all the images in the train/validation dataset will be resized to this"
            " resolution"
        ),
    )
    parser.add_argument(
        "--center_crop",
        default=False,
        action="store_true",
        help=(
            "Whether to center crop the input images to the resolution. If not set, the images will be randomly"
            " cropped. The images will be resized to the resolution first before cropping."
        ),
    )
    parser.add_argument(
        "--random_flip",
        action="store_true",
        help="whether to randomly flip images horizontally",
    )
    # --- Training loop ---
    parser.add_argument(
        "--train_batch_size", type=int, default=4, help="Batch size (per device) for the training dataloader."
    )
    parser.add_argument(
        "--sample_batch_size", type=int, default=4, help="Batch size (per device) for sampling images."
    )
    parser.add_argument("--num_train_epochs", type=int, default=1)
    parser.add_argument(
        "--max_train_steps",
        type=int,
        default=None,
        help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
    )
    parser.add_argument(
        "--checkpointing_steps",
        type=int,
        default=500,
        help=(
            "Save a checkpoint of the training state every X updates. These checkpoints can be used both as final"
            " checkpoints in case they are better than the last checkpoint, and are also suitable for resuming"
            " training using `--resume_from_checkpoint`."
        ),
    )
    parser.add_argument(
        "--checkpoints_total_limit",
        type=int,
        default=None,
        help=("Max number of checkpoints to store."),
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help=(
            "Whether training should be resumed from a previous checkpoint. Use a path saved by"
            ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.'
        ),
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument(
        "--gradient_checkpointing",
        action="store_true",
        help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.",
    )
    # --- Learning rate / scheduler ---
    parser.add_argument(
        "--learning_rate",
        type=float,
        default=1e-4,
        help="Initial learning rate (after the potential warmup period) to use.",
    )
    parser.add_argument(
        "--scale_lr",
        action="store_true",
        default=False,
        help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.",
    )
    parser.add_argument(
        "--lr_scheduler",
        type=str,
        default="constant",
        help=(
            'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
            ' "constant", "constant_with_warmup"]'
        ),
    )
    parser.add_argument(
        "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler."
    )
    parser.add_argument(
        "--lr_num_cycles",
        type=int,
        default=1,
        help="Number of hard resets of the lr in cosine_with_restarts scheduler.",
    )
    parser.add_argument("--lr_power", type=float, default=1.0, help="Power factor of the polynomial scheduler.")
    parser.add_argument(
        "--dataloader_num_workers",
        type=int,
        default=0,
        help=(
            "Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process."
        ),
    )
    # --- Flow-matching loss weighting ---
    parser.add_argument(
        "--weighting_scheme",
        type=str,
        default="none",
        choices=["sigma_sqrt", "logit_normal", "mode", "cosmap", "none"],
        help=('We default to the "none" weighting scheme for uniform sampling and uniform loss'),
    )
    parser.add_argument(
        "--logit_mean", type=float, default=0.0, help="mean to use when using the `'logit_normal'` weighting scheme."
    )
    parser.add_argument(
        "--logit_std", type=float, default=1.0, help="std to use when using the `'logit_normal'` weighting scheme."
    )
    parser.add_argument(
        "--mode_scale",
        type=float,
        default=1.29,
        help="Scale of mode weighting scheme. Only effective when using the `'mode'` as the `weighting_scheme`.",
    )
    # --- Optimizer (AdamW or Prodigy) ---
    parser.add_argument(
        "--optimizer",
        type=str,
        default="AdamW",
        help=('The optimizer type to use. Choose between ["AdamW", "prodigy"]'),
    )
    parser.add_argument(
        "--use_8bit_adam",
        action="store_true",
        help="Whether or not to use 8-bit Adam from bitsandbytes. Ignored if optimizer is not set to AdamW",
    )
    parser.add_argument(
        "--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam and Prodigy optimizers."
    )
    parser.add_argument(
        "--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam and Prodigy optimizers."
    )
    parser.add_argument(
        "--prodigy_beta3",
        type=float,
        default=None,
        help="coefficients for computing the Prodigy stepsize using running averages. If set to None, "
        "uses the value of square root of beta2. Ignored if optimizer is adamW",
    )
    parser.add_argument("--prodigy_decouple", type=bool, default=True, help="Use AdamW style decoupled weight decay")
    parser.add_argument("--adam_weight_decay", type=float, default=1e-04, help="Weight decay to use for unet params")
    parser.add_argument(
        "--lora_layers",
        type=str,
        default=None,
        help=(
            'The transformer modules to apply LoRA training on. Please specify the layers in a comma separated. E.g. - "to_k,to_q,to_v" will result in lora training of attention layers only'
        ),
    )
    parser.add_argument(
        "--adam_epsilon",
        type=float,
        default=1e-08,
        help="Epsilon value for the Adam optimizer and Prodigy optimizers.",
    )
    parser.add_argument(
        "--prodigy_use_bias_correction",
        type=bool,
        default=True,
        help="Turn on Adam's bias correction. True by default. Ignored if optimizer is adamW",
    )
    parser.add_argument(
        "--prodigy_safeguard_warmup",
        type=bool,
        default=True,
        help="Remove lr from the denominator of D estimate to avoid issues during warm-up stage. True by default. "
        "Ignored if optimizer is adamW",
    )
    parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
    # --- Hub / logging / precision / misc ---
    parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
    parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.")
    parser.add_argument(
        "--hub_model_id",
        type=str,
        default=None,
        help="The name of the repository to keep in sync with the local `output_dir`.",
    )
    parser.add_argument(
        "--logging_dir",
        type=str,
        default="logs",
        help=(
            "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
            " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
        ),
    )
    parser.add_argument(
        "--allow_tf32",
        action="store_true",
        help=(
            "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see"
            " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices"
        ),
    )
    parser.add_argument(
        "--cache_latents",
        action="store_true",
        default=False,
        help="Cache the VAE latents",
    )
    parser.add_argument(
        "--report_to",
        type=str,
        default="tensorboard",
        help=(
            'The integration to report the results and logs to. Supported platforms are `"tensorboard"`'
            ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.'
        ),
    )
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16"],
        help=(
            "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >="
            " 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the"
            " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config."
        ),
    )
    parser.add_argument(
        "--upcast_before_saving",
        action="store_true",
        default=False,
        help=(
            "Whether to upcast the trained transformer layers to float32 before saving (at the end of training). "
            "Defaults to precision dtype used for training to save memory"
        ),
    )
    parser.add_argument(
        "--image_interpolation_mode",
        type=str,
        default="lanczos",
        choices=[
            f.lower() for f in dir(transforms.InterpolationMode) if not f.startswith("__") and not f.endswith("__")
        ],
        help="The image interpolation method to use for resizing images.",
    )
    parser.add_argument(
        "--offload",
        action="store_true",
        help="Whether to offload the VAE and the text encoder to CPU when they are not used.",
    )
    parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")

    if input_args is not None:
        args = parser.parse_args(input_args)
    else:
        args = parser.parse_args()

    # Exactly one data source must be specified.
    if args.dataset_name is None and args.instance_data_dir is None:
        raise ValueError("Specify either `--dataset_name` or `--instance_data_dir`")

    if args.dataset_name is not None and args.instance_data_dir is not None:
        raise ValueError("Specify only one of `--dataset_name` or `--instance_data_dir`")

    # The launcher's LOCAL_RANK env var takes precedence over the CLI flag.
    env_local_rank = int(os.environ.get("LOCAL_RANK", -1))
    if env_local_rank != -1 and env_local_rank != args.local_rank:
        args.local_rank = env_local_rank

    # Prior-preservation flags must be internally consistent.
    if args.with_prior_preservation:
        if args.class_data_dir is None:
            raise ValueError("You must specify a data directory for class images.")
        if args.class_prompt is None:
            raise ValueError("You must specify prompt for class images.")
    else:
        # logger is not available yet
        if args.class_data_dir is not None:
            warnings.warn("You need not use --class_data_dir without --with_prior_preservation.")
        if args.class_prompt is not None:
            warnings.warn("You need not use --class_prompt without --with_prior_preservation.")

    return args


class DreamBoothDataset(Dataset):
    """
    A dataset to prepare the instance and class images with the prompts for fine-tuning the model. It pre-processes the
    images.
    """

    def __init__(
        self,
        instance_data_root,
        instance_prompt,
        class_prompt,
        class_data_root=None,
        class_num=None,
        size=1024,
        repeats=1,
        center_crop=False,
    ):
        # NOTE(review): this __init__ reads the module-level `args` namespace
        # (dataset_name, image_column, caption_column, random_flip, center_crop,
        # resolution, image_interpolation_mode) in addition to its parameters.
        self.size = size
        self.center_crop = center_crop
        self.instance_prompt = instance_prompt
        self.custom_instance_prompts = None
        self.class_prompt = class_prompt

        # if --dataset_name is provided or a metadata jsonl file is provided in the local --instance_data directory,
        # we load the training data using load_dataset
        if args.dataset_name is not None:
            try:
                from datasets import load_dataset
            except ImportError:
                raise ImportError(
                    "You are trying to load your data using the datasets library. If you wish to train using custom "
                    "captions please install the datasets library: `pip install datasets`. If you wish to load a "
                    "local folder containing images only, specify --instance_data_dir instead."
                )
            # Downloading and loading a dataset from the hub.
            # See more about loading custom images at
            # https://huggingface.co/docs/datasets/v2.0.0/en/dataset_script
            dataset = load_dataset(
                args.dataset_name,
                args.dataset_config_name,
                cache_dir=args.cache_dir,
            )
            # Preprocessing the datasets.
            column_names = dataset["train"].column_names

            # 6. Get the column names for input/target.
            if args.image_column is None:
                image_column = column_names[0]
                logger.info(f"image column defaulting to {image_column}")
            else:
                image_column = args.image_column
                if image_column not in column_names:
                    raise ValueError(
                        f"`--image_column` value '{args.image_column}' not found in dataset columns. Dataset columns are: {', '.join(column_names)}"
                    )
            instance_images = dataset["train"][image_column]

            if args.caption_column is None:
                logger.info(
                    "No caption column provided, defaulting to instance_prompt for all images. If your dataset "
                    "contains captions/prompts for the images, make sure to specify the "
                    "column as --caption_column"
                )
                self.custom_instance_prompts = None
            else:
                if args.caption_column not in column_names:
                    raise ValueError(
                        f"`--caption_column` value '{args.caption_column}' not found in dataset columns. Dataset columns are: {', '.join(column_names)}"
                    )
                custom_instance_prompts = dataset["train"][args.caption_column]
                # create final list of captions according to --repeats
                self.custom_instance_prompts = []
                for caption in custom_instance_prompts:
                    self.custom_instance_prompts.extend(itertools.repeat(caption, repeats))
        else:
            # Local-folder path: every image file in the directory is an instance image.
            self.instance_data_root = Path(instance_data_root)
            if not self.instance_data_root.exists():
                raise ValueError("Instance images root doesn't exists.")

            instance_images = [Image.open(path) for path in list(Path(instance_data_root).iterdir())]
            self.custom_instance_prompts = None

        # Duplicate each image --repeats times (prompts were duplicated above).
        self.instance_images = []
        for img in instance_images:
            self.instance_images.extend(itertools.repeat(img, repeats))

        # Eagerly preprocess every instance image into a normalized tensor.
        self.pixel_values = []
        interpolation = getattr(transforms.InterpolationMode, args.image_interpolation_mode.upper(), None)
        if interpolation is None:
            raise ValueError(f"Unsupported interpolation mode: {args.image_interpolation_mode}")
        train_resize = transforms.Resize(size, interpolation=interpolation)
        train_crop = transforms.CenterCrop(size) if center_crop else transforms.RandomCrop(size)
        # p=1.0 because the 50% coin flip is done manually below.
        train_flip = transforms.RandomHorizontalFlip(p=1.0)
        train_transforms = transforms.Compose(
            [
                transforms.ToTensor(),
                transforms.Normalize([0.5], [0.5]),
            ]
        )
        for image in self.instance_images:
            image = exif_transpose(image)
            if not image.mode == "RGB":
                image = image.convert("RGB")
            image = train_resize(image)
            if args.random_flip and random.random() < 0.5:
                # flip
                image = train_flip(image)
            if args.center_crop:
                y1 = max(0, int(round((image.height - args.resolution) / 2.0)))
                x1 = max(0, int(round((image.width - args.resolution) / 2.0)))
                image = train_crop(image)
            else:
                y1, x1, h, w = train_crop.get_params(image, (args.resolution, args.resolution))
                image = crop(image, y1, x1, h, w)
            image = train_transforms(image)
            self.pixel_values.append(image)

        self.num_instance_images = len(self.instance_images)
        self._length = self.num_instance_images

        if class_data_root is not None:
            # Prior-preservation: class images are loaded lazily in __getitem__.
            self.class_data_root = Path(class_data_root)
            self.class_data_root.mkdir(parents=True, exist_ok=True)
            self.class_images_path = list(self.class_data_root.iterdir())
            if class_num is not None:
                self.num_class_images = min(len(self.class_images_path), class_num)
            else:
                self.num_class_images = len(self.class_images_path)
            self._length = max(self.num_class_images, self.num_instance_images)
        else:
            self.class_data_root = None

        # Transform applied to class images at __getitem__ time.
        self.image_transforms = transforms.Compose(
            [
                transforms.Resize(size, interpolation=interpolation),
                transforms.CenterCrop(size) if center_crop else transforms.RandomCrop(size),
                transforms.ToTensor(),
                transforms.Normalize([0.5], [0.5]),
            ]
        )

    def __len__(self):
        # Length covers the larger of the instance and class image sets.
        return self._length

    def __getitem__(self, index):
        """Return a dict with preprocessed instance (and optionally class) image + prompt."""
        example = {}
        instance_image = self.pixel_values[index % self.num_instance_images]
        example["instance_images"] = instance_image

        if self.custom_instance_prompts:
            caption = self.custom_instance_prompts[index % self.num_instance_images]
            if caption:
                example["instance_prompt"] = caption
            else:
                # Empty caption: fall back to the shared instance prompt.
                example["instance_prompt"] = self.instance_prompt
        else:  # no custom captions available; use the shared instance prompt
            example["instance_prompt"] = self.instance_prompt

        if self.class_data_root:
            class_image = Image.open(self.class_images_path[index % self.num_class_images])
            class_image = exif_transpose(class_image)

            if not class_image.mode == "RGB":
                class_image = class_image.convert("RGB")
            example["class_images"] = self.image_transforms(class_image)
            example["class_prompt"] = self.class_prompt
        return example


def collate_fn(examples, with_prior_preservation=False):
    """Stack per-example tensors and gather prompts into a training batch."""
    pixel_values = [example["instance_images"] for example in examples]
    prompts = [example["instance_prompt"] for
example in examples] # Concat class and instance examples for prior preservation. # We do this to avoid doing two forward passes. if with_prior_preservation: pixel_values += [example["class_images"] for example in examples] prompts += [example["class_prompt"] for example in examples] pixel_values = torch.stack(pixel_values) pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float() batch = {"pixel_values": pixel_values, "prompts": prompts} return batch class PromptDataset(Dataset): "A simple dataset to prepare the prompts to generate class images on multiple GPUs." def __init__(self, prompt, num_samples): self.prompt = prompt self.num_samples = num_samples def __len__(self): return self.num_samples def __getitem__(self, index): example = {} example["prompt"] = self.prompt example["index"] = index return example def main(args): if args.report_to == "wandb" and args.hub_token is not None: raise ValueError( "You cannot use both --report_to=wandb and --hub_token due to a security risk of exposing your token." " Please use `hf auth login` to authenticate with the Hub." ) if torch.backends.mps.is_available() and args.mixed_precision == "bf16": # due to pytorch#99272, MPS does not yet support bfloat16. raise ValueError( "Mixed precision training with bfloat16 is not supported on MPS. Please use fp16 (recommended) or fp32 instead." ) logging_dir = Path(args.output_dir, args.logging_dir) accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir) kwargs = DistributedDataParallelKwargs(find_unused_parameters=True) accelerator = Accelerator( gradient_accumulation_steps=args.gradient_accumulation_steps, mixed_precision=args.mixed_precision, log_with=args.report_to, project_config=accelerator_project_config, kwargs_handlers=[kwargs], ) # Disable AMP for MPS. 
if torch.backends.mps.is_available(): accelerator.native_amp = False if args.report_to == "wandb": if not is_wandb_available(): raise ImportError("Make sure to install wandb if you want to use it for logging during training.") # Make one log on every process with the configuration for debugging. logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger.info(accelerator.state, main_process_only=False) if accelerator.is_local_main_process: transformers.utils.logging.set_verbosity_warning() diffusers.utils.logging.set_verbosity_info() else: transformers.utils.logging.set_verbosity_error() diffusers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Generate class images if prior preservation is enabled. if args.with_prior_preservation: class_images_dir = Path(args.class_data_dir) if not class_images_dir.exists(): class_images_dir.mkdir(parents=True) cur_class_images = len(list(class_images_dir.iterdir())) if cur_class_images < args.num_class_images: pipeline = Lumina2Pipeline.from_pretrained( args.pretrained_model_name_or_path, torch_dtype=torch.bfloat16 if args.mixed_precision == "bf16" else torch.float16, revision=args.revision, variant=args.variant, ) pipeline.set_progress_bar_config(disable=True) num_new_images = args.num_class_images - cur_class_images logger.info(f"Number of class images to sample: {num_new_images}.") sample_dataset = PromptDataset(args.class_prompt, num_new_images) sample_dataloader = torch.utils.data.DataLoader(sample_dataset, batch_size=args.sample_batch_size) sample_dataloader = accelerator.prepare(sample_dataloader) pipeline.to(accelerator.device) for example in tqdm( sample_dataloader, desc="Generating class images", disable=not accelerator.is_local_main_process ): images = pipeline(example["prompt"]).images for i, image in enumerate(images): hash_image = 
insecure_hashlib.sha1(image.tobytes()).hexdigest() image_filename = class_images_dir / f"{example['index'][i] + cur_class_images}-{hash_image}.jpg" image.save(image_filename) del pipeline free_memory() # Handle the repository creation if accelerator.is_main_process: if args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) if args.push_to_hub: repo_id = create_repo( repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, ).repo_id # Load the tokenizer tokenizer = AutoTokenizer.from_pretrained( args.pretrained_model_name_or_path, subfolder="tokenizer", revision=args.revision, ) # Load scheduler and models noise_scheduler = FlowMatchEulerDiscreteScheduler.from_pretrained( args.pretrained_model_name_or_path, subfolder="scheduler", revision=args.revision ) noise_scheduler_copy = copy.deepcopy(noise_scheduler) text_encoder = Gemma2Model.from_pretrained( args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision, variant=args.variant ) vae = AutoencoderKL.from_pretrained( args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision, variant=args.variant, ) transformer = Lumina2Transformer2DModel.from_pretrained( args.pretrained_model_name_or_path, subfolder="transformer", revision=args.revision, variant=args.variant ) # We only train the additional adapter LoRA layers transformer.requires_grad_(False) vae.requires_grad_(False) text_encoder.requires_grad_(False) # For mixed precision training we cast all non-trainable weights (vae, text_encoder and transformer) to half-precision # as these weights are only used for inference, keeping weights in full precision is not required. weight_dtype = torch.float32 if accelerator.mixed_precision == "fp16": weight_dtype = torch.float16 elif accelerator.mixed_precision == "bf16": weight_dtype = torch.bfloat16 if torch.backends.mps.is_available() and weight_dtype == torch.bfloat16: # due to pytorch#99272, MPS does not yet support bfloat16. 
raise ValueError( "Mixed precision training with bfloat16 is not supported on MPS. Please use fp16 (recommended) or fp32 instead." ) # keep VAE in FP32 to ensure numerical stability. vae.to(dtype=torch.float32) transformer.to(accelerator.device, dtype=weight_dtype) # because Gemma2 is particularly suited for bfloat16. text_encoder.to(dtype=torch.bfloat16) # Initialize a text encoding pipeline and keep it to CPU for now. text_encoding_pipeline = Lumina2Pipeline.from_pretrained( args.pretrained_model_name_or_path, vae=None, transformer=None, text_encoder=text_encoder, tokenizer=tokenizer, ) if args.gradient_checkpointing: transformer.enable_gradient_checkpointing() if args.lora_layers is not None: target_modules = [layer.strip() for layer in args.lora_layers.split(",")] else: target_modules = ["to_k", "to_q", "to_v", "to_out.0"] # now we will add new LoRA weights the transformer layers transformer_lora_config = LoraConfig( r=args.rank, lora_alpha=args.rank, lora_dropout=args.lora_dropout, init_lora_weights="gaussian", target_modules=target_modules, ) transformer.add_adapter(transformer_lora_config) def unwrap_model(model): model = accelerator.unwrap_model(model) model = model._orig_mod if is_compiled_module(model) else model return model # create custom saving & loading hooks so that `accelerator.save_state(...)` serializes in a nice format def save_model_hook(models, weights, output_dir): if accelerator.is_main_process: transformer_lora_layers_to_save = None for model in models: if isinstance(model, type(unwrap_model(transformer))): transformer_lora_layers_to_save = get_peft_model_state_dict(model) else: raise ValueError(f"unexpected save model: {model.__class__}") # make sure to pop weight so that corresponding model is not saved again weights.pop() Lumina2Pipeline.save_lora_weights( output_dir, transformer_lora_layers=transformer_lora_layers_to_save, ) def load_model_hook(models, input_dir): transformer_ = None while len(models) > 0: model = models.pop() if 
isinstance(model, type(unwrap_model(transformer))): transformer_ = model else: raise ValueError(f"unexpected save model: {model.__class__}") lora_state_dict = Lumina2Pipeline.lora_state_dict(input_dir) transformer_state_dict = { f"{k.replace('transformer.', '')}": v for k, v in lora_state_dict.items() if k.startswith("transformer.") } transformer_state_dict = convert_unet_state_dict_to_peft(transformer_state_dict) incompatible_keys = set_peft_model_state_dict(transformer_, transformer_state_dict, adapter_name="default") if incompatible_keys is not None: # check only for unexpected keys unexpected_keys = getattr(incompatible_keys, "unexpected_keys", None) if unexpected_keys: logger.warning( f"Loading adapter weights from state_dict led to unexpected keys not found in the model: " f" {unexpected_keys}. " ) # Make sure the trainable params are in float32. This is again needed since the base models # are in `weight_dtype`. More details: # https://github.com/huggingface/diffusers/pull/6514#discussion_r1449796804 if args.mixed_precision == "fp16": models = [transformer_] # only upcast trainable parameters (LoRA) into fp32 cast_training_params(models) accelerator.register_save_state_pre_hook(save_model_hook) accelerator.register_load_state_pre_hook(load_model_hook) # Enable TF32 for faster training on Ampere GPUs, # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices if args.allow_tf32 and torch.cuda.is_available(): torch.backends.cuda.matmul.allow_tf32 = True if args.scale_lr: args.learning_rate = ( args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes ) # Make sure the trainable params are in float32. 
if args.mixed_precision == "fp16": models = [transformer] # only upcast trainable parameters (LoRA) into fp32 cast_training_params(models, dtype=torch.float32) transformer_lora_parameters = list(filter(lambda p: p.requires_grad, transformer.parameters())) # Optimization parameters transformer_parameters_with_lr = {"params": transformer_lora_parameters, "lr": args.learning_rate} params_to_optimize = [transformer_parameters_with_lr] # Optimizer creation if not (args.optimizer.lower() == "prodigy" or args.optimizer.lower() == "adamw"): logger.warning( f"Unsupported choice of optimizer: {args.optimizer}.Supported optimizers include [adamW, prodigy]." "Defaulting to adamW" ) args.optimizer = "adamw" if args.use_8bit_adam and not args.optimizer.lower() == "adamw": logger.warning( f"use_8bit_adam is ignored when optimizer is not set to 'AdamW'. Optimizer was " f"set to {args.optimizer.lower()}" ) if args.optimizer.lower() == "adamw": if args.use_8bit_adam: try: import bitsandbytes as bnb except ImportError: raise ImportError( "To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`." ) optimizer_class = bnb.optim.AdamW8bit else: optimizer_class = torch.optim.AdamW optimizer = optimizer_class( params_to_optimize, betas=(args.adam_beta1, args.adam_beta2), weight_decay=args.adam_weight_decay, eps=args.adam_epsilon, ) if args.optimizer.lower() == "prodigy": try: import prodigyopt except ImportError: raise ImportError("To use Prodigy, please install the prodigyopt library: `pip install prodigyopt`") optimizer_class = prodigyopt.Prodigy if args.learning_rate <= 0.1: logger.warning( "Learning rate is too low. 
When using prodigy, it's generally better to set learning rate around 1.0" ) optimizer = optimizer_class( params_to_optimize, betas=(args.adam_beta1, args.adam_beta2), beta3=args.prodigy_beta3, weight_decay=args.adam_weight_decay, eps=args.adam_epsilon, decouple=args.prodigy_decouple, use_bias_correction=args.prodigy_use_bias_correction, safeguard_warmup=args.prodigy_safeguard_warmup, ) # Dataset and DataLoaders creation: train_dataset = DreamBoothDataset( instance_data_root=args.instance_data_dir, instance_prompt=args.instance_prompt, class_prompt=args.class_prompt, class_data_root=args.class_data_dir if args.with_prior_preservation else None, class_num=args.num_class_images, size=args.resolution, repeats=args.repeats, center_crop=args.center_crop, ) train_dataloader = torch.utils.data.DataLoader( train_dataset, batch_size=args.train_batch_size, shuffle=True, collate_fn=lambda examples: collate_fn(examples, args.with_prior_preservation), num_workers=args.dataloader_num_workers, ) def compute_text_embeddings(prompt, text_encoding_pipeline): text_encoding_pipeline = text_encoding_pipeline.to(accelerator.device) with torch.no_grad(): prompt_embeds, prompt_attention_mask, _, _ = text_encoding_pipeline.encode_prompt( prompt, max_sequence_length=args.max_sequence_length, system_prompt=args.system_prompt, ) if args.offload: text_encoding_pipeline = text_encoding_pipeline.to("cpu") prompt_embeds = prompt_embeds.to(transformer.dtype) return prompt_embeds, prompt_attention_mask # If no type of tuning is done on the text_encoder and custom instance prompts are NOT # provided (i.e. the --instance_prompt is used for all images), we encode the instance prompt once to avoid # the redundant encoding. if not train_dataset.custom_instance_prompts: instance_prompt_hidden_states, instance_prompt_attention_mask = compute_text_embeddings( args.instance_prompt, text_encoding_pipeline ) # Handle class prompt for prior-preservation. 
if args.with_prior_preservation: class_prompt_hidden_states, class_prompt_attention_mask = compute_text_embeddings( args.class_prompt, text_encoding_pipeline ) # Clear the memory here if not train_dataset.custom_instance_prompts: del text_encoder, tokenizer free_memory() # If custom instance prompts are NOT provided (i.e. the instance prompt is used for all images), # pack the statically computed variables appropriately here. This is so that we don't # have to pass them to the dataloader. if not train_dataset.custom_instance_prompts: prompt_embeds = instance_prompt_hidden_states prompt_attention_mask = instance_prompt_attention_mask if args.with_prior_preservation: prompt_embeds = torch.cat([prompt_embeds, class_prompt_hidden_states], dim=0) prompt_attention_mask = torch.cat([prompt_attention_mask, class_prompt_attention_mask], dim=0) vae_config_scaling_factor = vae.config.scaling_factor vae_config_shift_factor = vae.config.shift_factor if args.cache_latents: latents_cache = [] vae = vae.to(accelerator.device) for batch in tqdm(train_dataloader, desc="Caching latents"): with torch.no_grad(): batch["pixel_values"] = batch["pixel_values"].to( accelerator.device, non_blocking=True, dtype=vae.dtype ) latents_cache.append(vae.encode(batch["pixel_values"]).latent_dist) if args.validation_prompt is None: del vae free_memory() # Scheduler and math around the number of training steps. overrode_max_train_steps = False num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if args.max_train_steps is None: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch overrode_max_train_steps = True lr_scheduler = get_scheduler( args.lr_scheduler, optimizer=optimizer, num_warmup_steps=args.lr_warmup_steps * accelerator.num_processes, num_training_steps=args.max_train_steps * accelerator.num_processes, num_cycles=args.lr_num_cycles, power=args.lr_power, ) # Prepare everything with our `accelerator`. 
transformer, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( transformer, optimizer, train_dataloader, lr_scheduler ) # We need to recalculate our total training steps as the size of the training dataloader may have changed. num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if overrode_max_train_steps: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch # Afterwards we recalculate our number of training epochs args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) # We need to initialize the trackers we use, and also store our configuration. # The trackers initializes automatically on the main process. if accelerator.is_main_process: tracker_name = "dreambooth-lumina2-lora" accelerator.init_trackers(tracker_name, config=vars(args)) # Train! total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps logger.info("***** Running training *****") logger.info(f" Num examples = {len(train_dataset)}") logger.info(f" Num batches each epoch = {len(train_dataloader)}") logger.info(f" Num Epochs = {args.num_train_epochs}") logger.info(f" Instantaneous batch size per device = {args.train_batch_size}") logger.info(f" Total train batch size (w. 
parallel, distributed & accumulation) = {total_batch_size}") logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") logger.info(f" Total optimization steps = {args.max_train_steps}") global_step = 0 first_epoch = 0 # Potentially load in the weights and states from a previous save if args.resume_from_checkpoint: if args.resume_from_checkpoint != "latest": path = os.path.basename(args.resume_from_checkpoint) else: # Get the mos recent checkpoint dirs = os.listdir(args.output_dir) dirs = [d for d in dirs if d.startswith("checkpoint")] dirs = sorted(dirs, key=lambda x: int(x.split("-")[1])) path = dirs[-1] if len(dirs) > 0 else None if path is None: accelerator.print( f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run." ) args.resume_from_checkpoint = None initial_global_step = 0 else: accelerator.print(f"Resuming from checkpoint {path}") accelerator.load_state(os.path.join(args.output_dir, path)) global_step = int(path.split("-")[1]) initial_global_step = global_step first_epoch = global_step // num_update_steps_per_epoch else: initial_global_step = 0 progress_bar = tqdm( range(0, args.max_train_steps), initial=initial_global_step, desc="Steps", # Only show the progress bar once on each machine. 
disable=not accelerator.is_local_main_process, ) def get_sigmas(timesteps, n_dim=4, dtype=torch.float32): sigmas = noise_scheduler_copy.sigmas.to(device=accelerator.device, dtype=dtype) schedule_timesteps = noise_scheduler_copy.timesteps.to(accelerator.device) timesteps = timesteps.to(accelerator.device) step_indices = [(schedule_timesteps == t).nonzero().item() for t in timesteps] sigma = sigmas[step_indices].flatten() while len(sigma.shape) < n_dim: sigma = sigma.unsqueeze(-1) return sigma for epoch in range(first_epoch, args.num_train_epochs): transformer.train() for step, batch in enumerate(train_dataloader): models_to_accumulate = [transformer] prompts = batch["prompts"] with accelerator.accumulate(models_to_accumulate): # encode batch prompts when custom prompts are provided for each image - if train_dataset.custom_instance_prompts: prompt_embeds, prompt_attention_mask = compute_text_embeddings(prompts, text_encoding_pipeline) # Convert images to latent space if args.cache_latents: model_input = latents_cache[step].sample() else: vae = vae.to(accelerator.device) pixel_values = batch["pixel_values"].to(dtype=vae.dtype) model_input = vae.encode(pixel_values).latent_dist.sample() if args.offload: vae = vae.to("cpu") model_input = (model_input - vae_config_shift_factor) * vae_config_scaling_factor model_input = model_input.to(dtype=weight_dtype) # Sample noise that we'll add to the latents noise = torch.randn_like(model_input) bsz = model_input.shape[0] # Sample a random timestep for each image # for weighting schemes where we sample timesteps non-uniformly u = compute_density_for_timestep_sampling( weighting_scheme=args.weighting_scheme, batch_size=bsz, logit_mean=args.logit_mean, logit_std=args.logit_std, mode_scale=args.mode_scale, ) indices = (u * noise_scheduler_copy.config.num_train_timesteps).long() timesteps = noise_scheduler_copy.timesteps[indices].to(device=model_input.device) # Add noise according to flow matching. 
# zt = (1 - texp) * x + texp * z1 # Lumina2 reverses the lerp i.e., sigma of 1.0 should mean `model_input` sigmas = get_sigmas(timesteps, n_dim=model_input.ndim, dtype=model_input.dtype) noisy_model_input = (1.0 - sigmas) * noise + sigmas * model_input # Predict the noise residual # scale the timesteps (reversal not needed as we used a reverse lerp above already) timesteps = timesteps / noise_scheduler.config.num_train_timesteps model_pred = transformer( hidden_states=noisy_model_input, encoder_hidden_states=prompt_embeds.repeat(len(prompts), 1, 1) if not train_dataset.custom_instance_prompts else prompt_embeds, encoder_attention_mask=prompt_attention_mask.repeat(len(prompts), 1) if not train_dataset.custom_instance_prompts else prompt_attention_mask, timestep=timesteps, return_dict=False, )[0] # these weighting schemes use a uniform timestep sampling # and instead post-weight the loss weighting = compute_loss_weighting_for_sd3(weighting_scheme=args.weighting_scheme, sigmas=sigmas) # flow matching loss (reversed) target = model_input - noise if args.with_prior_preservation: # Chunk the noise and model_pred into two parts and compute the loss on each part separately. model_pred, model_pred_prior = torch.chunk(model_pred, 2, dim=0) target, target_prior = torch.chunk(target, 2, dim=0) # Compute prior loss prior_loss = torch.mean( (weighting.float() * (model_pred_prior.float() - target_prior.float()) ** 2).reshape( target_prior.shape[0], -1 ), 1, ) prior_loss = prior_loss.mean() # Compute regular loss. loss = torch.mean( (weighting.float() * (model_pred.float() - target.float()) ** 2).reshape(target.shape[0], -1), 1, ) loss = loss.mean() if args.with_prior_preservation: # Add the prior loss to the instance loss. 
loss = loss + args.prior_loss_weight * prior_loss accelerator.backward(loss) if accelerator.sync_gradients: params_to_clip = transformer.parameters() accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm) optimizer.step() lr_scheduler.step() optimizer.zero_grad() # Checks if the accelerator has performed an optimization step behind the scenes if accelerator.sync_gradients: progress_bar.update(1) global_step += 1 if accelerator.is_main_process: if global_step % args.checkpointing_steps == 0: # _before_ saving state, check if this save would set us over the `checkpoints_total_limit` if args.checkpoints_total_limit is not None: checkpoints = os.listdir(args.output_dir) checkpoints = [d for d in checkpoints if d.startswith("checkpoint")] checkpoints = sorted(checkpoints, key=lambda x: int(x.split("-")[1])) # before we save the new checkpoint, we need to have at _most_ `checkpoints_total_limit - 1` checkpoints if len(checkpoints) >= args.checkpoints_total_limit: num_to_remove = len(checkpoints) - args.checkpoints_total_limit + 1 removing_checkpoints = checkpoints[0:num_to_remove] logger.info( f"{len(checkpoints)} checkpoints already exist, removing {len(removing_checkpoints)} checkpoints" ) logger.info(f"removing checkpoints: {', '.join(removing_checkpoints)}") for removing_checkpoint in removing_checkpoints: removing_checkpoint = os.path.join(args.output_dir, removing_checkpoint) shutil.rmtree(removing_checkpoint) save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}") accelerator.save_state(save_path) logger.info(f"Saved state to {save_path}") logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]} progress_bar.set_postfix(**logs) accelerator.log(logs, step=global_step) if global_step >= args.max_train_steps: break if accelerator.is_main_process: if args.validation_prompt is not None and epoch % args.validation_epochs == 0: # create pipeline pipeline = Lumina2Pipeline.from_pretrained( args.pretrained_model_name_or_path, 
transformer=accelerator.unwrap_model(transformer), revision=args.revision, variant=args.variant, torch_dtype=weight_dtype, ) pipeline_args = {"prompt": args.validation_prompt, "system_prompt": args.system_prompt} images = log_validation( pipeline=pipeline, args=args, accelerator=accelerator, pipeline_args=pipeline_args, epoch=epoch, ) free_memory() images = None del pipeline # Save the lora layers accelerator.wait_for_everyone() if accelerator.is_main_process: transformer = unwrap_model(transformer) if args.upcast_before_saving: transformer.to(torch.float32) else: transformer = transformer.to(weight_dtype) transformer_lora_layers = get_peft_model_state_dict(transformer) Lumina2Pipeline.save_lora_weights( save_directory=args.output_dir, transformer_lora_layers=transformer_lora_layers, ) # Final inference # Load previous pipeline pipeline = Lumina2Pipeline.from_pretrained( args.pretrained_model_name_or_path, revision=args.revision, variant=args.variant, torch_dtype=weight_dtype, ) # load attention processors pipeline.load_lora_weights(args.output_dir) # run inference images = [] if (args.validation_prompt and args.num_validation_images > 0) or (args.final_validation_prompt): prompt_to_use = args.validation_prompt if args.validation_prompt else args.final_validation_prompt args.num_validation_images = args.num_validation_images if args.num_validation_images else 1 pipeline_args = {"prompt": prompt_to_use, "system_prompt": args.system_prompt} images = log_validation( pipeline=pipeline, args=args, accelerator=accelerator, pipeline_args=pipeline_args, epoch=epoch, is_final_validation=True, ) if args.push_to_hub: validation_prpmpt = args.validation_prompt if args.validation_prompt else args.final_validation_prompt save_model_card( repo_id, images=images, base_model=args.pretrained_model_name_or_path, instance_prompt=args.instance_prompt, system_prompt=args.system_prompt, validation_prompt=validation_prpmpt, repo_folder=args.output_dir, ) upload_folder( repo_id=repo_id, 
folder_path=args.output_dir, commit_message="End of training", ignore_patterns=["step_*", "epoch_*"], ) images = None del pipeline accelerator.end_training() if __name__ == "__main__": args = parse_args() main(args)
{ "repo_id": "huggingface/diffusers", "file_path": "examples/dreambooth/train_dreambooth_lora_lumina2.py", "license": "Apache License 2.0", "lines": 1386, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:tests/lora/test_lora_layers_lumina2.py
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest

import numpy as np
import pytest
import torch
from transformers import AutoTokenizer, GemmaForCausalLM

from diffusers import (
    AutoencoderKL,
    FlowMatchEulerDiscreteScheduler,
    Lumina2Pipeline,
    Lumina2Transformer2DModel,
)

from ..testing_utils import floats_tensor, is_torch_version, require_peft_backend, skip_mps, torch_device


# Make the sibling `utils` module importable when tests are run from the repo root.
sys.path.append(".")

from .utils import PeftLoraLoaderMixinTests, check_if_lora_correctly_set  # noqa: E402


@require_peft_backend
class Lumina2LoRATests(unittest.TestCase, PeftLoraLoaderMixinTests):
    """LoRA loading/fusing/unloading tests for `Lumina2Pipeline`.

    Most test logic lives in `PeftLoraLoaderMixinTests`; this class only supplies
    tiny model configurations (so the shared tests run quickly on CPU) plus a few
    Lumina2-specific overrides and skips.
    """

    pipeline_class = Lumina2Pipeline
    scheduler_cls = FlowMatchEulerDiscreteScheduler
    scheduler_kwargs = {}

    # Deliberately tiny transformer config used by the mixin to build a dummy denoiser.
    transformer_kwargs = {
        "sample_size": 4,
        "patch_size": 2,
        "in_channels": 4,
        "hidden_size": 8,
        "num_layers": 2,
        "num_attention_heads": 1,
        "num_kv_heads": 1,
        "multiple_of": 16,
        "ffn_dim_multiplier": None,
        "norm_eps": 1e-5,
        "scaling_factor": 1.0,
        "axes_dim_rope": [4, 2, 2],
        "cap_feat_dim": 8,
    }
    transformer_cls = Lumina2Transformer2DModel

    # Matching tiny VAE config (single block, 4 latent channels).
    vae_kwargs = {
        "sample_size": 32,
        "in_channels": 3,
        "out_channels": 3,
        "block_out_channels": (4,),
        "layers_per_block": 1,
        "latent_channels": 4,
        "norm_num_groups": 1,
        "use_quant_conv": False,
        "use_post_quant_conv": False,
        "shift_factor": 0.0609,
        "scaling_factor": 1.5035,
    }
    vae_cls = AutoencoderKL

    # Dummy Gemma tokenizer/text-encoder checkpoints hosted for internal testing.
    tokenizer_cls, tokenizer_id = AutoTokenizer, "hf-internal-testing/dummy-gemma"
    text_encoder_cls, text_encoder_id = GemmaForCausalLM, "hf-internal-testing/dummy-gemma-diffusers"

    # Lumina2 LoRA loading targets only the transformer, not the text encoder.
    supports_text_encoder_loras = False

    @property
    def output_shape(self):
        """Expected (batch, height, width, channels) shape of the pipeline's np output."""
        return (1, 4, 4, 3)

    def get_dummy_inputs(self, with_generator=True):
        """Build minimal pipeline inputs (plus a noise tensor and token ids) for the mixin tests.

        Returns:
            tuple: `(noise, input_ids, pipeline_inputs)` — only `pipeline_inputs` is fed to the
            pipeline; `noise`/`input_ids` are extra fixtures some mixin tests use.
        """
        batch_size = 1
        sequence_length = 16
        num_channels = 4
        sizes = (32, 32)

        generator = torch.manual_seed(0)
        noise = floats_tensor((batch_size, num_channels) + sizes)
        input_ids = torch.randint(1, sequence_length, size=(batch_size, sequence_length), generator=generator)

        pipeline_inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "num_inference_steps": 2,
            "guidance_scale": 5.0,
            "height": 32,
            "width": 32,
            "output_type": "np",
        }
        if with_generator:
            pipeline_inputs.update({"generator": generator})

        return noise, input_ids, pipeline_inputs

    @unittest.skip("Not supported in Lumina2.")
    def test_simple_inference_with_text_denoiser_block_scale(self):
        pass

    @unittest.skip("Not supported in Lumina2.")
    def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self):
        pass

    @unittest.skip("Not supported in Lumina2.")
    def test_modify_padding_mode(self):
        pass

    @skip_mps
    @pytest.mark.xfail(
        condition=torch.device(torch_device).type == "cpu" and is_torch_version(">=", "2.5"),
        reason="Test currently fails on CPU and PyTorch 2.5.1 but not on PyTorch 2.4.1.",
        strict=False,
    )
    def test_lora_fuse_nan(self):
        """Fusing a LoRA corrupted with `inf` must raise under `safe_fusing=True` and
        silently produce an all-NaN image under `safe_fusing=False`."""
        components, text_lora_config, denoiser_lora_config = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        _, _, inputs = self.get_dummy_inputs(with_generator=False)

        if "text_encoder" in self.pipeline_class._lora_loadable_modules:
            pipe.text_encoder.add_adapter(text_lora_config, "adapter-1")
            self.assertTrue(check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder")

        denoiser = pipe.transformer if self.unet_kwargs is None else pipe.unet
        denoiser.add_adapter(denoiser_lora_config, "adapter-1")
        self.assertTrue(check_if_lora_correctly_set(denoiser), "Lora not correctly set in denoiser.")

        # corrupt one LoRA weight with `inf` values
        with torch.no_grad():
            pipe.transformer.layers[0].attn.to_q.lora_A["adapter-1"].weight += float("inf")

        # with `safe_fusing=True` we should see an Error
        with self.assertRaises(ValueError):
            pipe.fuse_lora(components=self.pipeline_class._lora_loadable_modules, safe_fusing=True)

        # without we should not see an error, but every image will be black
        pipe.fuse_lora(components=self.pipeline_class._lora_loadable_modules, safe_fusing=False)
        out = pipe(**inputs)[0]

        self.assertTrue(np.isnan(out).all())
{ "repo_id": "huggingface/diffusers", "file_path": "tests/lora/test_lora_layers_lumina2.py", "license": "Apache License 2.0", "lines": 125, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/diffusers:scripts/convert_cogview4_to_diffusers.py
"""
Convert a CogView4 checkpoint from SAT (https://github.com/THUDM/SwissArmyTransformer) to the Diffusers format.
(deprecated since 2025-02-07 and will be removed in a later CogView4 version)

This script converts a CogView4 checkpoint to the Diffusers format, which can then be used
with the Diffusers library.

Example usage:
    python scripts/convert_cogview4_to_diffusers.py \
        --transformer_checkpoint_path 'your path/cogview4_6b/1/mp_rank_00_model_states.pt' \
        --vae_checkpoint_path 'your path/cogview4_6b/imagekl_ch16.pt' \
        --output_path "THUDM/CogView4-6B" \
        --dtype "bf16"

Arguments:
    --transformer_checkpoint_path: Path to Transformer state dict.
    --vae_checkpoint_path: Path to VAE state dict.
    --output_path: The path to save the converted model.
    --push_to_hub: Whether to push the converted checkpoint to the HF Hub or not. Defaults to `False`.
    --text_encoder_cache_dir: Cache directory where text encoder is located. Defaults to None, which means
        HF_HOME will be used.
    --dtype: The dtype to save the model in (default: "bf16", options: "fp16", "bf16", "fp32").
        Default is "bf16" because CogView4 uses bfloat16 for training.

Note: You must provide at least one of --transformer_checkpoint_path or --vae_checkpoint_path.
"""

import argparse
from contextlib import nullcontext

import torch
from accelerate import init_empty_weights
from transformers import GlmForCausalLM, PreTrainedTokenizerFast

from diffusers import AutoencoderKL, CogView4Pipeline, CogView4Transformer2DModel, FlowMatchEulerDiscreteScheduler
from diffusers.loaders.single_file_utils import convert_ldm_vae_checkpoint
from diffusers.utils.import_utils import is_accelerate_available


CTX = init_empty_weights if is_accelerate_available() else nullcontext

parser = argparse.ArgumentParser()
parser.add_argument("--transformer_checkpoint_path", default=None, type=str)
parser.add_argument("--vae_checkpoint_path", default=None, type=str)
parser.add_argument("--output_path", required=True, type=str)
parser.add_argument("--push_to_hub", action="store_true", default=False, help="Whether to push to HF Hub after saving")
parser.add_argument("--text_encoder_cache_dir", type=str, default=None, help="Path to text encoder cache directory")
parser.add_argument("--dtype", type=str, default="bf16")

args = parser.parse_args()


# This is specific to `AdaLayerNormContinuous`:
# the Diffusers implementation splits the linear projection into (scale, shift) while CogView4 splits it into (shift, scale)
def swap_scale_shift(weight, dim):
    """
    Swap the scale and shift components in the weight tensor.

    Args:
        weight (torch.Tensor): The original weight tensor.
        dim (int): The dimension along which to split.

    Returns:
        torch.Tensor: The modified weight tensor with scale and shift swapped.
    """
    shift, scale = weight.chunk(2, dim=dim)
    new_weight = torch.cat([scale, shift], dim=dim)
    return new_weight


def convert_cogview4_transformer_checkpoint_to_diffusers(ckpt_path, num_layers=28):
    """
    Convert a SAT CogView4 transformer checkpoint to the Diffusers key layout.

    Args:
        ckpt_path (str): Path to the SAT transformer checkpoint (e.g. ``mp_rank_00_model_states.pt``).
        num_layers (int): Number of transformer blocks in the checkpoint. Defaults to 28 (CogView4-6B).

    Returns:
        dict: State dict with Diffusers-style keys for `CogView4Transformer2DModel`.
    """
    # SAT checkpoints are full pickles, so `weights_only=False` is required on torch>=2.6 where the
    # default flipped to True. Only load checkpoints obtained from a trusted source.
    original_state_dict = torch.load(ckpt_path, map_location="cpu", weights_only=False)
    original_state_dict = original_state_dict["module"]
    original_state_dict = {k.replace("model.diffusion_model.", ""): v for k, v in original_state_dict.items()}

    new_state_dict = {}

    # Convert patch_embed
    new_state_dict["patch_embed.proj.weight"] = original_state_dict.pop("mixins.patch_embed.proj.weight")
    new_state_dict["patch_embed.proj.bias"] = original_state_dict.pop("mixins.patch_embed.proj.bias")
    new_state_dict["patch_embed.text_proj.weight"] = original_state_dict.pop("mixins.patch_embed.text_proj.weight")
    new_state_dict["patch_embed.text_proj.bias"] = original_state_dict.pop("mixins.patch_embed.text_proj.bias")

    # Convert time_condition_embed
    new_state_dict["time_condition_embed.timestep_embedder.linear_1.weight"] = original_state_dict.pop(
        "time_embed.0.weight"
    )
    new_state_dict["time_condition_embed.timestep_embedder.linear_1.bias"] = original_state_dict.pop(
        "time_embed.0.bias"
    )
    new_state_dict["time_condition_embed.timestep_embedder.linear_2.weight"] = original_state_dict.pop(
        "time_embed.2.weight"
    )
    new_state_dict["time_condition_embed.timestep_embedder.linear_2.bias"] = original_state_dict.pop(
        "time_embed.2.bias"
    )
    new_state_dict["time_condition_embed.condition_embedder.linear_1.weight"] = original_state_dict.pop(
        "label_emb.0.0.weight"
    )
    new_state_dict["time_condition_embed.condition_embedder.linear_1.bias"] = original_state_dict.pop(
        "label_emb.0.0.bias"
    )
    new_state_dict["time_condition_embed.condition_embedder.linear_2.weight"] = original_state_dict.pop(
        "label_emb.0.2.weight"
    )
    new_state_dict["time_condition_embed.condition_embedder.linear_2.bias"] = original_state_dict.pop(
        "label_emb.0.2.bias"
    )

    # Convert transformer blocks (28 for CogView4-6B); the fused SAT QKV projection is split into
    # separate to_q/to_k/to_v projections expected by Diffusers.
    for i in range(num_layers):
        block_prefix = f"transformer_blocks.{i}."
        old_prefix = f"transformer.layers.{i}."
        adaln_prefix = f"mixins.adaln.adaln_modules.{i}."

        new_state_dict[block_prefix + "norm1.linear.weight"] = original_state_dict.pop(adaln_prefix + "1.weight")
        new_state_dict[block_prefix + "norm1.linear.bias"] = original_state_dict.pop(adaln_prefix + "1.bias")

        qkv_weight = original_state_dict.pop(old_prefix + "attention.query_key_value.weight")
        qkv_bias = original_state_dict.pop(old_prefix + "attention.query_key_value.bias")
        q, k, v = qkv_weight.chunk(3, dim=0)
        q_bias, k_bias, v_bias = qkv_bias.chunk(3, dim=0)

        new_state_dict[block_prefix + "attn1.to_q.weight"] = q
        new_state_dict[block_prefix + "attn1.to_q.bias"] = q_bias
        new_state_dict[block_prefix + "attn1.to_k.weight"] = k
        new_state_dict[block_prefix + "attn1.to_k.bias"] = k_bias
        new_state_dict[block_prefix + "attn1.to_v.weight"] = v
        new_state_dict[block_prefix + "attn1.to_v.bias"] = v_bias

        new_state_dict[block_prefix + "attn1.to_out.0.weight"] = original_state_dict.pop(
            old_prefix + "attention.dense.weight"
        )
        new_state_dict[block_prefix + "attn1.to_out.0.bias"] = original_state_dict.pop(
            old_prefix + "attention.dense.bias"
        )

        new_state_dict[block_prefix + "ff.net.0.proj.weight"] = original_state_dict.pop(
            old_prefix + "mlp.dense_h_to_4h.weight"
        )
        new_state_dict[block_prefix + "ff.net.0.proj.bias"] = original_state_dict.pop(
            old_prefix + "mlp.dense_h_to_4h.bias"
        )
        new_state_dict[block_prefix + "ff.net.2.weight"] = original_state_dict.pop(
            old_prefix + "mlp.dense_4h_to_h.weight"
        )
        new_state_dict[block_prefix + "ff.net.2.bias"] = original_state_dict.pop(old_prefix + "mlp.dense_4h_to_h.bias")

    # Convert final norm and projection (swap (shift, scale) -> (scale, shift), see swap_scale_shift)
    new_state_dict["norm_out.linear.weight"] = swap_scale_shift(
        original_state_dict.pop("mixins.final_layer.adaln.1.weight"), dim=0
    )
    new_state_dict["norm_out.linear.bias"] = swap_scale_shift(
        original_state_dict.pop("mixins.final_layer.adaln.1.bias"), dim=0
    )
    new_state_dict["proj_out.weight"] = original_state_dict.pop("mixins.final_layer.linear.weight")
    new_state_dict["proj_out.bias"] = original_state_dict.pop("mixins.final_layer.linear.bias")

    return new_state_dict


def convert_cogview4_vae_checkpoint_to_diffusers(ckpt_path, vae_config):
    """
    Convert a CogView4 VAE checkpoint to the Diffusers key layout.

    Args:
        ckpt_path (str): Path to the VAE checkpoint (e.g. ``imagekl_ch16.pt``).
        vae_config (dict): `AutoencoderKL` configuration used to shape the conversion.

    Returns:
        dict: The converted VAE state dictionary compatible with Diffusers.
    """
    # Same trust caveat as above: the checkpoint is a full pickle.
    original_state_dict = torch.load(ckpt_path, map_location="cpu", weights_only=False)["state_dict"]
    return convert_ldm_vae_checkpoint(original_state_dict, vae_config)


def main(args):
    """Assemble and save a `CogView4Pipeline` from the converted transformer/VAE checkpoints."""
    if args.dtype == "fp16":
        dtype = torch.float16
    elif args.dtype == "bf16":
        dtype = torch.bfloat16
    elif args.dtype == "fp32":
        dtype = torch.float32
    else:
        raise ValueError(f"Unsupported dtype: {args.dtype}")

    transformer = None
    vae = None

    if args.transformer_checkpoint_path is not None:
        converted_transformer_state_dict = convert_cogview4_transformer_checkpoint_to_diffusers(
            args.transformer_checkpoint_path
        )
        # CogView4-6B architecture hyperparameters.
        transformer = CogView4Transformer2DModel(
            patch_size=2,
            in_channels=16,
            num_layers=28,
            attention_head_dim=128,
            num_attention_heads=32,
            out_channels=16,
            text_embed_dim=4096,
            time_embed_dim=512,
            condition_dim=256,
            pos_embed_max_size=128,
        )
        transformer.load_state_dict(converted_transformer_state_dict, strict=True)
        # Cast to the requested dtype (dtype is always set here; a None dtype would preserve
        # the checkpoint's original data type).
        if dtype is not None:
            transformer = transformer.to(dtype=dtype)

    if args.vae_checkpoint_path is not None:
        vae_config = {
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ("DownEncoderBlock2D",) * 4,
            "up_block_types": ("UpDecoderBlock2D",) * 4,
            "block_out_channels": (128, 512, 1024, 1024),
            "layers_per_block": 3,
            "act_fn": "silu",
            "latent_channels": 16,
            "norm_num_groups": 32,
            "sample_size": 1024,
            "scaling_factor": 1.0,
            "shift_factor": 0.0,
            "force_upcast": True,
            "use_quant_conv": False,
            "use_post_quant_conv": False,
            "mid_block_add_attention": False,
        }
        converted_vae_state_dict = convert_cogview4_vae_checkpoint_to_diffusers(args.vae_checkpoint_path, vae_config)
        vae = AutoencoderKL(**vae_config)
        vae.load_state_dict(converted_vae_state_dict, strict=True)
        if dtype is not None:
            vae = vae.to(dtype=dtype)

    # The GLM-4 text encoder is downloaded from the Hub rather than converted.
    text_encoder_id = "THUDM/glm-4-9b-hf"
    tokenizer = PreTrainedTokenizerFast.from_pretrained(text_encoder_id)
    text_encoder = GlmForCausalLM.from_pretrained(
        text_encoder_id,
        cache_dir=args.text_encoder_cache_dir,
        torch_dtype=torch.bfloat16 if args.dtype == "bf16" else torch.float32,
    )

    # Safetensors serialization requires contiguous tensors.
    for param in text_encoder.parameters():
        param.data = param.data.contiguous()

    scheduler = FlowMatchEulerDiscreteScheduler(
        base_shift=0.25, max_shift=0.75, base_image_seq_len=256, use_dynamic_shifting=True, time_shift_type="linear"
    )

    pipe = CogView4Pipeline(
        tokenizer=tokenizer,
        text_encoder=text_encoder,
        vae=vae,
        transformer=transformer,
        scheduler=scheduler,
    )

    # This is necessary for users with insufficient memory, such as those using Colab and notebooks, as it can
    # save some memory used for model loading.
    pipe.save_pretrained(args.output_path, safe_serialization=True, max_shard_size="5GB", push_to_hub=args.push_to_hub)


if __name__ == "__main__":
    main(args)
{ "repo_id": "huggingface/diffusers", "file_path": "scripts/convert_cogview4_to_diffusers.py", "license": "Apache License 2.0", "lines": 212, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
huggingface/diffusers:scripts/convert_cogview4_to_diffusers_megatron.py
""" Convert a CogView4 checkpoint from Megatron to the Diffusers format. Example usage: python scripts/convert_cogview4_to_diffusers.py \ --transformer_checkpoint_path 'your path/cogview4_6b/mp_rank_00/model_optim_rng.pt' \ --vae_checkpoint_path 'your path/cogview4_6b/imagekl_ch16.pt' \ --output_path "THUDM/CogView4-6B" \ --dtype "bf16" Arguments: --transformer_checkpoint_path: Path to Transformer state dict. --vae_checkpoint_path: Path to VAE state dict. --output_path: The path to save the converted model. --push_to_hub: Whether to push the converted checkpoint to the HF Hub or not. Defaults to `False`. --text_encoder_cache_dir: Cache directory where text encoder is located. Defaults to None, which means HF_HOME will be used. --dtype: The dtype to save the model in (default: "bf16", options: "fp16", "bf16", "fp32"). If None, the dtype of the state dict is considered. Default is "bf16" because CogView4 uses bfloat16 for training. Note: You must provide either --transformer_checkpoint_path or --vae_checkpoint_path. 
""" import argparse import torch from tqdm import tqdm from transformers import GlmModel, PreTrainedTokenizerFast from diffusers import ( AutoencoderKL, CogView4ControlPipeline, CogView4Pipeline, CogView4Transformer2DModel, FlowMatchEulerDiscreteScheduler, ) from diffusers.loaders.single_file_utils import convert_ldm_vae_checkpoint parser = argparse.ArgumentParser() parser.add_argument( "--transformer_checkpoint_path", default=None, type=str, help="Path to Megatron (not SAT) Transformer checkpoint, e.g., 'model_optim_rng.pt'.", ) parser.add_argument( "--vae_checkpoint_path", default=None, type=str, help="(Optional) Path to VAE checkpoint, e.g., 'imagekl_ch16.pt'.", ) parser.add_argument( "--output_path", required=True, type=str, help="Directory to save the final Diffusers format pipeline.", ) parser.add_argument( "--push_to_hub", action="store_true", default=False, help="Whether to push the converted model to the HuggingFace Hub.", ) parser.add_argument( "--text_encoder_cache_dir", type=str, default=None, help="Specify the cache directory for the text encoder.", ) parser.add_argument( "--dtype", type=str, default="bf16", choices=["fp16", "bf16", "fp32"], help="Data type to save the model in.", ) parser.add_argument( "--num_layers", type=int, default=28, help="Number of Transformer layers (e.g., 28, 48...).", ) parser.add_argument( "--num_heads", type=int, default=32, help="Number of attention heads.", ) parser.add_argument( "--hidden_size", type=int, default=4096, help="Transformer hidden dimension size.", ) parser.add_argument( "--attention_head_dim", type=int, default=128, help="Dimension of each attention head.", ) parser.add_argument( "--time_embed_dim", type=int, default=512, help="Dimension of time embeddings.", ) parser.add_argument( "--condition_dim", type=int, default=256, help="Dimension of condition embeddings.", ) parser.add_argument( "--pos_embed_max_size", type=int, default=128, help="Maximum size for positional embeddings.", ) parser.add_argument( 
"--control", action="store_true", default=False, help="Whether to use control model.", ) args = parser.parse_args() def swap_scale_shift(weight, dim): """ Swap the scale and shift components in the weight tensor. Args: weight (torch.Tensor): The original weight tensor. dim (int): The dimension along which to split. Returns: torch.Tensor: The modified weight tensor with scale and shift swapped. """ shift, scale = weight.chunk(2, dim=dim) new_weight = torch.cat([scale, shift], dim=dim) return new_weight def convert_megatron_transformer_checkpoint_to_diffusers( ckpt_path: str, num_layers: int, num_heads: int, hidden_size: int, ): """ Convert a Megatron Transformer checkpoint to Diffusers format. Args: ckpt_path (str): Path to the Megatron Transformer checkpoint. num_layers (int): Number of Transformer layers. num_heads (int): Number of attention heads. hidden_size (int): Hidden size of the Transformer. Returns: dict: The converted state dictionary compatible with Diffusers. """ ckpt = torch.load(ckpt_path, map_location="cpu", weights_only=False) mega = ckpt["model"] new_state_dict = {} # Patch Embedding new_state_dict["patch_embed.proj.weight"] = mega["encoder_expand_linear.weight"].reshape( hidden_size, 128 if args.control else 64 ) new_state_dict["patch_embed.proj.bias"] = mega["encoder_expand_linear.bias"] new_state_dict["patch_embed.text_proj.weight"] = mega["text_projector.weight"] new_state_dict["patch_embed.text_proj.bias"] = mega["text_projector.bias"] # Time Condition Embedding new_state_dict["time_condition_embed.timestep_embedder.linear_1.weight"] = mega[ "time_embedding.time_embed.0.weight" ] new_state_dict["time_condition_embed.timestep_embedder.linear_1.bias"] = mega["time_embedding.time_embed.0.bias"] new_state_dict["time_condition_embed.timestep_embedder.linear_2.weight"] = mega[ "time_embedding.time_embed.2.weight" ] new_state_dict["time_condition_embed.timestep_embedder.linear_2.bias"] = mega["time_embedding.time_embed.2.bias"] 
new_state_dict["time_condition_embed.condition_embedder.linear_1.weight"] = mega[ "label_embedding.label_embed.0.weight" ] new_state_dict["time_condition_embed.condition_embedder.linear_1.bias"] = mega[ "label_embedding.label_embed.0.bias" ] new_state_dict["time_condition_embed.condition_embedder.linear_2.weight"] = mega[ "label_embedding.label_embed.2.weight" ] new_state_dict["time_condition_embed.condition_embedder.linear_2.bias"] = mega[ "label_embedding.label_embed.2.bias" ] # Convert each Transformer layer for i in tqdm(range(num_layers), desc="Converting layers (Megatron->Diffusers)"): block_prefix = f"transformer_blocks.{i}." # AdaLayerNorm new_state_dict[block_prefix + "norm1.linear.weight"] = mega[f"decoder.layers.{i}.adaln.weight"] new_state_dict[block_prefix + "norm1.linear.bias"] = mega[f"decoder.layers.{i}.adaln.bias"] qkv_weight = mega[f"decoder.layers.{i}.self_attention.linear_qkv.weight"] qkv_bias = mega[f"decoder.layers.{i}.self_attention.linear_qkv.bias"] # Reshape to match SAT logic qkv_weight = qkv_weight.view(num_heads, 3, hidden_size // num_heads, hidden_size) qkv_weight = qkv_weight.permute(1, 0, 2, 3).reshape(3 * hidden_size, hidden_size) qkv_bias = qkv_bias.view(num_heads, 3, hidden_size // num_heads) qkv_bias = qkv_bias.permute(1, 0, 2).reshape(3 * hidden_size) # Assign to Diffusers keys q, k, v = torch.chunk(qkv_weight, 3, dim=0) qb, kb, vb = torch.chunk(qkv_bias, 3, dim=0) new_state_dict[block_prefix + "attn1.to_q.weight"] = q new_state_dict[block_prefix + "attn1.to_q.bias"] = qb new_state_dict[block_prefix + "attn1.to_k.weight"] = k new_state_dict[block_prefix + "attn1.to_k.bias"] = kb new_state_dict[block_prefix + "attn1.to_v.weight"] = v new_state_dict[block_prefix + "attn1.to_v.bias"] = vb # Attention Output new_state_dict[block_prefix + "attn1.to_out.0.weight"] = mega[ f"decoder.layers.{i}.self_attention.linear_proj.weight" ] new_state_dict[block_prefix + "attn1.to_out.0.bias"] = mega[ 
f"decoder.layers.{i}.self_attention.linear_proj.bias" ] # MLP new_state_dict[block_prefix + "ff.net.0.proj.weight"] = mega[f"decoder.layers.{i}.mlp.linear_fc1.weight"] new_state_dict[block_prefix + "ff.net.0.proj.bias"] = mega[f"decoder.layers.{i}.mlp.linear_fc1.bias"] new_state_dict[block_prefix + "ff.net.2.weight"] = mega[f"decoder.layers.{i}.mlp.linear_fc2.weight"] new_state_dict[block_prefix + "ff.net.2.bias"] = mega[f"decoder.layers.{i}.mlp.linear_fc2.bias"] # Final Layers new_state_dict["norm_out.linear.weight"] = swap_scale_shift(mega["adaln_final.weight"], dim=0) new_state_dict["norm_out.linear.bias"] = swap_scale_shift(mega["adaln_final.bias"], dim=0) new_state_dict["proj_out.weight"] = mega["output_projector.weight"] new_state_dict["proj_out.bias"] = mega["output_projector.bias"] return new_state_dict def convert_cogview4_vae_checkpoint_to_diffusers(ckpt_path, vae_config): """ Convert a CogView4 VAE checkpoint to Diffusers format. Args: ckpt_path (str): Path to the VAE checkpoint. vae_config (dict): Configuration dictionary for the VAE. Returns: dict: The converted VAE state dictionary compatible with Diffusers. """ original_state_dict = torch.load(ckpt_path, map_location="cpu", weights_only=False)["state_dict"] return convert_ldm_vae_checkpoint(original_state_dict, vae_config) def main(args): """ Main function to convert CogView4 checkpoints to Diffusers format. Args: args (argparse.Namespace): Parsed command-line arguments. 
""" # Determine the desired data type if args.dtype == "fp16": dtype = torch.float16 elif args.dtype == "bf16": dtype = torch.bfloat16 elif args.dtype == "fp32": dtype = torch.float32 else: raise ValueError(f"Unsupported dtype: {args.dtype}") transformer = None vae = None # Convert Transformer checkpoint if provided if args.transformer_checkpoint_path is not None: converted_transformer_state_dict = convert_megatron_transformer_checkpoint_to_diffusers( ckpt_path=args.transformer_checkpoint_path, num_layers=args.num_layers, num_heads=args.num_heads, hidden_size=args.hidden_size, ) transformer = CogView4Transformer2DModel( patch_size=2, in_channels=32 if args.control else 16, num_layers=args.num_layers, attention_head_dim=args.attention_head_dim, num_attention_heads=args.num_heads, out_channels=16, text_embed_dim=args.hidden_size, time_embed_dim=args.time_embed_dim, condition_dim=args.condition_dim, pos_embed_max_size=args.pos_embed_max_size, ) transformer.load_state_dict(converted_transformer_state_dict, strict=True) # Convert to the specified dtype if dtype is not None: transformer = transformer.to(dtype=dtype) # Convert VAE checkpoint if provided if args.vae_checkpoint_path is not None: vae_config = { "in_channels": 3, "out_channels": 3, "down_block_types": ("DownEncoderBlock2D",) * 4, "up_block_types": ("UpDecoderBlock2D",) * 4, "block_out_channels": (128, 512, 1024, 1024), "layers_per_block": 3, "act_fn": "silu", "latent_channels": 16, "norm_num_groups": 32, "sample_size": 1024, "scaling_factor": 1.0, "shift_factor": 0.0, "force_upcast": True, "use_quant_conv": False, "use_post_quant_conv": False, "mid_block_add_attention": False, } converted_vae_state_dict = convert_cogview4_vae_checkpoint_to_diffusers(args.vae_checkpoint_path, vae_config) vae = AutoencoderKL(**vae_config) vae.load_state_dict(converted_vae_state_dict, strict=True) if dtype is not None: vae = vae.to(dtype=dtype) # Load the text encoder and tokenizer text_encoder_id = "THUDM/glm-4-9b-hf" tokenizer 
= PreTrainedTokenizerFast.from_pretrained(text_encoder_id) text_encoder = GlmModel.from_pretrained( text_encoder_id, cache_dir=args.text_encoder_cache_dir, torch_dtype=torch.bfloat16 if args.dtype == "bf16" else torch.float32, ) for param in text_encoder.parameters(): param.data = param.data.contiguous() # Initialize the scheduler scheduler = FlowMatchEulerDiscreteScheduler( base_shift=0.25, max_shift=0.75, base_image_seq_len=256, use_dynamic_shifting=True, time_shift_type="linear" ) # Create the pipeline if args.control: pipe = CogView4ControlPipeline( tokenizer=tokenizer, text_encoder=text_encoder, vae=vae, transformer=transformer, scheduler=scheduler, ) else: pipe = CogView4Pipeline( tokenizer=tokenizer, text_encoder=text_encoder, vae=vae, transformer=transformer, scheduler=scheduler, ) # Save the converted pipeline pipe.save_pretrained( args.output_path, safe_serialization=True, max_shard_size="5GB", push_to_hub=args.push_to_hub, ) if __name__ == "__main__": main(args)
{ "repo_id": "huggingface/diffusers", "file_path": "scripts/convert_cogview4_to_diffusers_megatron.py", "license": "Apache License 2.0", "lines": 333, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
huggingface/diffusers:src/diffusers/models/transformers/transformer_cogview4.py
# Copyright 2025 The CogView team, Tsinghua University & ZhipuAI and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import Any

import torch
import torch.nn as nn
import torch.nn.functional as F

from ...configuration_utils import ConfigMixin, register_to_config
from ...loaders import PeftAdapterMixin
from ...utils import apply_lora_scale, logging
from ...utils.torch_utils import maybe_allow_in_graph
from ..attention import FeedForward
from ..attention_processor import Attention
from ..cache_utils import CacheMixin
from ..embeddings import CogView3CombinedTimestepSizeEmbeddings
from ..modeling_outputs import Transformer2DModelOutput
from ..modeling_utils import ModelMixin
from ..normalization import LayerNorm, RMSNorm


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class CogView4PatchEmbed(nn.Module):
    """Patchifies image latents into tokens and projects text embeddings to the model width."""

    def __init__(
        self,
        in_channels: int = 16,
        hidden_size: int = 2560,
        patch_size: int = 2,
        text_hidden_size: int = 4096,
    ):
        super().__init__()
        self.patch_size = patch_size
        # Each token is one patch_size x patch_size patch flattened across channels.
        self.proj = nn.Linear(in_channels * patch_size**2, hidden_size)
        self.text_proj = nn.Linear(text_hidden_size, hidden_size)

    def forward(
        self, hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor
    ) -> tuple[torch.Tensor, torch.Tensor]:
        """Returns (patchified image tokens, projected text tokens)."""
        batch_size, channel, height, width = hidden_states.shape
        post_patch_height = height // self.patch_size
        post_patch_width = width // self.patch_size

        # (B, C, H, W) -> (B, H/p * W/p, C * p * p): split H and W into patch grids,
        # then flatten each patch's channels/pixels into the feature dimension.
        hidden_states = hidden_states.reshape(
            batch_size, channel, post_patch_height, self.patch_size, post_patch_width, self.patch_size
        )
        hidden_states = hidden_states.permute(0, 2, 4, 1, 3, 5).flatten(3, 5).flatten(1, 2)
        hidden_states = self.proj(hidden_states)
        encoder_hidden_states = self.text_proj(encoder_hidden_states)
        return hidden_states, encoder_hidden_states


class CogView4AdaLayerNormZero(nn.Module):
    """AdaLN-Zero producing 12 modulation chunks: shift/scale/gate for attention and MLP,
    for both the image stream and the text (context) stream."""

    def __init__(self, embedding_dim: int, dim: int) -> None:
        super().__init__()
        self.norm = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-5)
        self.norm_context = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-5)
        # 12 * dim = 2 streams x 2 sub-layers (attn, mlp) x 3 params (shift, scale, gate).
        self.linear = nn.Linear(embedding_dim, 12 * dim, bias=True)

    def forward(
        self, hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor, temb: torch.Tensor
    ) -> tuple[torch.Tensor, ...]:
        """Returns the modulated image/text states plus the 8 remaining gate/shift/scale
        tensors used later in the block (see CogView4TransformerBlock.forward unpacking)."""
        dtype = hidden_states.dtype
        norm_hidden_states = self.norm(hidden_states).to(dtype=dtype)
        norm_encoder_hidden_states = self.norm_context(encoder_hidden_states).to(dtype=dtype)

        emb = self.linear(temb)
        (
            shift_msa,
            c_shift_msa,
            scale_msa,
            c_scale_msa,
            gate_msa,
            c_gate_msa,
            shift_mlp,
            c_shift_mlp,
            scale_mlp,
            c_scale_mlp,
            gate_mlp,
            c_gate_mlp,
        ) = emb.chunk(12, dim=1)

        # Apply the attention-stage modulation here; MLP-stage params are returned for later use.
        hidden_states = norm_hidden_states * (1 + scale_msa.unsqueeze(1)) + shift_msa.unsqueeze(1)
        encoder_hidden_states = norm_encoder_hidden_states * (1 + c_scale_msa.unsqueeze(1)) + c_shift_msa.unsqueeze(1)

        return (
            hidden_states,
            gate_msa,
            shift_mlp,
            scale_mlp,
            gate_mlp,
            encoder_hidden_states,
            c_gate_msa,
            c_shift_mlp,
            c_scale_mlp,
            c_gate_mlp,
        )


class CogView4AttnProcessor:
    """
    Processor for implementing scaled dot-product attention for the CogView4 model. It applies a rotary embedding on
    query and key vectors, but does not include spatial normalization.

    The processor supports passing an attention mask for text tokens. The attention mask should have shape (batch_size,
    text_seq_length) where 1 indicates a non-padded token and 0 indicates a padded token.
    """

    def __init__(self):
        if not hasattr(F, "scaled_dot_product_attention"):
            raise ImportError("CogView4AttnProcessor requires PyTorch 2.0. To use it, please upgrade PyTorch to 2.0.")

    def __call__(
        self,
        attn: Attention,
        hidden_states: torch.Tensor,
        encoder_hidden_states: torch.Tensor,
        attention_mask: torch.Tensor | None = None,
        image_rotary_emb: tuple[torch.Tensor, torch.Tensor] | None = None,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        """Joint text+image self-attention; returns (image_hidden_states, text_hidden_states)."""
        dtype = encoder_hidden_states.dtype

        batch_size, text_seq_length, embed_dim = encoder_hidden_states.shape
        batch_size, image_seq_length, embed_dim = hidden_states.shape
        # Text tokens come first in the joint sequence; this ordering is relied on below
        # for RoPE application and the final split.
        hidden_states = torch.cat([encoder_hidden_states, hidden_states], dim=1)

        # 1. QKV projections
        query = attn.to_q(hidden_states)
        key = attn.to_k(hidden_states)
        value = attn.to_v(hidden_states)

        query = query.unflatten(2, (attn.heads, -1)).transpose(1, 2)
        key = key.unflatten(2, (attn.heads, -1)).transpose(1, 2)
        value = value.unflatten(2, (attn.heads, -1)).transpose(1, 2)

        # 2. QK normalization
        if attn.norm_q is not None:
            query = attn.norm_q(query).to(dtype=dtype)
        if attn.norm_k is not None:
            key = attn.norm_k(key).to(dtype=dtype)

        # 3. Rotational positional embeddings applied to latent stream only
        # (text tokens, which occupy the first text_seq_length positions, get no RoPE).
        if image_rotary_emb is not None:
            from ..embeddings import apply_rotary_emb

            query[:, :, text_seq_length:, :] = apply_rotary_emb(
                query[:, :, text_seq_length:, :], image_rotary_emb, use_real_unbind_dim=-2
            )
            key[:, :, text_seq_length:, :] = apply_rotary_emb(
                key[:, :, text_seq_length:, :], image_rotary_emb, use_real_unbind_dim=-2
            )

        # 4. Attention
        if attention_mask is not None:
            # Expand the (B, text_len) padding mask to a full (B, 1, S, S) boolean matrix:
            # image tokens are always attendable; the outer product zeroes rows/cols of
            # padded text tokens.
            text_attn_mask = attention_mask
            assert text_attn_mask.dim() == 2, "the shape of text_attn_mask should be (batch_size, text_seq_length)"
            text_attn_mask = text_attn_mask.float().to(query.device)
            mix_attn_mask = torch.ones((batch_size, text_seq_length + image_seq_length), device=query.device)
            mix_attn_mask[:, :text_seq_length] = text_attn_mask
            mix_attn_mask = mix_attn_mask.unsqueeze(2)
            attn_mask_matrix = mix_attn_mask @ mix_attn_mask.transpose(1, 2)
            attention_mask = (attn_mask_matrix > 0).unsqueeze(1).to(query.dtype)

        hidden_states = F.scaled_dot_product_attention(
            query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False
        )
        hidden_states = hidden_states.transpose(1, 2).flatten(2, 3)
        hidden_states = hidden_states.type_as(query)

        # 5. Output projection
        hidden_states = attn.to_out[0](hidden_states)
        hidden_states = attn.to_out[1](hidden_states)

        encoder_hidden_states, hidden_states = hidden_states.split(
            [text_seq_length, hidden_states.size(1) - text_seq_length], dim=1
        )
        return hidden_states, encoder_hidden_states


class CogView4TrainingAttnProcessor:
    """
    Training Processor for implementing scaled dot-product attention for the CogView4 model. It applies a rotary
    embedding on query and key vectors, but does not include spatial normalization.

    This processor differs from CogView4AttnProcessor in several important ways:
    1. It supports attention masking with variable sequence lengths for multi-resolution training
    2. It unpacks and repacks sequences for efficient training with variable sequence lengths when batch_flag is
       provided
    """

    def __init__(self):
        if not hasattr(F, "scaled_dot_product_attention"):
            raise ImportError("CogView4AttnProcessor requires PyTorch 2.0. To use it, please upgrade PyTorch to 2.0.")

    def __call__(
        self,
        attn: Attention,
        hidden_states: torch.Tensor,
        encoder_hidden_states: torch.Tensor,
        latent_attn_mask: torch.Tensor | None = None,
        text_attn_mask: torch.Tensor | None = None,
        batch_flag: torch.Tensor | None = None,
        image_rotary_emb: tuple[torch.Tensor, torch.Tensor] | list[tuple[torch.Tensor, torch.Tensor]] | None = None,
        **kwargs,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        """
        Args:
            attn (`Attention`):
                The attention module.
            hidden_states (`torch.Tensor`):
                The input hidden states.
            encoder_hidden_states (`torch.Tensor`):
                The encoder hidden states for cross-attention.
            latent_attn_mask (`torch.Tensor`, *optional*):
                Mask for latent tokens where 0 indicates pad token and 1 indicates non-pad token. If None, full
                attention is used for all latent tokens. Note: the shape of latent_attn_mask is (batch_size,
                num_latent_tokens).
            text_attn_mask (`torch.Tensor`, *optional*):
                Mask for text tokens where 0 indicates pad token and 1 indicates non-pad token. If None, full attention
                is used for all text tokens.
            batch_flag (`torch.Tensor`, *optional*):
                Values from 0 to n-1 indicating which samples belong to the same batch. Samples with the same
                batch_flag are packed together. Example: [0, 1, 1, 2, 2] means sample 0 forms batch0, samples 1-2 form
                batch1, and samples 3-4 form batch2. If None, no packing is used.
            image_rotary_emb (`tuple[torch.Tensor, torch.Tensor]` or `list[tuple[torch.Tensor, torch.Tensor]]`,
                *optional*):
                The rotary embedding for the image part of the input.

        Returns:
            `tuple[torch.Tensor, torch.Tensor]`: The processed hidden states for both image and text streams.
        """
        # Get dimensions and device info
        batch_size, text_seq_length, embed_dim = encoder_hidden_states.shape
        batch_size, image_seq_length, embed_dim = hidden_states.shape
        dtype = encoder_hidden_states.dtype
        device = encoder_hidden_states.device
        # Keep a handle to the original (padded) latent tensor: the packed path writes
        # results back into it at the end.
        latent_hidden_states = hidden_states

        # Combine text and image streams for joint processing
        mixed_hidden_states = torch.cat([encoder_hidden_states, latent_hidden_states], dim=1)

        # 1. Construct attention mask and maybe packing input
        # Create default masks if not provided
        if text_attn_mask is None:
            text_attn_mask = torch.ones((batch_size, text_seq_length), dtype=torch.int32, device=device)
        if latent_attn_mask is None:
            latent_attn_mask = torch.ones((batch_size, image_seq_length), dtype=torch.int32, device=device)

        # Validate mask shapes and types
        assert text_attn_mask.dim() == 2, "the shape of text_attn_mask should be (batch_size, text_seq_length)"
        assert text_attn_mask.dtype == torch.int32, "the dtype of text_attn_mask should be torch.int32"
        assert latent_attn_mask.dim() == 2, "the shape of latent_attn_mask should be (batch_size, num_latent_tokens)"
        assert latent_attn_mask.dtype == torch.int32, "the dtype of latent_attn_mask should be torch.int32"

        # Create combined mask for text and image tokens
        mixed_attn_mask = torch.ones(
            (batch_size, text_seq_length + image_seq_length), dtype=torch.int32, device=device
        )
        mixed_attn_mask[:, :text_seq_length] = text_attn_mask
        mixed_attn_mask[:, text_seq_length:] = latent_attn_mask

        # Convert mask to attention matrix format (where 1 means attend, 0 means don't attend)
        mixed_attn_mask_input = mixed_attn_mask.unsqueeze(2).to(dtype=dtype)
        attn_mask_matrix = mixed_attn_mask_input @ mixed_attn_mask_input.transpose(1, 2)

        # Handle batch packing if enabled
        if batch_flag is not None:
            assert batch_flag.dim() == 1
            # Determine packed batch size based on batch_flag
            packing_batch_size = torch.max(batch_flag).item() + 1

            # Calculate actual sequence lengths for each sample based on masks.
            # NOTE: from here on, text_seq_length / latent_seq_length are per-sample
            # 1-D tensors, shadowing the scalar lengths computed above.
            text_seq_length = torch.sum(text_attn_mask, dim=1)
            latent_seq_length = torch.sum(latent_attn_mask, dim=1)
            mixed_seq_length = text_seq_length + latent_seq_length

            # Calculate packed sequence lengths for each packed batch
            mixed_seq_length_packed = [
                torch.sum(mixed_attn_mask[batch_flag == batch_idx]).item() for batch_idx in range(packing_batch_size)
            ]
            assert len(mixed_seq_length_packed) == packing_batch_size

            # Pack sequences by removing padding tokens
            mixed_attn_mask_flatten = mixed_attn_mask.flatten(0, 1)
            mixed_hidden_states_flatten = mixed_hidden_states.flatten(0, 1)
            mixed_hidden_states_unpad = mixed_hidden_states_flatten[mixed_attn_mask_flatten == 1]
            assert torch.sum(mixed_seq_length) == mixed_hidden_states_unpad.shape[0]

            # Split the unpadded sequence into packed batches
            mixed_hidden_states_packed = torch.split(mixed_hidden_states_unpad, mixed_seq_length_packed)

            # Re-pad to create packed batches with right-side padding
            mixed_hidden_states_packed_padded = torch.nn.utils.rnn.pad_sequence(
                mixed_hidden_states_packed,
                batch_first=True,
                padding_value=0.0,
                padding_side="right",
            )

            # Create attention mask for packed batches
            l = mixed_hidden_states_packed_padded.shape[1]
            attn_mask_matrix = torch.zeros(
                (packing_batch_size, l, l),
                dtype=dtype,
                device=device,
            )

            # Fill attention mask with block diagonal matrices
            # This ensures that tokens can only attend to other tokens within the same original sample
            for idx, mask in enumerate(attn_mask_matrix):
                seq_lengths = mixed_seq_length[batch_flag == idx]
                offset = 0
                for length in seq_lengths:
                    # Create a block of 1s for each sample in the packed batch
                    mask[offset : offset + length, offset : offset + length] = 1
                    offset += length

        attn_mask_matrix = attn_mask_matrix.to(dtype=torch.bool)
        attn_mask_matrix = attn_mask_matrix.unsqueeze(1)  # Add attention head dim
        attention_mask = attn_mask_matrix

        # Prepare hidden states for attention computation
        if batch_flag is None:
            # If no packing, just combine text and image tokens
            hidden_states = torch.cat([encoder_hidden_states, hidden_states], dim=1)
        else:
            # If packing, use the packed sequence
            hidden_states = mixed_hidden_states_packed_padded

        # 2. QKV projections - convert hidden states to query, key, value
        query = attn.to_q(hidden_states)
        key = attn.to_k(hidden_states)
        value = attn.to_v(hidden_states)

        # Reshape for multi-head attention: [batch, seq_len, heads*dim] -> [batch, heads, seq_len, dim]
        query = query.unflatten(2, (attn.heads, -1)).transpose(1, 2)
        key = key.unflatten(2, (attn.heads, -1)).transpose(1, 2)
        value = value.unflatten(2, (attn.heads, -1)).transpose(1, 2)

        # 3. QK normalization - apply layer norm to queries and keys if configured
        if attn.norm_q is not None:
            query = attn.norm_q(query).to(dtype=dtype)
        if attn.norm_k is not None:
            key = attn.norm_k(key).to(dtype=dtype)

        # 4. Apply rotary positional embeddings to image tokens only
        if image_rotary_emb is not None:
            from ..embeddings import apply_rotary_emb

            if batch_flag is None:
                # Apply RoPE only to image tokens (after text tokens)
                query[:, :, text_seq_length:, :] = apply_rotary_emb(
                    query[:, :, text_seq_length:, :], image_rotary_emb, use_real_unbind_dim=-2
                )
                key[:, :, text_seq_length:, :] = apply_rotary_emb(
                    key[:, :, text_seq_length:, :], image_rotary_emb, use_real_unbind_dim=-2
                )
            else:
                # For packed batches, need to carefully apply RoPE to appropriate tokens.
                # Each original sample keeps its own rotary table (one entry per sample
                # in image_rotary_emb), walked in order via rope_idx.
                assert query.shape[0] == packing_batch_size
                assert key.shape[0] == packing_batch_size
                assert len(image_rotary_emb) == batch_size
                rope_idx = 0
                for idx in range(packing_batch_size):
                    offset = 0
                    # Get text and image sequence lengths for samples in this packed batch
                    text_seq_length_bi = text_seq_length[batch_flag == idx]
                    latent_seq_length_bi = latent_seq_length[batch_flag == idx]
                    # Apply RoPE to each image segment in the packed sequence
                    for tlen, llen in zip(text_seq_length_bi, latent_seq_length_bi):
                        mlen = tlen + llen
                        # Apply RoPE only to image tokens (after text tokens)
                        query[idx, :, offset + tlen : offset + mlen, :] = apply_rotary_emb(
                            query[idx, :, offset + tlen : offset + mlen, :],
                            image_rotary_emb[rope_idx],
                            use_real_unbind_dim=-2,
                        )
                        key[idx, :, offset + tlen : offset + mlen, :] = apply_rotary_emb(
                            key[idx, :, offset + tlen : offset + mlen, :],
                            image_rotary_emb[rope_idx],
                            use_real_unbind_dim=-2,
                        )
                        offset += mlen
                        rope_idx += 1

        hidden_states = F.scaled_dot_product_attention(
            query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False
        )
        # Reshape back: [batch, heads, seq_len, dim] -> [batch, seq_len, heads*dim]
        hidden_states = hidden_states.transpose(1, 2).flatten(2, 3)
        hidden_states = hidden_states.type_as(query)

        # 5. Output projection - project attention output to model dimension
        hidden_states = attn.to_out[0](hidden_states)
        hidden_states = attn.to_out[1](hidden_states)

        # Split the output back into text and image streams
        if batch_flag is None:
            # Simple split for non-packed case
            encoder_hidden_states, hidden_states = hidden_states.split(
                [text_seq_length, hidden_states.size(1) - text_seq_length], dim=1
            )
        else:
            # For packed case: need to unpack, split text/image, then restore to original shapes
            # First, unpad the sequence based on the packed sequence lengths
            hidden_states_unpad = torch.nn.utils.rnn.unpad_sequence(
                hidden_states,
                lengths=torch.tensor(mixed_seq_length_packed),
                batch_first=True,
            )
            # Concatenate all unpadded sequences
            hidden_states_flatten = torch.cat(hidden_states_unpad, dim=0)

            # Split by original sample sequence lengths
            hidden_states_unpack = torch.split(hidden_states_flatten, mixed_seq_length.tolist())
            assert len(hidden_states_unpack) == batch_size

            # Further split each sample's sequence into text and image parts
            hidden_states_unpack = [
                torch.split(h, [tlen, llen])
                for h, tlen, llen in zip(hidden_states_unpack, text_seq_length, latent_seq_length)
            ]

            # Separate text and image sequences
            encoder_hidden_states_unpad = [h[0] for h in hidden_states_unpack]
            hidden_states_unpad = [h[1] for h in hidden_states_unpack]

            # Update the original tensors with the processed values, respecting the attention masks.
            # Padded positions keep their pre-attention values.
            for idx in range(batch_size):
                # Place unpacked text tokens back in the encoder_hidden_states tensor
                encoder_hidden_states[idx][text_attn_mask[idx] == 1] = encoder_hidden_states_unpad[idx]
                # Place unpacked image tokens back in the latent_hidden_states tensor
                latent_hidden_states[idx][latent_attn_mask[idx] == 1] = hidden_states_unpad[idx]

            # Update the output hidden states
            hidden_states = latent_hidden_states

        return hidden_states, encoder_hidden_states


@maybe_allow_in_graph
class CogView4TransformerBlock(nn.Module):
    """One CogView4 dual-stream block: AdaLN-Zero-modulated joint attention followed by a
    shared feed-forward applied to both image and text streams, each with its own gate."""

    def __init__(
        self,
        dim: int = 2560,
        num_attention_heads: int = 64,
        attention_head_dim: int = 40,
        time_embed_dim: int = 512,
    ) -> None:
        super().__init__()

        # 1. Attention
        self.norm1 = CogView4AdaLayerNormZero(time_embed_dim, dim)
        self.attn1 = Attention(
            query_dim=dim,
            heads=num_attention_heads,
            dim_head=attention_head_dim,
            out_dim=dim,
            bias=True,
            qk_norm="layer_norm",
            elementwise_affine=False,
            eps=1e-5,
            processor=CogView4AttnProcessor(),
        )

        # 2. Feedforward
        self.norm2 = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-5)
        self.norm2_context = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-5)
        self.ff = FeedForward(dim=dim, dim_out=dim, activation_fn="gelu-approximate")

    def forward(
        self,
        hidden_states: torch.Tensor,
        encoder_hidden_states: torch.Tensor,
        temb: torch.Tensor | None = None,
        image_rotary_emb: tuple[torch.Tensor, torch.Tensor] | list[tuple[torch.Tensor, torch.Tensor]] | None = None,
        attention_mask: dict[str, torch.Tensor] | None = None,
        attention_kwargs: dict[str, Any] | None = None,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        """Returns the updated (image_hidden_states, text_hidden_states)."""
        # 1. Timestep conditioning
        (
            norm_hidden_states,
            gate_msa,
            shift_mlp,
            scale_mlp,
            gate_mlp,
            norm_encoder_hidden_states,
            c_gate_msa,
            c_shift_mlp,
            c_scale_mlp,
            c_gate_mlp,
        ) = self.norm1(hidden_states, encoder_hidden_states, temb)

        # 2. Attention
        if attention_kwargs is None:
            attention_kwargs = {}
        attn_hidden_states, attn_encoder_hidden_states = self.attn1(
            hidden_states=norm_hidden_states,
            encoder_hidden_states=norm_encoder_hidden_states,
            image_rotary_emb=image_rotary_emb,
            attention_mask=attention_mask,
            **attention_kwargs,
        )
        # Gated residual connections (AdaLN-Zero style).
        hidden_states = hidden_states + attn_hidden_states * gate_msa.unsqueeze(1)
        encoder_hidden_states = encoder_hidden_states + attn_encoder_hidden_states * c_gate_msa.unsqueeze(1)

        # 3. Feedforward
        norm_hidden_states = self.norm2(hidden_states) * (1 + scale_mlp.unsqueeze(1)) + shift_mlp.unsqueeze(1)
        norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) * (
            1 + c_scale_mlp.unsqueeze(1)
        ) + c_shift_mlp.unsqueeze(1)
        # The same FF weights process both streams.
        ff_output = self.ff(norm_hidden_states)
        ff_output_context = self.ff(norm_encoder_hidden_states)
        hidden_states = hidden_states + ff_output * gate_mlp.unsqueeze(1)
        encoder_hidden_states = encoder_hidden_states + ff_output_context * c_gate_mlp.unsqueeze(1)

        return hidden_states, encoder_hidden_states


class CogView4RotaryPosEmbed(nn.Module):
    """2D rotary position embedding for image tokens: half the head dim rotates with the
    row index, half with the column index, resampled onto the actual latent grid."""

    def __init__(self, dim: int, patch_size: int, rope_axes_dim: tuple[int, int], theta: float = 10000.0) -> None:
        super().__init__()

        self.dim = dim
        self.patch_size = patch_size
        self.rope_axes_dim = rope_axes_dim
        self.theta = theta

    def forward(self, hidden_states: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
        """Returns (cos, sin) tables of shape (H/p * W/p, dim) for the given latents."""
        batch_size, num_channels, height, width = hidden_states.shape
        height, width = height // self.patch_size, width // self.patch_size

        # Split head dim evenly between the H and W axes.
        dim_h, dim_w = self.dim // 2, self.dim // 2
        h_inv_freq = 1.0 / (
            self.theta ** (torch.arange(0, dim_h, 2, dtype=torch.float32)[: (dim_h // 2)].float() / dim_h)
        )
        w_inv_freq = 1.0 / (
            self.theta ** (torch.arange(0, dim_w, 2, dtype=torch.float32)[: (dim_w // 2)].float() / dim_w)
        )
        h_seq = torch.arange(self.rope_axes_dim[0])
        w_seq = torch.arange(self.rope_axes_dim[1])
        freqs_h = torch.outer(h_seq, h_inv_freq)
        freqs_w = torch.outer(w_seq, w_inv_freq)

        # Map the actual latent grid onto the full rope_axes_dim tables
        # (integer resampling, so any resolution up to rope_axes_dim is supported).
        h_idx = torch.arange(height, device=freqs_h.device)
        w_idx = torch.arange(width, device=freqs_w.device)
        inner_h_idx = h_idx * self.rope_axes_dim[0] // height
        inner_w_idx = w_idx * self.rope_axes_dim[1] // width
        freqs_h = freqs_h[inner_h_idx]
        freqs_w = freqs_w[inner_w_idx]

        # Create position matrices for height and width
        # [height, 1, dim//4] and [1, width, dim//4]
        freqs_h = freqs_h.unsqueeze(1)
        freqs_w = freqs_w.unsqueeze(0)
        # Broadcast freqs_h and freqs_w to [height, width, dim//4]
        freqs_h = freqs_h.expand(height, width, -1)
        freqs_w = freqs_w.expand(height, width, -1)

        # Concatenate along last dimension to get [height, width, dim//2]
        freqs = torch.cat([freqs_h, freqs_w], dim=-1)
        freqs = torch.cat([freqs, freqs], dim=-1)  # [height, width, dim]
        freqs = freqs.reshape(height * width, -1)
        return (freqs.cos(), freqs.sin())


class CogView4AdaLayerNormContinuous(nn.Module):
    """
    CogView4-only final AdaLN: LN(x) -> Linear(cond) -> chunk -> affine. Matches Megatron: **no activation** before
    the Linear on conditioning embedding.
    """

    def __init__(
        self,
        embedding_dim: int,
        conditioning_embedding_dim: int,
        elementwise_affine: bool = True,
        eps: float = 1e-5,
        bias: bool = True,
        norm_type: str = "layer_norm",
    ):
        super().__init__()
        self.linear = nn.Linear(conditioning_embedding_dim, embedding_dim * 2, bias=bias)
        if norm_type == "layer_norm":
            self.norm = LayerNorm(embedding_dim, eps, elementwise_affine, bias)
        elif norm_type == "rms_norm":
            self.norm = RMSNorm(embedding_dim, eps, elementwise_affine)
        else:
            raise ValueError(f"unknown norm_type {norm_type}")

    def forward(self, x: torch.Tensor, conditioning_embedding: torch.Tensor) -> torch.Tensor:
        # *** NO SiLU here *** (deliberate deviation from the standard
        # AdaLayerNormContinuous, to match the Megatron reference implementation)
        emb = self.linear(conditioning_embedding.to(x.dtype))
        scale, shift = torch.chunk(emb, 2, dim=1)
        x = self.norm(x) * (1 + scale)[:, None, :] + shift[:, None, :]
        return x


class CogView4Transformer2DModel(ModelMixin, ConfigMixin, PeftAdapterMixin, CacheMixin):
    r"""
    Args:
        patch_size (`int`, defaults to `2`):
            The size of the patches to use in the patch embedding layer.
        in_channels (`int`, defaults to `16`):
            The number of channels in the input.
        num_layers (`int`, defaults to `30`):
            The number of layers of Transformer blocks to use.
        attention_head_dim (`int`, defaults to `40`):
            The number of channels in each head.
        num_attention_heads (`int`, defaults to `64`):
            The number of heads to use for multi-head attention.
        out_channels (`int`, defaults to `16`):
            The number of channels in the output.
        text_embed_dim (`int`, defaults to `4096`):
            Input dimension of text embeddings from the text encoder.
        time_embed_dim (`int`, defaults to `512`):
            Output dimension of timestep embeddings.
        condition_dim (`int`, defaults to `256`):
            The embedding dimension of the input SDXL-style resolution conditions (original_size, target_size,
            crop_coords).
        pos_embed_max_size (`int`, defaults to `128`):
            The maximum resolution of the positional embeddings, from which slices of shape `H x W` are taken and
            added to input patched latents, where `H` and `W` are the latent height and width respectively. A value of
            128 means that the maximum supported height and width for image generation is `128 * vae_scale_factor *
            patch_size => 128 * 8 * 2 => 2048`.
        sample_size (`int`, defaults to `128`):
            The base resolution of input latents. If height/width is not provided during generation, this value is
            used to determine the resolution as `sample_size * vae_scale_factor => 128 * 8 => 1024`
    """

    _supports_gradient_checkpointing = True
    # NOTE(review): "CogView4PatchEmbed" is listed twice — presumably the second entry
    # was meant to be a different module; harmless but worth confirming upstream.
    _no_split_modules = ["CogView4TransformerBlock", "CogView4PatchEmbed", "CogView4PatchEmbed"]
    _skip_layerwise_casting_patterns = ["patch_embed", "norm", "proj_out"]

    @register_to_config
    def __init__(
        self,
        patch_size: int = 2,
        in_channels: int = 16,
        out_channels: int = 16,
        num_layers: int = 30,
        attention_head_dim: int = 40,
        num_attention_heads: int = 64,
        text_embed_dim: int = 4096,
        time_embed_dim: int = 512,
        condition_dim: int = 256,
        pos_embed_max_size: int = 128,
        sample_size: int = 128,
        rope_axes_dim: tuple[int, int] = (256, 256),
    ):
        super().__init__()

        # CogView4 uses 3 additional SDXL-like conditions - original_size, target_size, crop_coords
        # Each of these are sincos embeddings of shape 2 * condition_dim
        pooled_projection_dim = 3 * 2 * condition_dim
        inner_dim = num_attention_heads * attention_head_dim
        out_channels = out_channels  # NOTE(review): no-op self-assignment; left as-is.

        # 1. RoPE
        self.rope = CogView4RotaryPosEmbed(attention_head_dim, patch_size, rope_axes_dim, theta=10000.0)

        # 2. Patch & Text-timestep embedding
        self.patch_embed = CogView4PatchEmbed(in_channels, inner_dim, patch_size, text_embed_dim)

        self.time_condition_embed = CogView3CombinedTimestepSizeEmbeddings(
            embedding_dim=time_embed_dim,
            condition_dim=condition_dim,
            pooled_projection_dim=pooled_projection_dim,
            timesteps_dim=inner_dim,
        )

        # 3. Transformer blocks
        self.transformer_blocks = nn.ModuleList(
            [
                CogView4TransformerBlock(inner_dim, num_attention_heads, attention_head_dim, time_embed_dim)
                for _ in range(num_layers)
            ]
        )

        # 4. Output projection
        self.norm_out = CogView4AdaLayerNormContinuous(inner_dim, time_embed_dim, elementwise_affine=False)
        self.proj_out = nn.Linear(inner_dim, patch_size * patch_size * out_channels, bias=True)

        self.gradient_checkpointing = False

    @apply_lora_scale("attention_kwargs")
    def forward(
        self,
        hidden_states: torch.Tensor,
        encoder_hidden_states: torch.Tensor,
        timestep: torch.LongTensor,
        original_size: torch.Tensor,
        target_size: torch.Tensor,
        crop_coords: torch.Tensor,
        attention_kwargs: dict[str, Any] | None = None,
        return_dict: bool = True,
        attention_mask: torch.Tensor | None = None,
        image_rotary_emb: tuple[torch.Tensor, torch.Tensor] | list[tuple[torch.Tensor, torch.Tensor]] | None = None,
    ) -> tuple[torch.Tensor] | Transformer2DModelOutput:
        """Denoising forward pass; returns the predicted latents, unpatchified back to
        (batch, out_channels, height, width)."""
        batch_size, num_channels, height, width = hidden_states.shape

        # 1. RoPE (computed from the latent shape unless precomputed tables are passed in)
        if image_rotary_emb is None:
            image_rotary_emb = self.rope(hidden_states)

        # 2. Patch & Timestep embeddings
        p = self.config.patch_size
        post_patch_height = height // p
        post_patch_width = width // p

        hidden_states, encoder_hidden_states = self.patch_embed(hidden_states, encoder_hidden_states)

        temb = self.time_condition_embed(timestep, original_size, target_size, crop_coords, hidden_states.dtype)
        temb = F.silu(temb)

        # 3. Transformer blocks
        for block in self.transformer_blocks:
            if torch.is_grad_enabled() and self.gradient_checkpointing:
                hidden_states, encoder_hidden_states = self._gradient_checkpointing_func(
                    block,
                    hidden_states,
                    encoder_hidden_states,
                    temb,
                    image_rotary_emb,
                    attention_mask,
                    attention_kwargs,
                )
            else:
                hidden_states, encoder_hidden_states = block(
                    hidden_states,
                    encoder_hidden_states,
                    temb,
                    image_rotary_emb,
                    attention_mask,
                    attention_kwargs,
                )

        # 4. Output norm & projection
        hidden_states = self.norm_out(hidden_states, temb)
        hidden_states = self.proj_out(hidden_states)

        # 5. Unpatchify: (B, Hp*Wp, p*p*C) -> (B, C, H, W), inverse of CogView4PatchEmbed.
        hidden_states = hidden_states.reshape(batch_size, post_patch_height, post_patch_width, -1, p, p)
        output = hidden_states.permute(0, 3, 1, 4, 2, 5).flatten(4, 5).flatten(2, 3)

        if not return_dict:
            return (output,)
        return Transformer2DModelOutput(sample=output)
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/models/transformers/transformer_cogview4.py", "license": "Apache License 2.0", "lines": 653, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:src/diffusers/pipelines/cogview4/pipeline_cogview4.py
# Copyright 2025 The CogVideoX team, Tsinghua University & ZhipuAI and The HuggingFace Team.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import inspect
from typing import Any, Callable

import numpy as np
import torch
from transformers import AutoTokenizer, GlmModel

from ...callbacks import MultiPipelineCallbacks, PipelineCallback
from ...image_processor import VaeImageProcessor
from ...loaders import CogView4LoraLoaderMixin
from ...models import AutoencoderKL, CogView4Transformer2DModel
from ...pipelines.pipeline_utils import DiffusionPipeline
from ...schedulers import FlowMatchEulerDiscreteScheduler
from ...utils import is_torch_xla_available, logging, replace_example_docstring
from ...utils.torch_utils import randn_tensor
from .pipeline_output import CogView4PipelineOutput


if is_torch_xla_available():
    import torch_xla.core.xla_model as xm

    XLA_AVAILABLE = True
else:
    XLA_AVAILABLE = False

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
    Examples:
        ```python
        >>> import torch
        >>> from diffusers import CogView4Pipeline

        >>> pipe = CogView4Pipeline.from_pretrained("THUDM/CogView4-6B", torch_dtype=torch.bfloat16)
        >>> pipe.to("cuda")

        >>> prompt = "A photo of an astronaut riding a horse on mars"
        >>> image = pipe(prompt).images[0]
        >>> image.save("output.png")
        ```
"""


def calculate_shift(
    image_seq_len,
    base_seq_len: int = 256,
    base_shift: float = 0.25,
    max_shift: float = 0.75,
) -> float:
    """Compute the flow-matching timestep shift `mu` from the image sequence length.

    The shift grows with the square root of the ratio between the actual latent sequence length and
    the base sequence length, interpolating between `base_shift` and larger values for larger images.
    """
    m = (image_seq_len / base_seq_len) ** 0.5
    mu = m * max_shift + base_shift
    return mu


def retrieve_timesteps(
    scheduler,
    num_inference_steps: int | None = None,
    device: str | torch.device | None = None,
    timesteps: list[int] | None = None,
    sigmas: list[float] | None = None,
    **kwargs,
):
    r"""
    Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
    custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.

    Args:
        scheduler (`SchedulerMixin`):
            The scheduler to get timesteps from.
        num_inference_steps (`int`):
            The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps`
            must be `None`.
        device (`str` or `torch.device`, *optional*):
            The device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
        timesteps (`list[int]`, *optional*):
            Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed,
            `num_inference_steps` and `sigmas` must be `None`.
        sigmas (`list[float]`, *optional*):
            Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed,
            `num_inference_steps` and `timesteps` must be `None`.

    Returns:
        `tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
        second element is the number of inference steps.
    """
    # Inspect the scheduler's signature once so we can fail with a clear error when the caller asks
    # for a custom schedule the scheduler cannot accept.
    accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
    accepts_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
    # NOTE(review): unlike most diffusers pipelines, this variant allows `timesteps` and `sigmas`
    # to be passed together and forwards both to the scheduler.
    if timesteps is not None and sigmas is not None:
        if not accepts_timesteps and not accepts_sigmas:
            raise ValueError(
                f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
                f" timestep or sigma schedules. Please check whether you are using the correct scheduler."
            )
        scheduler.set_timesteps(timesteps=timesteps, sigmas=sigmas, device=device, **kwargs)
        timesteps = scheduler.timesteps
        num_inference_steps = len(timesteps)
    elif timesteps is not None and sigmas is None:
        if not accepts_timesteps:
            raise ValueError(
                f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
                f" timestep schedules. Please check whether you are using the correct scheduler."
            )
        scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
        timesteps = scheduler.timesteps
        num_inference_steps = len(timesteps)
    elif timesteps is None and sigmas is not None:
        if not accepts_sigmas:
            raise ValueError(
                f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
                f" sigmas schedules. Please check whether you are using the correct scheduler."
            )
        scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs)
        timesteps = scheduler.timesteps
        num_inference_steps = len(timesteps)
    else:
        scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
        timesteps = scheduler.timesteps
    return timesteps, num_inference_steps


class CogView4Pipeline(DiffusionPipeline, CogView4LoraLoaderMixin):
    r"""
    Pipeline for text-to-image generation using CogView4.

    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
    library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)

    Args:
        vae ([`AutoencoderKL`]):
            Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
        text_encoder ([`GLMModel`]):
            Frozen text-encoder. CogView4 uses [glm-4-9b-hf](https://huggingface.co/THUDM/glm-4-9b-hf).
        tokenizer (`PreTrainedTokenizer`):
            Tokenizer of class
            [PreTrainedTokenizer](https://huggingface.co/docs/transformers/main/en/main_classes/tokenizer#transformers.PreTrainedTokenizer).
        transformer ([`CogView4Transformer2DModel`]):
            A text conditioned `CogView4Transformer2DModel` to denoise the encoded image latents.
        scheduler ([`SchedulerMixin`]):
            A scheduler to be used in combination with `transformer` to denoise the encoded image latents.
    """

    _optional_components = []
    model_cpu_offload_seq = "text_encoder->transformer->vae"
    _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"]

    def __init__(
        self,
        tokenizer: AutoTokenizer,
        text_encoder: GlmModel,
        vae: AutoencoderKL,
        transformer: CogView4Transformer2DModel,
        scheduler: FlowMatchEulerDiscreteScheduler,
    ):
        super().__init__()

        self.register_modules(
            tokenizer=tokenizer, text_encoder=text_encoder, vae=vae, transformer=transformer, scheduler=scheduler
        )
        # Fall back to 8 when the pipeline is constructed without a VAE (e.g. partial loading).
        self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8
        self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)

    def _get_glm_embeds(
        self,
        prompt: str | list[str] = None,
        max_sequence_length: int = 1024,
        device: torch.device | None = None,
        dtype: torch.dtype | None = None,
    ):
        """Tokenize `prompt` and return GLM hidden states (second-to-last layer) as text embeddings.

        Sequences are truncated to `max_sequence_length` (with a warning showing what was cut) and
        left-padded with pad tokens so the length is a multiple of 16.
        """
        device = device or self._execution_device
        dtype = dtype or self.text_encoder.dtype

        prompt = [prompt] if isinstance(prompt, str) else prompt

        text_inputs = self.tokenizer(
            prompt,
            padding="longest",  # not use max length
            max_length=max_sequence_length,
            truncation=True,
            add_special_tokens=True,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids
        untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
        # Re-tokenize without truncation to detect (and report) anything that was cut off.
        if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
            removed_text = self.tokenizer.batch_decode(untruncated_ids[:, max_sequence_length - 1 : -1])
            logger.warning(
                "The following part of your input was truncated because `max_sequence_length` is set to "
                f" {max_sequence_length} tokens: {removed_text}"
            )

        current_length = text_input_ids.shape[1]
        pad_length = (16 - (current_length % 16)) % 16
        if pad_length > 0:
            pad_ids = torch.full(
                (text_input_ids.shape[0], pad_length),
                fill_value=self.tokenizer.pad_token_id,
                dtype=text_input_ids.dtype,
                device=text_input_ids.device,
            )
            # Padding is prepended (left padding), not appended.
            text_input_ids = torch.cat([pad_ids, text_input_ids], dim=1)
        prompt_embeds = self.text_encoder(text_input_ids.to(device), output_hidden_states=True).hidden_states[-2]

        prompt_embeds = prompt_embeds.to(dtype=dtype, device=device)
        return prompt_embeds

    def encode_prompt(
        self,
        prompt: str | list[str],
        negative_prompt: str | list[str] | None = None,
        do_classifier_free_guidance: bool = True,
        num_images_per_prompt: int = 1,
        prompt_embeds: torch.Tensor | None = None,
        negative_prompt_embeds: torch.Tensor | None = None,
        device: torch.device | None = None,
        dtype: torch.dtype | None = None,
        max_sequence_length: int = 1024,
    ):
        r"""
        Encodes the prompt into text encoder hidden states.

        Args:
            prompt (`str` or `list[str]`, *optional*):
                prompt to be encoded
            negative_prompt (`str` or `list[str]`, *optional*):
                The prompt or prompts not to guide the image generation. If not defined, one has to pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale`
                is less than `1`).
            do_classifier_free_guidance (`bool`, *optional*, defaults to `True`):
                Whether to use classifier free guidance or not.
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                Number of images that should be generated per prompt.
            prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                provided, text embeddings will be generated from `prompt` input argument.
            negative_prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
                argument.
            device: (`torch.device`, *optional*):
                torch device
            dtype: (`torch.dtype`, *optional*):
                torch dtype
            max_sequence_length (`int`, defaults to `1024`):
                Maximum sequence length in encoded prompt. Can be set to other values but may lead to poorer results.
        """
        device = device or self._execution_device

        prompt = [prompt] if isinstance(prompt, str) else prompt
        if prompt is not None:
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        if prompt_embeds is None:
            prompt_embeds = self._get_glm_embeds(prompt, max_sequence_length, device, dtype)

        # Duplicate embeddings for each requested image per prompt.
        seq_len = prompt_embeds.size(1)
        prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
        prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

        if do_classifier_free_guidance and negative_prompt_embeds is None:
            negative_prompt = negative_prompt or ""
            negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt

            if prompt is not None and type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )

            negative_prompt_embeds = self._get_glm_embeds(negative_prompt, max_sequence_length, device, dtype)

            seq_len = negative_prompt_embeds.size(1)
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

        return prompt_embeds, negative_prompt_embeds

    def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
        """Return user-supplied latents (moved to `device`) or freshly sampled Gaussian latents."""
        if latents is not None:
            return latents.to(device)

        shape = (
            batch_size,
            num_channels_latents,
            int(height) // self.vae_scale_factor,
            int(width) // self.vae_scale_factor,
        )

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        return latents

    def check_inputs(
        self,
        prompt,
        height,
        width,
        negative_prompt,
        callback_on_step_end_tensor_inputs,
        prompt_embeds=None,
        negative_prompt_embeds=None,
    ):
        """Validate call arguments, raising `ValueError` on any inconsistent combination."""
        if height % 16 != 0 or width % 16 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 16 but are {height} and {width}.")

        if callback_on_step_end_tensor_inputs is not None and not all(
            k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
        ):
            raise ValueError(
                f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
            )
        if prompt is not None and prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
                " only forward one of the two."
            )
        elif prompt is None and prompt_embeds is None:
            raise ValueError(
                "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
            )
        elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if prompt is not None and negative_prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `prompt`: {prompt} and `negative_prompt_embeds`:"
                f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
            )

        if negative_prompt is not None and negative_prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
                f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
            )

        if prompt_embeds is not None and negative_prompt_embeds is not None:
            if prompt_embeds.shape[0] != negative_prompt_embeds.shape[0]:
                raise ValueError(
                    "`prompt_embeds` and `negative_prompt_embeds` must have the same batch size when passed directly, but"
                    f" got: `prompt_embeds` {prompt_embeds.shape} and `negative_prompt_embeds`"
                    f" {negative_prompt_embeds.shape}."
                )
            if prompt_embeds.shape[-1] != negative_prompt_embeds.shape[-1]:
                raise ValueError(
                    "`prompt_embeds` and `negative_prompt_embeds` must have the same dimension when passed directly, but"
                    f" got: `prompt_embeds` {prompt_embeds.shape} and `negative_prompt_embeds`"
                    f" {negative_prompt_embeds.shape}."
                )

    @property
    def guidance_scale(self):
        return self._guidance_scale

    # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
    # of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1`
    # corresponds to doing no classifier free guidance.
    @property
    def do_classifier_free_guidance(self):
        # CFG is enabled whenever guidance_scale > 1.
        return self._guidance_scale > 1

    @property
    def num_timesteps(self):
        return self._num_timesteps

    @property
    def attention_kwargs(self):
        return self._attention_kwargs

    @property
    def current_timestep(self):
        return self._current_timestep

    @property
    def interrupt(self):
        return self._interrupt

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        prompt: str | list[str] | None = None,
        negative_prompt: str | list[str] | None = None,
        height: int | None = None,
        width: int | None = None,
        num_inference_steps: int = 50,
        timesteps: list[int] | None = None,
        sigmas: list[float] | None = None,
        guidance_scale: float = 5.0,
        num_images_per_prompt: int = 1,
        generator: torch.Generator | list[torch.Generator] | None = None,
        latents: torch.FloatTensor | None = None,
        prompt_embeds: torch.FloatTensor | None = None,
        negative_prompt_embeds: torch.FloatTensor | None = None,
        original_size: tuple[int, int] | None = None,
        crops_coords_top_left: tuple[int, int] = (0, 0),
        output_type: str = "pil",
        return_dict: bool = True,
        attention_kwargs: dict[str, Any] | None = None,
        callback_on_step_end: Callable[[int, int], None] | PipelineCallback | MultiPipelineCallbacks | None = None,
        callback_on_step_end_tensor_inputs: list[str] = ["latents"],
        max_sequence_length: int = 1024,
    ) -> CogView4PipelineOutput | tuple:
        """
        Function invoked when calling the pipeline for generation.

        Args:
            prompt (`str` or `list[str]`, *optional*):
                The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`.
            negative_prompt (`str` or `list[str]`, *optional*):
                The prompt or prompts not to guide the image generation. If not defined, one has to pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale`
                is less than `1`).
            height (`int`, *optional*, defaults to self.transformer.config.sample_size * self.vae_scale_factor):
                The height in pixels of the generated image. If not provided, it is set to 1024.
            width (`int`, *optional*, defaults to self.transformer.config.sample_size * self.vae_scale_factor):
                The width in pixels of the generated image. If not provided it is set to 1024.
            num_inference_steps (`int`, *optional*, defaults to `50`):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            timesteps (`list[int]`, *optional*):
                Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument
                in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
                passed will be used. Must be in descending order.
            sigmas (`list[float]`, *optional*):
                Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in
                their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed
                will be used.
            guidance_scale (`float`, *optional*, defaults to `5.0`):
                Guidance scale as defined in [Classifier-Free Diffusion
                Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2.
                of [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting
                `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to
                the text `prompt`, usually at the expense of lower image quality.
            num_images_per_prompt (`int`, *optional*, defaults to `1`):
                The number of images to generate per prompt.
            generator (`torch.Generator` or `list[torch.Generator]`, *optional*):
                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
                to make generation deterministic.
            latents (`torch.FloatTensor`, *optional*):
                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
                tensor will be generated by sampling using the supplied random `generator`.
            prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                provided, text embeddings will be generated from `prompt` input argument.
            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
                argument.
            original_size (`tuple[int]`, *optional*, defaults to (1024, 1024)):
                If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled.
                `original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as
                explained in section 2.2 of
                [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
            crops_coords_top_left (`tuple[int]`, *optional*, defaults to (0, 0)):
                `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position
                `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting
                `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of
                [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generate image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] instead
                of a plain tuple.
            attention_kwargs (`dict`, *optional*):
                A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
                `self.processor` in
                [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
            callback_on_step_end (`Callable`, *optional*):
                A function that calls at the end of each denoising steps during the inference. The function is called
                with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
                callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
                `callback_on_step_end_tensor_inputs`.
            callback_on_step_end_tensor_inputs (`list`, *optional*):
                The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
                will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
                `._callback_tensor_inputs` attribute of your pipeline class.
            max_sequence_length (`int`, defaults to `1024`):
                Maximum sequence length in encoded prompt. Can be set to other values but may lead to poorer results.

        Examples:

        Returns:
            [`~pipelines.cogview4.pipeline_CogView4.CogView4PipelineOutput`] or `tuple`:
            [`~pipelines.cogview4.pipeline_CogView4.CogView4PipelineOutput`] if `return_dict` is True, otherwise a
            `tuple`. When returning a tuple, the first element is a list with the generated images.
        """
        if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)):
            callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs

        height = height or self.transformer.config.sample_size * self.vae_scale_factor
        width = width or self.transformer.config.sample_size * self.vae_scale_factor

        original_size = original_size or (height, width)
        target_size = (height, width)

        # Check inputs. Raise error if not correct
        self.check_inputs(
            prompt,
            height,
            width,
            negative_prompt,
            callback_on_step_end_tensor_inputs,
            prompt_embeds,
            negative_prompt_embeds,
        )
        self._guidance_scale = guidance_scale
        self._attention_kwargs = attention_kwargs
        self._current_timestep = None
        self._interrupt = False

        # Default call parameters
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        device = self._execution_device

        # Encode input prompt
        prompt_embeds, negative_prompt_embeds = self.encode_prompt(
            prompt,
            negative_prompt,
            self.do_classifier_free_guidance,
            num_images_per_prompt=num_images_per_prompt,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            max_sequence_length=max_sequence_length,
            device=device,
        )

        # Prepare latents (kept in float32; cast to the transformer dtype per step below)
        latent_channels = self.transformer.config.in_channels
        latents = self.prepare_latents(
            batch_size * num_images_per_prompt,
            latent_channels,
            height,
            width,
            torch.float32,
            device,
            generator,
            latents,
        )

        # Prepare additional timestep conditions (SDXL-style micro-conditioning tensors)
        original_size = torch.tensor([original_size], dtype=prompt_embeds.dtype, device=device)
        target_size = torch.tensor([target_size], dtype=prompt_embeds.dtype, device=device)
        crops_coords_top_left = torch.tensor([crops_coords_top_left], dtype=prompt_embeds.dtype, device=device)

        original_size = original_size.repeat(batch_size * num_images_per_prompt, 1)
        target_size = target_size.repeat(batch_size * num_images_per_prompt, 1)
        crops_coords_top_left = crops_coords_top_left.repeat(batch_size * num_images_per_prompt, 1)

        # Prepare timesteps
        image_seq_len = ((height // self.vae_scale_factor) * (width // self.vae_scale_factor)) // (
            self.transformer.config.patch_size**2
        )
        timesteps = (
            np.linspace(self.scheduler.config.num_train_timesteps, 1.0, num_inference_steps)
            if timesteps is None
            else np.array(timesteps)
        )
        # Round to integer timesteps first, then back to float32 for the scheduler.
        timesteps = timesteps.astype(np.int64).astype(np.float32)
        sigmas = timesteps / self.scheduler.config.num_train_timesteps if sigmas is None else sigmas
        mu = calculate_shift(
            image_seq_len,
            self.scheduler.config.get("base_image_seq_len", 256),
            self.scheduler.config.get("base_shift", 0.25),
            self.scheduler.config.get("max_shift", 0.75),
        )
        # On XLA, keep the timestep schedule on CPU to avoid device round-trips per step.
        if XLA_AVAILABLE:
            timestep_device = "cpu"
        else:
            timestep_device = device
        timesteps, num_inference_steps = retrieve_timesteps(
            self.scheduler, num_inference_steps, timestep_device, timesteps, sigmas, mu=mu
        )
        self._num_timesteps = len(timesteps)

        # Denoising loop
        transformer_dtype = self.transformer.dtype
        num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)

        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                if self.interrupt:
                    continue

                self._current_timestep = t
                latent_model_input = latents.to(transformer_dtype)

                # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
                timestep = t.expand(latents.shape[0])

                with self.transformer.cache_context("cond"):
                    noise_pred_cond = self.transformer(
                        hidden_states=latent_model_input,
                        encoder_hidden_states=prompt_embeds,
                        timestep=timestep,
                        original_size=original_size,
                        target_size=target_size,
                        crop_coords=crops_coords_top_left,
                        attention_kwargs=attention_kwargs,
                        return_dict=False,
                    )[0]

                # perform guidance
                if self.do_classifier_free_guidance:
                    with self.transformer.cache_context("uncond"):
                        noise_pred_uncond = self.transformer(
                            hidden_states=latent_model_input,
                            encoder_hidden_states=negative_prompt_embeds,
                            timestep=timestep,
                            original_size=original_size,
                            target_size=target_size,
                            crop_coords=crops_coords_top_left,
                            attention_kwargs=attention_kwargs,
                            return_dict=False,
                        )[0]

                    noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_cond - noise_pred_uncond)
                else:
                    noise_pred = noise_pred_cond

                latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0]

                # call the callback, if provided
                if callback_on_step_end is not None:
                    callback_kwargs = {}
                    for k in callback_on_step_end_tensor_inputs:
                        callback_kwargs[k] = locals()[k]
                    # NOTE(review): the callback receives the current sigma, not the timestep `t`
                    # documented above — confirm this is intentional for flow-matching callbacks.
                    callback_outputs = callback_on_step_end(self, i, self.scheduler.sigmas[i], callback_kwargs)

                    latents = callback_outputs.pop("latents", latents)
                    prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
                    negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)

                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                    progress_bar.update()

                if XLA_AVAILABLE:
                    xm.mark_step()

        self._current_timestep = None

        if not output_type == "latent":
            latents = latents.to(self.vae.dtype) / self.vae.config.scaling_factor
            image = self.vae.decode(latents, return_dict=False, generator=generator)[0]
        else:
            image = latents

        image = self.image_processor.postprocess(image, output_type=output_type)

        # Offload all models
        self.maybe_free_model_hooks()

        if not return_dict:
            return (image,)

        return CogView4PipelineOutput(images=image)
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/pipelines/cogview4/pipeline_cogview4.py", "license": "Apache License 2.0", "lines": 605, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:src/diffusers/pipelines/cogview4/pipeline_output.py
from dataclasses import dataclass import numpy as np import PIL.Image from ...utils import BaseOutput @dataclass class CogView4PipelineOutput(BaseOutput): """ Output class for CogView3 pipelines. Args: images (`list[PIL.Image.Image]` or `np.ndarray`) list of denoised PIL images of length `batch_size` or numpy array of shape `(batch_size, height, width, num_channels)`. PIL images or numpy array present the denoised images of the diffusion pipeline. """ images: list[PIL.Image.Image] | np.ndarray
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/pipelines/cogview4/pipeline_output.py", "license": "Apache License 2.0", "lines": 14, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
documentation
huggingface/diffusers:tests/models/transformers/test_models_transformer_cogview4.py
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import torch

from diffusers import CogView4Transformer2DModel

from ...testing_utils import enable_full_determinism, torch_device
from ..test_modeling_common import ModelTesterMixin


enable_full_determinism()


# NOTE(review): class name says "CogView3Plus" but it tests CogView4Transformer2DModel —
# presumably a copy-paste leftover; renaming would change the test's public identifier.
class CogView3PlusTransformerTests(ModelTesterMixin, unittest.TestCase):
    model_class = CogView4Transformer2DModel
    main_input_name = "hidden_states"
    uses_custom_attn_processor = True

    @property
    def dummy_input(self):
        """Build a minimal batch of inputs matching the transformer's forward signature."""
        batch_size = 2
        num_channels = 4
        height = 8
        width = 8
        embedding_dim = 8
        sequence_length = 8

        hidden_states = torch.randn((batch_size, num_channels, height, width)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device)
        # SDXL-style size conditions: pixel-space sizes are latent sizes scaled by 8.
        original_size = torch.tensor([height * 8, width * 8]).unsqueeze(0).repeat(batch_size, 1).to(torch_device)
        target_size = torch.tensor([height * 8, width * 8]).unsqueeze(0).repeat(batch_size, 1).to(torch_device)
        crop_coords = torch.tensor([0, 0]).unsqueeze(0).repeat(batch_size, 1).to(torch_device)
        timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "encoder_hidden_states": encoder_hidden_states,
            "timestep": timestep,
            "original_size": original_size,
            "target_size": target_size,
            "crop_coords": crop_coords,
        }

    @property
    def input_shape(self):
        return (4, 8, 8)

    @property
    def output_shape(self):
        return (4, 8, 8)

    def prepare_init_args_and_inputs_for_common(self):
        """Return a tiny model config plus matching dummy inputs for the common model tests."""
        init_dict = {
            "patch_size": 2,
            "in_channels": 4,
            "num_layers": 2,
            "attention_head_dim": 4,
            "num_attention_heads": 4,
            "out_channels": 4,
            "text_embed_dim": 8,
            "time_embed_dim": 8,
            "condition_dim": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_gradient_checkpointing_is_applied(self):
        expected_set = {"CogView4Transformer2DModel"}
        super().test_gradient_checkpointing_is_applied(expected_set=expected_set)
{ "repo_id": "huggingface/diffusers", "file_path": "tests/models/transformers/test_models_transformer_cogview4.py", "license": "Apache License 2.0", "lines": 68, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/diffusers:tests/pipelines/cogview4/test_cogview4.py
# Copyright 2025 The HuggingFace Team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import unittest import numpy as np import torch from transformers import AutoTokenizer, GlmConfig, GlmForCausalLM from diffusers import AutoencoderKL, CogView4Pipeline, CogView4Transformer2DModel, FlowMatchEulerDiscreteScheduler from ...testing_utils import enable_full_determinism, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin, to_np enable_full_determinism() class CogView4PipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = CogView4Pipeline params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs"} batch_params = TEXT_TO_IMAGE_BATCH_PARAMS image_params = TEXT_TO_IMAGE_IMAGE_PARAMS image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS required_optional_params = frozenset( [ "num_inference_steps", "generator", "latents", "return_dict", "callback_on_step_end", "callback_on_step_end_tensor_inputs", ] ) supports_dduf = False test_xformers_attention = False test_layerwise_casting = True def get_dummy_components(self): torch.manual_seed(0) transformer = CogView4Transformer2DModel( patch_size=2, in_channels=4, num_layers=2, attention_head_dim=4, num_attention_heads=4, out_channels=4, text_embed_dim=32, time_embed_dim=8, condition_dim=4, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, 
down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, ) torch.manual_seed(0) scheduler = FlowMatchEulerDiscreteScheduler( base_shift=0.25, max_shift=0.75, base_image_seq_len=256, use_dynamic_shifting=True, time_shift_type="linear", ) torch.manual_seed(0) text_encoder_config = GlmConfig( hidden_size=32, intermediate_size=8, num_hidden_layers=2, num_attention_heads=4, head_dim=8 ) text_encoder = GlmForCausalLM(text_encoder_config) # TODO(aryan): change this to THUDM/CogView4 once released tokenizer = AutoTokenizer.from_pretrained("THUDM/glm-4-9b-chat", trust_remote_code=True) components = { "transformer": transformer, "vae": vae, "scheduler": scheduler, "text_encoder": text_encoder, "tokenizer": tokenizer, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "dance monkey", "negative_prompt": "bad", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "height": 16, "width": 16, "max_sequence_length": 16, "output_type": "pt", } return inputs def test_inference(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = pipe(**inputs)[0] generated_image = image[0] self.assertEqual(generated_image.shape, (3, 16, 16)) expected_image = torch.randn(3, 16, 16) max_diff = np.abs(generated_image - expected_image).max() self.assertLessEqual(max_diff, 1e10) def test_callback_inputs(self): sig = inspect.signature(self.pipeline_class.__call__) has_callback_tensor_inputs = "callback_on_step_end_tensor_inputs" in sig.parameters has_callback_step_end = "callback_on_step_end" in sig.parameters if not (has_callback_tensor_inputs and 
has_callback_step_end): return components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) self.assertTrue( hasattr(pipe, "_callback_tensor_inputs"), f" {self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs", ) def callback_inputs_subset(pipe, i, t, callback_kwargs): # iterate over callback args for tensor_name, tensor_value in callback_kwargs.items(): # check that we're only passing in allowed tensor inputs assert tensor_name in pipe._callback_tensor_inputs return callback_kwargs def callback_inputs_all(pipe, i, t, callback_kwargs): for tensor_name in pipe._callback_tensor_inputs: assert tensor_name in callback_kwargs # iterate over callback args for tensor_name, tensor_value in callback_kwargs.items(): # check that we're only passing in allowed tensor inputs assert tensor_name in pipe._callback_tensor_inputs return callback_kwargs inputs = self.get_dummy_inputs(torch_device) # Test passing in a subset inputs["callback_on_step_end"] = callback_inputs_subset inputs["callback_on_step_end_tensor_inputs"] = ["latents"] output = pipe(**inputs)[0] # Test passing in a everything inputs["callback_on_step_end"] = callback_inputs_all inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs output = pipe(**inputs)[0] def callback_inputs_change_tensor(pipe, i, t, callback_kwargs): is_last = i == (pipe.num_timesteps - 1) if is_last: callback_kwargs["latents"] = torch.zeros_like(callback_kwargs["latents"]) return callback_kwargs inputs["callback_on_step_end"] = callback_inputs_change_tensor inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs output = pipe(**inputs)[0] assert output.abs().sum() < 1e10 def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(batch_size=3, expected_max_diff=1e-3) def 
test_attention_slicing_forward_pass( self, test_max_difference=True, test_mean_pixel_difference=True, expected_max_diff=1e-3 ): if not self.test_attention_slicing: return components = self.get_dummy_components() pipe = self.pipeline_class(**components) for component in pipe.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) generator_device = "cpu" inputs = self.get_dummy_inputs(generator_device) output_without_slicing = pipe(**inputs)[0] pipe.enable_attention_slicing(slice_size=1) inputs = self.get_dummy_inputs(generator_device) output_with_slicing1 = pipe(**inputs)[0] pipe.enable_attention_slicing(slice_size=2) inputs = self.get_dummy_inputs(generator_device) output_with_slicing2 = pipe(**inputs)[0] if test_max_difference: max_diff1 = np.abs(to_np(output_with_slicing1) - to_np(output_without_slicing)).max() max_diff2 = np.abs(to_np(output_with_slicing2) - to_np(output_without_slicing)).max() self.assertLess( max(max_diff1, max_diff2), expected_max_diff, "Attention slicing should not affect the inference results", )
{ "repo_id": "huggingface/diffusers", "file_path": "tests/pipelines/cogview4/test_cogview4.py", "license": "Apache License 2.0", "lines": 195, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/diffusers:src/diffusers/hooks/group_offloading.py
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import hashlib import os from contextlib import contextmanager, nullcontext from dataclasses import dataclass, replace from enum import Enum from typing import Set import safetensors.torch import torch from ..utils import get_logger, is_accelerate_available from ._common import _GO_LC_SUPPORTED_PYTORCH_LAYERS from .hooks import HookRegistry, ModelHook if is_accelerate_available(): from accelerate.hooks import AlignDevicesHook, CpuOffload from accelerate.utils import send_to_device logger = get_logger(__name__) # pylint: disable=invalid-name # fmt: off _GROUP_OFFLOADING = "group_offloading" _LAYER_EXECUTION_TRACKER = "layer_execution_tracker" _LAZY_PREFETCH_GROUP_OFFLOADING = "lazy_prefetch_group_offloading" _GROUP_ID_LAZY_LEAF = "lazy_leafs" # fmt: on class GroupOffloadingType(str, Enum): BLOCK_LEVEL = "block_level" LEAF_LEVEL = "leaf_level" @dataclass class GroupOffloadingConfig: onload_device: torch.device offload_device: torch.device offload_type: GroupOffloadingType non_blocking: bool record_stream: bool low_cpu_mem_usage: bool num_blocks_per_group: int | None = None offload_to_disk_path: str | None = None stream: torch.cuda.Stream | torch.Stream | None = None block_modules: list[str] | None = None exclude_kwargs: list[str] | None = None module_prefix: str = "" class ModuleGroup: def __init__( self, modules: list[torch.nn.Module], offload_device: torch.device, 
onload_device: torch.device, offload_leader: torch.nn.Module, onload_leader: torch.nn.Module | None = None, parameters: list[torch.nn.Parameter] | None = None, buffers: list[torch.Tensor] | None = None, non_blocking: bool = False, stream: torch.cuda.Stream | torch.Stream | None = None, record_stream: bool | None = False, low_cpu_mem_usage: bool = False, onload_self: bool = True, offload_to_disk_path: str | None = None, group_id: int | str | None = None, ) -> None: self.modules = modules self.offload_device = offload_device self.onload_device = onload_device self.offload_leader = offload_leader self.onload_leader = onload_leader self.parameters = parameters or [] self.buffers = buffers or [] self.non_blocking = non_blocking or stream is not None self.stream = stream self.record_stream = record_stream self.onload_self = onload_self self.low_cpu_mem_usage = low_cpu_mem_usage self.offload_to_disk_path = offload_to_disk_path self._is_offloaded_to_disk = False if self.offload_to_disk_path is not None: # Instead of `group_id or str(id(self))` we do this because `group_id` can be "" as well. 
self.group_id = group_id if group_id is not None else str(id(self)) short_hash = _compute_group_hash(self.group_id) self.safetensors_file_path = os.path.join(self.offload_to_disk_path, f"group_{short_hash}.safetensors") all_tensors = [] for module in self.modules: all_tensors.extend(list(module.parameters())) all_tensors.extend(list(module.buffers())) all_tensors.extend(self.parameters) all_tensors.extend(self.buffers) all_tensors = list(dict.fromkeys(all_tensors)) # Remove duplicates self.tensor_to_key = {tensor: f"tensor_{i}" for i, tensor in enumerate(all_tensors)} self.key_to_tensor = {v: k for k, v in self.tensor_to_key.items()} self.cpu_param_dict = {} else: self.cpu_param_dict = self._init_cpu_param_dict() self._torch_accelerator_module = ( getattr(torch, torch.accelerator.current_accelerator().type) if hasattr(torch, "accelerator") else torch.cuda ) def _init_cpu_param_dict(self): cpu_param_dict = {} if self.stream is None: return cpu_param_dict for module in self.modules: for param in module.parameters(): cpu_param_dict[param] = param.data.cpu() if self.low_cpu_mem_usage else param.data.cpu().pin_memory() for buffer in module.buffers(): cpu_param_dict[buffer] = ( buffer.data.cpu() if self.low_cpu_mem_usage else buffer.data.cpu().pin_memory() ) for param in self.parameters: cpu_param_dict[param] = param.data.cpu() if self.low_cpu_mem_usage else param.data.cpu().pin_memory() for buffer in self.buffers: cpu_param_dict[buffer] = buffer.data.cpu() if self.low_cpu_mem_usage else buffer.data.cpu().pin_memory() return cpu_param_dict @contextmanager def _pinned_memory_tensors(self): try: pinned_dict = { param: tensor.pin_memory() if not tensor.is_pinned() else tensor for param, tensor in self.cpu_param_dict.items() } yield pinned_dict finally: pinned_dict = None def _transfer_tensor_to_device(self, tensor, source_tensor, default_stream): tensor.data = source_tensor.to(self.onload_device, non_blocking=self.non_blocking) if self.record_stream: 
tensor.data.record_stream(default_stream) def _process_tensors_from_modules(self, pinned_memory=None, default_stream=None): for group_module in self.modules: for param in group_module.parameters(): source = pinned_memory[param] if pinned_memory else param.data self._transfer_tensor_to_device(param, source, default_stream) for buffer in group_module.buffers(): source = pinned_memory[buffer] if pinned_memory else buffer.data self._transfer_tensor_to_device(buffer, source, default_stream) for param in self.parameters: source = pinned_memory[param] if pinned_memory else param.data self._transfer_tensor_to_device(param, source, default_stream) for buffer in self.buffers: source = pinned_memory[buffer] if pinned_memory else buffer.data self._transfer_tensor_to_device(buffer, source, default_stream) def _onload_from_disk(self): if self.stream is not None: # Wait for previous Host->Device transfer to complete self.stream.synchronize() context = nullcontext() if self.stream is None else self._torch_accelerator_module.stream(self.stream) current_stream = self._torch_accelerator_module.current_stream() if self.record_stream else None with context: # Load to CPU (if using streams) or directly to target device, pin, and async copy to device device = str(self.onload_device) if self.stream is None else "cpu" loaded_tensors = safetensors.torch.load_file(self.safetensors_file_path, device=device) if self.stream is not None: for key, tensor_obj in self.key_to_tensor.items(): pinned_tensor = loaded_tensors[key].pin_memory() tensor_obj.data = pinned_tensor.to(self.onload_device, non_blocking=self.non_blocking) if self.record_stream: tensor_obj.data.record_stream(current_stream) else: onload_device = ( self.onload_device.type if isinstance(self.onload_device, torch.device) else self.onload_device ) loaded_tensors = safetensors.torch.load_file(self.safetensors_file_path, device=onload_device) for key, tensor_obj in self.key_to_tensor.items(): tensor_obj.data = loaded_tensors[key] def 
_onload_from_memory(self): if self.stream is not None: # Wait for previous Host->Device transfer to complete self.stream.synchronize() context = nullcontext() if self.stream is None else self._torch_accelerator_module.stream(self.stream) default_stream = self._torch_accelerator_module.current_stream() if self.stream is not None else None with context: if self.stream is not None: with self._pinned_memory_tensors() as pinned_memory: self._process_tensors_from_modules(pinned_memory, default_stream=default_stream) else: self._process_tensors_from_modules(None) def _offload_to_disk(self): # TODO: we can potentially optimize this code path by checking if the _all_ the desired # safetensor files exist on the disk and if so, skip this step entirely, reducing IO # overhead. Currently, we just check if the given `safetensors_file_path` exists and if not # we perform a write. # Check if the file has been saved in this session or if it already exists on disk. if not self._is_offloaded_to_disk and not os.path.exists(self.safetensors_file_path): os.makedirs(os.path.dirname(self.safetensors_file_path), exist_ok=True) tensors_to_save = {key: tensor.data.to(self.offload_device) for tensor, key in self.tensor_to_key.items()} safetensors.torch.save_file(tensors_to_save, self.safetensors_file_path) # The group is now considered offloaded to disk for the rest of the session. self._is_offloaded_to_disk = True # We do this to free up the RAM which is still holding the up tensor data. 
for tensor_obj in self.tensor_to_key.keys(): tensor_obj.data = torch.empty_like(tensor_obj.data, device=self.offload_device) def _offload_to_memory(self): if self.stream is not None: if not self.record_stream: self._torch_accelerator_module.current_stream().synchronize() for group_module in self.modules: for param in group_module.parameters(): param.data = self.cpu_param_dict[param] for param in self.parameters: param.data = self.cpu_param_dict[param] for buffer in self.buffers: buffer.data = self.cpu_param_dict[buffer] else: for group_module in self.modules: group_module.to(self.offload_device, non_blocking=False) for param in self.parameters: param.data = param.data.to(self.offload_device, non_blocking=False) for buffer in self.buffers: buffer.data = buffer.data.to(self.offload_device, non_blocking=False) @torch.compiler.disable() def onload_(self): r"""Onloads the group of parameters to the onload_device.""" if self.offload_to_disk_path is not None: self._onload_from_disk() else: self._onload_from_memory() @torch.compiler.disable() def offload_(self): r"""Offloads the group of parameters to the offload_device.""" if self.offload_to_disk_path: self._offload_to_disk() else: self._offload_to_memory() class GroupOffloadingHook(ModelHook): r""" A hook that offloads groups of torch.nn.Module to the CPU for storage and onloads to accelerator device for computation. Each group has one "onload leader" module that is responsible for onloading, and an "offload leader" module that is responsible for offloading. If prefetching is enabled, the onload leader of the previous module group is responsible for onloading the current module group. 
""" _is_stateful = False def __init__(self, group: ModuleGroup, *, config: GroupOffloadingConfig) -> None: self.group = group self.next_group: ModuleGroup | None = None self.config = config def initialize_hook(self, module: torch.nn.Module) -> torch.nn.Module: if self.group.offload_leader == module: self.group.offload_() return module def pre_forward(self, module: torch.nn.Module, *args, **kwargs): # If there wasn't an onload_leader assigned, we assume that the submodule that first called its forward # method is the onload_leader of the group. if self.group.onload_leader is None: self.group.onload_leader = module # If the current module is the onload_leader of the group, we onload the group if it is supposed # to onload itself. In the case of using prefetching with streams, we onload the next group if # it is not supposed to onload itself. if self.group.onload_leader == module: if self.group.onload_self: self.group.onload_() should_onload_next_group = self.next_group is not None and not self.next_group.onload_self if should_onload_next_group: self.next_group.onload_() should_synchronize = ( not self.group.onload_self and self.group.stream is not None and not should_onload_next_group ) if should_synchronize: # If this group didn't onload itself, it means it was asynchronously onloaded by the # previous group. We need to synchronize the side stream to ensure parameters # are completely loaded to proceed with forward pass. Without this, uninitialized # weights will be used in the computation, leading to incorrect results # Also, we should only do this synchronization if we don't already do it from the sync call in # self.next_group.onload_, hence the `not should_onload_next_group` check. self.group.stream.synchronize() args = send_to_device(args, self.group.onload_device, non_blocking=self.group.non_blocking) # Some Autoencoder models use a feature cache that is passed through submodules # and modified in place. 
The `send_to_device` call returns a copy of this feature cache object # which breaks the inplace updates. Use `exclude_kwargs` to mark these cache features exclude_kwargs = self.config.exclude_kwargs or [] if exclude_kwargs: moved_kwargs = send_to_device( {k: v for k, v in kwargs.items() if k not in exclude_kwargs}, self.group.onload_device, non_blocking=self.group.non_blocking, ) kwargs.update(moved_kwargs) else: kwargs = send_to_device(kwargs, self.group.onload_device, non_blocking=self.group.non_blocking) return args, kwargs def post_forward(self, module: torch.nn.Module, output): if self.group.offload_leader == module: self.group.offload_() return output class LazyPrefetchGroupOffloadingHook(ModelHook): r""" A hook, used in conjunction with GroupOffloadingHook, that applies lazy prefetching to groups of torch.nn.Module. This hook is used to determine the order in which the layers are executed during the forward pass. Once the layer invocation order is known, assignments of the next_group attribute for prefetching can be made, which allows prefetching groups in the correct order. """ _is_stateful = False def __init__(self): self.execution_order: list[tuple[str, torch.nn.Module]] = [] self._layer_execution_tracker_module_names = set() def initialize_hook(self, module): def make_execution_order_update_callback(current_name, current_submodule): def callback(): if not torch.compiler.is_compiling(): logger.debug(f"Adding {current_name} to the execution order") self.execution_order.append((current_name, current_submodule)) return callback # To every submodule that contains a group offloading hook (at this point, no prefetching is enabled for any # of the groups), we add a layer execution tracker hook that will be used to determine the order in which the # layers are executed during the forward pass. 
for name, submodule in module.named_modules(): if name == "" or not hasattr(submodule, "_diffusers_hook"): continue registry = HookRegistry.check_if_exists_or_initialize(submodule) group_offloading_hook = registry.get_hook(_GROUP_OFFLOADING) if group_offloading_hook is not None: # For the first forward pass, we have to load in a blocking manner group_offloading_hook.group.non_blocking = False layer_tracker_hook = LayerExecutionTrackerHook(make_execution_order_update_callback(name, submodule)) registry.register_hook(layer_tracker_hook, _LAYER_EXECUTION_TRACKER) self._layer_execution_tracker_module_names.add(name) return module def post_forward(self, module, output): # At this point, for the current modules' submodules, we know the execution order of the layers. We can now # remove the layer execution tracker hooks and apply prefetching by setting the next_group attribute for each # group offloading hook. num_executed = len(self.execution_order) execution_order_module_names = {name for name, _ in self.execution_order} # It may be possible that some layers were not executed during the forward pass. This can happen if the layer # is not used in the forward pass, or if the layer is not executed due to some other reason. In such cases, we # may not be able to apply prefetching in the correct order, which can lead to device-mismatch related errors # if the missing layers end up being executed in the future. if execution_order_module_names != self._layer_execution_tracker_module_names: unexecuted_layers = list(self._layer_execution_tracker_module_names - execution_order_module_names) if not torch.compiler.is_compiling(): logger.warning( "It seems like some layers were not executed during the forward pass. This may lead to problems when " "applying lazy prefetching with automatic tracing and lead to device-mismatch related errors. Please " "make sure that all layers are executed during the forward pass. 
The following layers were not executed:\n" f"{unexecuted_layers=}" ) # Remove the layer execution tracker hooks from the submodules base_module_registry = module._diffusers_hook registries = [submodule._diffusers_hook for _, submodule in self.execution_order] group_offloading_hooks = [registry.get_hook(_GROUP_OFFLOADING) for registry in registries] for i in range(num_executed): registries[i].remove_hook(_LAYER_EXECUTION_TRACKER, recurse=False) # Remove the current lazy prefetch group offloading hook so that it doesn't interfere with the next forward pass base_module_registry.remove_hook(_LAZY_PREFETCH_GROUP_OFFLOADING, recurse=False) # LazyPrefetchGroupOffloadingHook is only used with streams, so we know that non_blocking should be True. # We disable non_blocking for the first forward pass, but need to enable it for the subsequent passes to # see the benefits of prefetching. for hook in group_offloading_hooks: hook.group.non_blocking = True # Set required attributes for prefetching if num_executed > 0: base_module_group_offloading_hook = base_module_registry.get_hook(_GROUP_OFFLOADING) base_module_group_offloading_hook.next_group = group_offloading_hooks[0].group base_module_group_offloading_hook.next_group.onload_self = False for i in range(num_executed - 1): name1, _ = self.execution_order[i] name2, _ = self.execution_order[i + 1] if not torch.compiler.is_compiling(): logger.debug(f"Applying lazy prefetch group offloading from {name1} to {name2}") group_offloading_hooks[i].next_group = group_offloading_hooks[i + 1].group group_offloading_hooks[i].next_group.onload_self = False return output class LayerExecutionTrackerHook(ModelHook): r""" A hook that tracks the order in which the layers are executed during the forward pass by calling back to the LazyPrefetchGroupOffloadingHook to update the execution order. 
""" _is_stateful = False def __init__(self, execution_order_update_callback): self.execution_order_update_callback = execution_order_update_callback def pre_forward(self, module, *args, **kwargs): self.execution_order_update_callback() return args, kwargs def apply_group_offloading( module: torch.nn.Module, onload_device: str | torch.device, offload_device: str | torch.device = torch.device("cpu"), offload_type: str | GroupOffloadingType = "block_level", num_blocks_per_group: int | None = None, non_blocking: bool = False, use_stream: bool = False, record_stream: bool = False, low_cpu_mem_usage: bool = False, offload_to_disk_path: str | None = None, block_modules: list[str] | None = None, exclude_kwargs: list[str] | None = None, ) -> None: r""" Applies group offloading to the internal layers of a torch.nn.Module. To understand what group offloading is, and where it is beneficial, we need to first provide some context on how other supported offloading methods work. Typically, offloading is done at two levels: - Module-level: In Diffusers, this can be enabled using the `ModelMixin::enable_model_cpu_offload()` method. It works by offloading each component of a pipeline to the CPU for storage, and onloading to the accelerator device when needed for computation. This method is more memory-efficient than keeping all components on the accelerator, but the memory requirements are still quite high. For this method to work, one needs memory equivalent to size of the model in runtime dtype + size of largest intermediate activation tensors to be able to complete the forward pass. - Leaf-level: In Diffusers, this can be enabled using the `ModelMixin::enable_sequential_cpu_offload()` method. It works by offloading the lowest leaf-level parameters of the computation graph to the CPU for storage, and onloading only the leafs to the accelerator device for computation. 
This uses the lowest amount of accelerator memory, but can be slower due to the excessive number of device synchronizations. Group offloading is a middle ground between the two methods. It works by offloading groups of internal layers, (either `torch.nn.ModuleList` or `torch.nn.Sequential`). This method uses lower memory than module-level offloading. It is also faster than leaf-level/sequential offloading, as the number of device synchronizations is reduced. Another supported feature (for CUDA devices with support for asynchronous data transfer streams) is the ability to overlap data transfer and computation to reduce the overall execution time compared to sequential offloading. This is enabled using layer prefetching with streams, i.e., the layer that is to be executed next starts onloading to the accelerator device while the current layer is being executed - this increases the memory requirements slightly. Note that this implementation also supports leaf-level offloading but can be made much faster when using streams. Args: module (`torch.nn.Module`): The module to which group offloading is applied. onload_device (`torch.device`): The device to which the group of modules are onloaded. offload_device (`torch.device`, defaults to `torch.device("cpu")`): The device to which the group of modules are offloaded. This should typically be the CPU. Default is CPU. offload_type (`str` or `GroupOffloadingType`, defaults to "block_level"): The type of offloading to be applied. Can be one of "block_level" or "leaf_level". Default is "block_level". offload_to_disk_path (`str`, *optional*, defaults to `None`): The path to the directory where parameters will be offloaded. Setting this option can be useful in limited RAM environment settings where a reasonable speed-memory trade-off is desired. num_blocks_per_group (`int`, *optional*): The number of blocks per group when using offload_type="block_level". This is required when using offload_type="block_level". 
        non_blocking (`bool`, defaults to `False`):
            If True, offloading and onloading is done with non-blocking data transfer.
        use_stream (`bool`, defaults to `False`):
            If True, offloading and onloading is done asynchronously using a CUDA stream. This can be useful for
            overlapping computation and data transfer.
        record_stream (`bool`, defaults to `False`): When enabled with `use_stream`, it marks the current tensor as
            having been used by this stream. It is faster at the expense of slightly more memory usage. Refer to the
            [PyTorch official docs](https://pytorch.org/docs/stable/generated/torch.Tensor.record_stream.html) more
            details.
        low_cpu_mem_usage (`bool`, defaults to `False`):
            If True, the CPU memory usage is minimized by pinning tensors on-the-fly instead of pre-pinning them. This
            option only matters when using streamed CPU offloading (i.e. `use_stream=True`). This can be useful when
            the CPU memory is a bottleneck but may counteract the benefits of using streams.
        block_modules (`list[str]`, *optional*):
            List of module names that should be treated as blocks for offloading. If provided, only these modules will
            be considered for block-level offloading. If not provided, the default block detection logic will be used.
        exclude_kwargs (`list[str]`, *optional*):
            List of kwarg keys that should not be processed by send_to_device. This is useful for mutable state like
            caching lists that need to maintain their object identity across forward passes. If not provided, will be
            inferred from the module's `_skip_keys` attribute if it exists.

    Example:
        ```python
        >>> from diffusers import CogVideoXTransformer3DModel
        >>> from diffusers.hooks import apply_group_offloading

        >>> transformer = CogVideoXTransformer3DModel.from_pretrained(
        ...     "THUDM/CogVideoX-5b", subfolder="transformer", torch_dtype=torch.bfloat16
        ... )

        >>> apply_group_offloading(
        ...     transformer,
        ...     onload_device=torch.device("cuda"),
        ...     offload_device=torch.device("cpu"),
        ...     offload_type="block_level",
        ...     num_blocks_per_group=2,
        ...     use_stream=True,
        ... )
        ```
    """

    # Normalize string device specs to torch.device before anything else touches them.
    onload_device = torch.device(onload_device) if isinstance(onload_device, str) else onload_device
    offload_device = torch.device(offload_device) if isinstance(offload_device, str) else offload_device
    offload_type = GroupOffloadingType(offload_type)

    stream = None
    if use_stream:
        # CUDA uses its dedicated Stream class; XPU goes through the generic torch.Stream.
        if torch.cuda.is_available():
            stream = torch.cuda.Stream()
        elif hasattr(torch, "xpu") and torch.xpu.is_available():
            stream = torch.Stream()
        else:
            raise ValueError("Using streams for data transfer requires a CUDA device, or an Intel XPU device.")

    if not use_stream and record_stream:
        raise ValueError("`record_stream` cannot be True when `use_stream=False`.")
    if offload_type == GroupOffloadingType.BLOCK_LEVEL and num_blocks_per_group is None:
        raise ValueError("`num_blocks_per_group` must be provided when using `offload_type='block_level'.")

    _raise_error_if_accelerate_model_or_sequential_hook_present(module)

    # Fall back to model-declared defaults when the caller did not specify these.
    if block_modules is None:
        block_modules = getattr(module, "_group_offload_block_modules", None)
    if exclude_kwargs is None:
        exclude_kwargs = getattr(module, "_skip_keys", None)

    config = GroupOffloadingConfig(
        onload_device=onload_device,
        offload_device=offload_device,
        offload_type=offload_type,
        num_blocks_per_group=num_blocks_per_group,
        non_blocking=non_blocking,
        stream=stream,
        record_stream=record_stream,
        low_cpu_mem_usage=low_cpu_mem_usage,
        offload_to_disk_path=offload_to_disk_path,
        block_modules=block_modules,
        exclude_kwargs=exclude_kwargs,
    )
    _apply_group_offloading(module, config)


def _apply_group_offloading(module: torch.nn.Module, config: GroupOffloadingConfig) -> None:
    # Dispatch on the (already validated) offload type.
    if config.offload_type == GroupOffloadingType.BLOCK_LEVEL:
        _apply_group_offloading_block_level(module, config)
    elif config.offload_type == GroupOffloadingType.LEAF_LEVEL:
        _apply_group_offloading_leaf_level(module, config)
    else:
        # NOTE(review): unreachable unless a new GroupOffloadingType is added without handling here;
        # consider raising ValueError instead of a bare assert (asserts are stripped under -O).
        assert False


def _apply_group_offloading_block_level(module: torch.nn.Module, config: GroupOffloadingConfig) -> None:
    r"""
    This function applies offloading to groups of torch.nn.ModuleList or torch.nn.Sequential blocks, and explicitly
    defined block modules.

    In comparison to the "leaf_level" offloading, which is more fine-grained, this offloading is done at the top-level
    blocks and modules specified in block_modules. When block_modules is provided, only those modules will be treated
    as blocks for offloading. For each specified module, recursively apply block offloading to it.
    """
    if config.stream is not None and config.num_blocks_per_group != 1:
        logger.warning(
            f"Using streams is only supported for num_blocks_per_group=1. Got {config.num_blocks_per_group=}. Setting it to 1."
        )
        config.num_blocks_per_group = 1

    block_modules = set(config.block_modules) if config.block_modules is not None else set()

    # Create module groups for ModuleList and Sequential blocks, and explicitly defined block modules
    modules_with_group_offloading = set()
    unmatched_modules = []
    matched_module_groups = []

    for name, submodule in module.named_children():
        # Check if this is an explicitly defined block module
        if name in block_modules:
            # Track submodule using a prefix to avoid filename collisions during disk offload.
            # Without this, submodules sharing the same model class would be assigned identical
            # filenames (derived from the class name).
            prefix = f"{config.module_prefix}{name}." if config.module_prefix else f"{name}."
            submodule_config = replace(config, module_prefix=prefix)
            # Recurse so the named block's own children are grouped with the same policy.
            _apply_group_offloading_block_level(submodule, submodule_config)
            modules_with_group_offloading.add(name)
        elif isinstance(submodule, (torch.nn.ModuleList, torch.nn.Sequential)):
            # Handle ModuleList and Sequential blocks as before
            for i in range(0, len(submodule), config.num_blocks_per_group):
                current_modules = list(submodule[i : i + config.num_blocks_per_group])
                if len(current_modules) == 0:
                    continue
                group_id = f"{config.module_prefix}{name}_{i}_{i + len(current_modules) - 1}"
                group = ModuleGroup(
                    modules=current_modules,
                    offload_device=config.offload_device,
                    onload_device=config.onload_device,
                    offload_to_disk_path=config.offload_to_disk_path,
                    offload_leader=current_modules[-1],
                    onload_leader=current_modules[0],
                    non_blocking=config.non_blocking,
                    stream=config.stream,
                    record_stream=config.record_stream,
                    low_cpu_mem_usage=config.low_cpu_mem_usage,
                    onload_self=True,
                    group_id=group_id,
                )
                matched_module_groups.append(group)
                for j in range(i, i + len(current_modules)):
                    modules_with_group_offloading.add(f"{name}.{j}")
        else:
            # This is an unmatched module
            unmatched_modules.append((name, submodule))

    # Apply group offloading hooks to the module groups
    for i, group in enumerate(matched_module_groups):
        for group_module in group.modules:
            _apply_group_offloading_hook(group_module, group, config=config)

    # Parameters and Buffers of the top-level module need to be offloaded/onloaded separately
    # when the forward pass of this module is called. This is because the top-level module is not
    # part of any group (as doing so would lead to no VRAM savings).
    parameters = _gather_parameters_with_no_group_offloading_parent(module, modules_with_group_offloading)
    buffers = _gather_buffers_with_no_group_offloading_parent(module, modules_with_group_offloading)
    parameters = [param for _, param in parameters]
    buffers = [buffer for _, buffer in buffers]

    # Create a group for the remaining unmatched submodules of the top-level
    # module so that they are on the correct device when the forward pass is called.
    unmatched_modules = [unmatched_module for _, unmatched_module in unmatched_modules]
    if len(unmatched_modules) > 0 or len(parameters) > 0 or len(buffers) > 0:
        unmatched_group = ModuleGroup(
            modules=unmatched_modules,
            offload_device=config.offload_device,
            onload_device=config.onload_device,
            offload_to_disk_path=config.offload_to_disk_path,
            offload_leader=module,
            onload_leader=module,
            parameters=parameters,
            buffers=buffers,
            non_blocking=False,
            stream=None,
            record_stream=False,
            onload_self=True,
            group_id=f"{config.module_prefix}{module.__class__.__name__}_unmatched_group",
        )
        if config.stream is None:
            _apply_group_offloading_hook(module, unmatched_group, config=config)
        else:
            _apply_lazy_group_offloading_hook(module, unmatched_group, config=config)


def _apply_group_offloading_leaf_level(module: torch.nn.Module, config: GroupOffloadingConfig) -> None:
    r"""
    This function applies offloading to groups of leaf modules in a torch.nn.Module. This method has minimal memory
    requirements. However, it can be slower compared to other offloading methods due to the excessive number of device
    synchronizations. When using devices that support streams to overlap data transfer and computation, this method
    can reduce memory usage without any performance degradation.
    """
    # Create module groups for leaf modules and apply group offloading hooks
    modules_with_group_offloading = set()
    for name, submodule in module.named_modules():
        if not isinstance(submodule, _GO_LC_SUPPORTED_PYTORCH_LAYERS):
            continue
        # One group per supported leaf layer; each leaf is its own offload/onload leader.
        group = ModuleGroup(
            modules=[submodule],
            offload_device=config.offload_device,
            onload_device=config.onload_device,
            offload_to_disk_path=config.offload_to_disk_path,
            offload_leader=submodule,
            onload_leader=submodule,
            non_blocking=config.non_blocking,
            stream=config.stream,
            record_stream=config.record_stream,
            low_cpu_mem_usage=config.low_cpu_mem_usage,
            onload_self=True,
            group_id=name,
        )
        _apply_group_offloading_hook(submodule, group, config=config)
        modules_with_group_offloading.add(name)

    # Parameters and Buffers at all non-leaf levels need to be offloaded/onloaded separately when the forward pass
    # of the module is called
    module_dict = dict(module.named_modules())
    parameters = _gather_parameters_with_no_group_offloading_parent(module, modules_with_group_offloading)
    buffers = _gather_buffers_with_no_group_offloading_parent(module, modules_with_group_offloading)

    # Find closest module parent for each parameter and buffer, and attach group hooks
    parent_to_parameters = {}
    for name, param in parameters:
        parent_name = _find_parent_module_in_module_dict(name, module_dict)
        if parent_name in parent_to_parameters:
            parent_to_parameters[parent_name].append(param)
        else:
            parent_to_parameters[parent_name] = [param]

    parent_to_buffers = {}
    for name, buffer in buffers:
        parent_name = _find_parent_module_in_module_dict(name, module_dict)
        if parent_name in parent_to_buffers:
            parent_to_buffers[parent_name].append(buffer)
        else:
            parent_to_buffers[parent_name] = [buffer]

    parent_names = set(parent_to_parameters.keys()) | set(parent_to_buffers.keys())
    for name in parent_names:
        parameters = parent_to_parameters.get(name, [])
        buffers = parent_to_buffers.get(name, [])
        parent_module = module_dict[name]
        # Parameter/buffer-only group: no modules, just the stray tensors owned by this parent.
        group = ModuleGroup(
            modules=[],
            offload_device=config.offload_device,
            onload_device=config.onload_device,
            offload_leader=parent_module,
            onload_leader=parent_module,
            offload_to_disk_path=config.offload_to_disk_path,
            parameters=parameters,
            buffers=buffers,
            non_blocking=config.non_blocking,
            stream=config.stream,
            record_stream=config.record_stream,
            low_cpu_mem_usage=config.low_cpu_mem_usage,
            onload_self=True,
            group_id=name,
        )
        _apply_group_offloading_hook(parent_module, group, config=config)

    if config.stream is not None:
        # When using streams, we need to know the layer execution order for applying prefetching (to overlap data
        # transfer and computation). Since we don't know the order beforehand, we apply a lazy prefetching hook that
        # will find the execution order and apply prefetching in the correct order.
        unmatched_group = ModuleGroup(
            modules=[],
            offload_device=config.offload_device,
            onload_device=config.onload_device,
            offload_to_disk_path=config.offload_to_disk_path,
            offload_leader=module,
            onload_leader=module,
            parameters=None,
            buffers=None,
            non_blocking=False,
            stream=None,
            record_stream=False,
            low_cpu_mem_usage=config.low_cpu_mem_usage,
            onload_self=True,
            group_id=_GROUP_ID_LAZY_LEAF,
        )
        _apply_lazy_group_offloading_hook(module, unmatched_group, config=config)


def _apply_group_offloading_hook(
    module: torch.nn.Module,
    group: ModuleGroup,
    *,
    config: GroupOffloadingConfig,
) -> None:
    registry = HookRegistry.check_if_exists_or_initialize(module)

    # We may have already registered a group offloading hook if the module had a torch.nn.Parameter whose parent
    # is the current module. In such cases, we don't want to overwrite the existing group offloading hook.
    if registry.get_hook(_GROUP_OFFLOADING) is None:
        hook = GroupOffloadingHook(group, config=config)
        registry.register_hook(hook, _GROUP_OFFLOADING)


def _apply_lazy_group_offloading_hook(
    module: torch.nn.Module,
    group: ModuleGroup,
    *,
    config: GroupOffloadingConfig,
) -> None:
    registry = HookRegistry.check_if_exists_or_initialize(module)

    # We may have already registered a group offloading hook if the module had a torch.nn.Parameter whose parent
    # is the current module. In such cases, we don't want to overwrite the existing group offloading hook.
    if registry.get_hook(_GROUP_OFFLOADING) is None:
        hook = GroupOffloadingHook(group, config=config)
        registry.register_hook(hook, _GROUP_OFFLOADING)

    lazy_prefetch_hook = LazyPrefetchGroupOffloadingHook()
    registry.register_hook(lazy_prefetch_hook, _LAZY_PREFETCH_GROUP_OFFLOADING)


def _gather_parameters_with_no_group_offloading_parent(
    module: torch.nn.Module, modules_with_group_offloading: Set[str]
) -> list[torch.nn.Parameter]:
    parameters = []
    for name, parameter in module.named_parameters():
        has_parent_with_group_offloading = False
        # Walk up the dotted name, stopping at the first ancestor that already has group offloading.
        atoms = name.split(".")
        while len(atoms) > 0:
            parent_name = ".".join(atoms)
            if parent_name in modules_with_group_offloading:
                has_parent_with_group_offloading = True
                break
            atoms.pop()
        if not has_parent_with_group_offloading:
            parameters.append((name, parameter))
    return parameters


def _gather_buffers_with_no_group_offloading_parent(
    module: torch.nn.Module, modules_with_group_offloading: Set[str]
) -> list[torch.Tensor]:
    buffers = []
    for name, buffer in module.named_buffers():
        has_parent_with_group_offloading = False
        # Same ancestor walk as _gather_parameters_with_no_group_offloading_parent, but for buffers.
        atoms = name.split(".")
        while len(atoms) > 0:
            parent_name = ".".join(atoms)
            if parent_name in modules_with_group_offloading:
                has_parent_with_group_offloading = True
                break
            atoms.pop()
        if not has_parent_with_group_offloading:
            buffers.append((name, buffer))
    return buffers


def _find_parent_module_in_module_dict(name: str, module_dict: dict[str, torch.nn.Module]) -> str:
    atoms = name.split(".")
    # Longest-prefix match: drop trailing atoms until the dotted name is found in module_dict.
    while len(atoms) > 0:
        parent_name = ".".join(atoms)
        if parent_name in module_dict:
            return parent_name
        atoms.pop()
    # Empty string denotes "no parent found" (the root module key in named_modules() is "").
    return ""


def _raise_error_if_accelerate_model_or_sequential_hook_present(module: torch.nn.Module) -> None:
    if not is_accelerate_available():
        return
    for name, submodule in module.named_modules():
        if not hasattr(submodule, "_hf_hook"):
            continue
        if isinstance(submodule._hf_hook, (AlignDevicesHook, CpuOffload)):
            raise ValueError(
                f"Cannot apply group offloading to a module that is already applying an alternative "
                f"offloading strategy from Accelerate. If you want to apply group offloading, please "
                f"disable the existing offloading strategy first. Offending module: {name} ({type(submodule)})"
            )


def _get_top_level_group_offload_hook(module: torch.nn.Module) -> GroupOffloadingHook | None:
    # Returns the first group offloading hook found in module order, or None if none is attached.
    for submodule in module.modules():
        if hasattr(submodule, "_diffusers_hook"):
            group_offloading_hook = submodule._diffusers_hook.get_hook(_GROUP_OFFLOADING)
            if group_offloading_hook is not None:
                return group_offloading_hook
    return None


def _is_group_offload_enabled(module: torch.nn.Module) -> bool:
    top_level_group_offload_hook = _get_top_level_group_offload_hook(module)
    return top_level_group_offload_hook is not None


def _get_group_onload_device(module: torch.nn.Module) -> torch.device:
    top_level_group_offload_hook = _get_top_level_group_offload_hook(module)
    if top_level_group_offload_hook is not None:
        return top_level_group_offload_hook.config.onload_device
    raise ValueError("Group offloading is not enabled for the provided module.")


def _compute_group_hash(group_id):
    hashed_id = hashlib.sha256(group_id.encode("utf-8")).hexdigest()
    # first 16 characters for a reasonably short but unique name
    return hashed_id[:16]


def _maybe_remove_and_reapply_group_offloading(module: torch.nn.Module) -> None:
    r"""
    Removes the group offloading hook from the module and re-applies it. This is useful when the module has been
    modified in-place and the group offloading hook references-to-tensors needs to be updated. The in-place
    modification can happen in a number of ways, for example, fusing QKV or unloading/loading LoRAs on-the-fly.

    In this implementation, we make an assumption that group offloading has only been applied at the top-level module,
    and therefore all submodules have the same onload and offload devices. If this assumption is not true, say in the
    case where user has applied group offloading at multiple levels, this function will not work as expected.

    There is some performance penalty associated with doing this when non-default streams are used, because we need to
    retrace the execution order of the layers with `LazyPrefetchGroupOffloadingHook`.
    """
    top_level_group_offload_hook = _get_top_level_group_offload_hook(module)

    # No-op when group offloading was never enabled on this module.
    if top_level_group_offload_hook is None:
        return

    # Strip all offloading-related hooks recursively, then re-apply with the saved config.
    registry = HookRegistry.check_if_exists_or_initialize(module)
    registry.remove_hook(_GROUP_OFFLOADING, recurse=True)
    registry.remove_hook(_LAYER_EXECUTION_TRACKER, recurse=True)
    registry.remove_hook(_LAZY_PREFETCH_GROUP_OFFLOADING, recurse=True)

    _apply_group_offloading(module, top_level_group_offload_hook.config)
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/hooks/group_offloading.py", "license": "Apache License 2.0", "lines": 807, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:tests/hooks/test_group_offloading.py
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import contextlib
import gc
import unittest

import torch
from parameterized import parameterized

from diffusers import AutoencoderKL
from diffusers.hooks import HookRegistry, ModelHook
from diffusers.models import ModelMixin
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.utils import get_logger
from diffusers.utils.import_utils import compare_versions

from ..testing_utils import (
    backend_empty_cache,
    backend_max_memory_allocated,
    backend_reset_peak_memory_stats,
    require_torch_accelerator,
    torch_device,
)


class DummyBlock(torch.nn.Module):
    # Minimal Linear -> ReLU -> Linear block used as the repeating unit for the dummy models below.
    def __init__(self, in_features: int, hidden_features: int, out_features: int) -> None:
        super().__init__()

        self.proj_in = torch.nn.Linear(in_features, hidden_features)
        self.activation = torch.nn.ReLU()
        self.proj_out = torch.nn.Linear(hidden_features, out_features)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = self.proj_in(x)
        x = self.activation(x)
        x = self.proj_out(x)
        return x


class DummyModel(ModelMixin):
    # Simple stack of DummyBlocks inside a ModuleList so block-level offloading has something to group.
    def __init__(self, in_features: int, hidden_features: int, out_features: int, num_layers: int) -> None:
        super().__init__()

        self.linear_1 = torch.nn.Linear(in_features, hidden_features)
        self.activation = torch.nn.ReLU()
        self.blocks = torch.nn.ModuleList(
            [DummyBlock(hidden_features, hidden_features, hidden_features) for _ in range(num_layers)]
        )
        self.linear_2 = torch.nn.Linear(hidden_features, out_features)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = self.linear_1(x)
        x = self.activation(x)
        for block in self.blocks:
            x = block(x)
        x = self.linear_2(x)
        return x


# This model implementation contains one type of block (single_blocks) instantiated before another type of block
# (double_blocks). The invocation order of these blocks, however, is first the double_blocks and then the
# single_blocks. With group offloading implementation before https://github.com/huggingface/diffusers/pull/11375,
# such a modeling implementation would result in a device mismatch error because of the assumptions made by the code.
# The failure case occurs when using: offload_type="block_level", num_blocks_per_group=2, use_stream=True
# Post the linked PR, the implementation will work as expected.
class DummyModelWithMultipleBlocks(ModelMixin):
    def __init__(
        self, in_features: int, hidden_features: int, out_features: int, num_layers: int, num_single_layers: int
    ) -> None:
        super().__init__()

        self.linear_1 = torch.nn.Linear(in_features, hidden_features)
        self.activation = torch.nn.ReLU()
        self.single_blocks = torch.nn.ModuleList(
            [DummyBlock(hidden_features, hidden_features, hidden_features) for _ in range(num_single_layers)]
        )
        self.double_blocks = torch.nn.ModuleList(
            [DummyBlock(hidden_features, hidden_features, hidden_features) for _ in range(num_layers)]
        )
        self.linear_2 = torch.nn.Linear(hidden_features, out_features)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = self.linear_1(x)
        x = self.activation(x)
        # Invocation order (double then single) deliberately differs from declaration order (single then double).
        for block in self.double_blocks:
            x = block(x)
        for block in self.single_blocks:
            x = block(x)
        x = self.linear_2(x)
        return x


# Test for https://github.com/huggingface/diffusers/pull/12077
class DummyModelWithLayerNorm(ModelMixin):
    # The elementwise-affine LayerNorm contributes parameters that belong to no block, exercising the
    # parameter-only module group path in group offloading.
    def __init__(self, in_features: int, hidden_features: int, out_features: int, num_layers: int) -> None:
        super().__init__()

        self.linear_1 = torch.nn.Linear(in_features, hidden_features)
        self.activation = torch.nn.ReLU()
        self.blocks = torch.nn.ModuleList(
            [DummyBlock(hidden_features, hidden_features, hidden_features) for _ in range(num_layers)]
        )
        self.layer_norm = torch.nn.LayerNorm(hidden_features, elementwise_affine=True)
        self.linear_2 = torch.nn.Linear(hidden_features, out_features)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = self.linear_1(x)
        x = self.activation(x)
        for block in self.blocks:
            x = block(x)
        x = self.layer_norm(x)
        x = self.linear_2(x)
        return x


class DummyPipeline(DiffusionPipeline):
    model_cpu_offload_seq = "model"

    def __init__(self, model: torch.nn.Module) -> None:
        super().__init__()

        self.register_modules(model=model)

    def __call__(self, x: torch.Tensor) -> torch.Tensor:
        # Two model invocations per call so repeated onload/offload cycles get exercised.
        for _ in range(2):
            x = x + 0.1 * self.model(x)
        return x


class LayerOutputTrackerHook(ModelHook):
    # Records every forward output of the module it is attached to, for later comparison.
    def __init__(self):
        super().__init__()
        self.outputs = []

    def post_forward(self, module, output):
        self.outputs.append(output)
        return output


# Model with only standalone computational layers at top level
class DummyModelWithStandaloneLayers(ModelMixin):
    def __init__(self, in_features: int, hidden_features: int, out_features: int) -> None:
        super().__init__()

        self.layer1 = torch.nn.Linear(in_features, hidden_features)
        self.activation = torch.nn.ReLU()
        self.layer2 = torch.nn.Linear(hidden_features, hidden_features)
        self.layer3 = torch.nn.Linear(hidden_features, out_features)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = self.layer1(x)
        x = self.activation(x)
        x = self.layer2(x)
        x = self.layer3(x)
        return x


# Model with deeply nested structure
class DummyModelWithDeeplyNestedBlocks(ModelMixin):
    def __init__(self, in_features: int, hidden_features: int, out_features: int) -> None:
        super().__init__()

        self.input_layer = torch.nn.Linear(in_features, hidden_features)
        self.container = ContainerWithNestedModuleList(hidden_features)
        self.output_layer = torch.nn.Linear(hidden_features, out_features)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = self.input_layer(x)
        x = self.container(x)
        x = self.output_layer(x)
        return x


class ContainerWithNestedModuleList(torch.nn.Module):
    def __init__(self, features: int) -> None:
        super().__init__()

        # Top-level computational layer
        self.proj_in = torch.nn.Linear(features, features)

        # Nested container with ModuleList
        self.nested_container = NestedContainer(features)

        # Another top-level computational layer
        self.proj_out = torch.nn.Linear(features, features)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = self.proj_in(x)
        x = self.nested_container(x)
        x = self.proj_out(x)
        return x


class NestedContainer(torch.nn.Module):
    def __init__(self, features: int) -> None:
        super().__init__()

        self.blocks = torch.nn.ModuleList([torch.nn.Linear(features, features), torch.nn.Linear(features, features)])
        self.norm = torch.nn.LayerNorm(features)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        for block in self.blocks:
            x = block(x)
        x = self.norm(x)
        return x


@require_torch_accelerator
class GroupOffloadTests(unittest.TestCase):
    in_features = 64
    hidden_features = 256
    out_features = 64
    num_layers = 4

    def setUp(self):
        with torch.no_grad():
            self.model = self.get_model()
            self.input = torch.randn((4, self.in_features)).to(torch_device)

    def tearDown(self):
        super().tearDown()
        del self.model
        del self.input
        gc.collect()
        backend_empty_cache(torch_device)
        backend_reset_peak_memory_stats(torch_device)

    def get_model(self):
        # Fixed seed so every model instance in a test shares identical weights.
        torch.manual_seed(0)
        return DummyModel(
            in_features=self.in_features,
            hidden_features=self.hidden_features,
            out_features=self.out_features,
            num_layers=self.num_layers,
        )

    def test_offloading_forward_pass(self):
        @torch.no_grad()
        def run_forward(model):
            gc.collect()
            backend_empty_cache(torch_device)
            backend_reset_peak_memory_stats(torch_device)
            # Every hooked module must carry a group_offloading hook before the forward pass.
            self.assertTrue(
                all(
                    module._diffusers_hook.get_hook("group_offloading") is not None
                    for module in model.modules()
                    if hasattr(module, "_diffusers_hook")
                )
            )
            model.eval()
            output = model(self.input)[0].cpu()
            max_memory_allocated = backend_max_memory_allocated(torch_device)
            return output, max_memory_allocated
        self.model.to(torch_device)
        output_without_group_offloading, mem_baseline = run_forward(self.model)
        self.model.to("cpu")

        model = self.get_model()
        model.enable_group_offload(torch_device, offload_type="block_level", num_blocks_per_group=3)
        output_with_group_offloading1, mem1 = run_forward(model)

        model = self.get_model()
        model.enable_group_offload(torch_device, offload_type="block_level", num_blocks_per_group=1)
        output_with_group_offloading2, mem2 = run_forward(model)

        model = self.get_model()
        model.enable_group_offload(torch_device, offload_type="block_level", num_blocks_per_group=1, use_stream=True)
        output_with_group_offloading3, mem3 = run_forward(model)

        model = self.get_model()
        model.enable_group_offload(torch_device, offload_type="leaf_level")
        output_with_group_offloading4, mem4 = run_forward(model)

        model = self.get_model()
        model.enable_group_offload(torch_device, offload_type="leaf_level", use_stream=True)
        output_with_group_offloading5, mem5 = run_forward(model)

        # Precision assertions - offloading should not impact the output
        self.assertTrue(torch.allclose(output_without_group_offloading, output_with_group_offloading1, atol=1e-5))
        self.assertTrue(torch.allclose(output_without_group_offloading, output_with_group_offloading2, atol=1e-5))
        self.assertTrue(torch.allclose(output_without_group_offloading, output_with_group_offloading3, atol=1e-5))
        self.assertTrue(torch.allclose(output_without_group_offloading, output_with_group_offloading4, atol=1e-5))
        self.assertTrue(torch.allclose(output_without_group_offloading, output_with_group_offloading5, atol=1e-5))

        # Memory assertions - offloading should reduce memory usage
        self.assertTrue(mem4 <= mem5 < mem2 <= mem3 < mem1 < mem_baseline)

    def test_warning_logged_if_group_offloaded_module_moved_to_accelerator(self):
        if torch.device(torch_device).type not in ["cuda", "xpu"]:
            return
        self.model.enable_group_offload(torch_device, offload_type="block_level", num_blocks_per_group=3)
        logger = get_logger("diffusers.models.modeling_utils")
        logger.setLevel("INFO")
        with self.assertLogs(logger, level="WARNING") as cm:
            self.model.to(torch_device)
        self.assertIn(f"The module '{self.model.__class__.__name__}' is group offloaded", cm.output[0])

    def test_warning_logged_if_group_offloaded_pipe_moved_to_accelerator(self):
        if torch.device(torch_device).type not in ["cuda", "xpu"]:
            return
        pipe = DummyPipeline(self.model)
        self.model.enable_group_offload(torch_device, offload_type="block_level", num_blocks_per_group=3)
        logger = get_logger("diffusers.pipelines.pipeline_utils")
        logger.setLevel("INFO")
        with self.assertLogs(logger, level="WARNING") as cm:
            pipe.to(torch_device)
        self.assertIn(f"The module '{self.model.__class__.__name__}' is group offloaded", cm.output[0])

    def test_error_raised_if_streams_used_and_no_accelerator_device(self):
        # Temporarily monkeypatch accelerator availability so stream creation must fail, then restore it.
        torch_accelerator_module = getattr(torch, torch_device, torch.cuda)
        original_is_available = torch_accelerator_module.is_available
        torch_accelerator_module.is_available = lambda: False
        with self.assertRaises(ValueError):
            self.model.enable_group_offload(
                onload_device=torch.device(torch_device), offload_type="leaf_level", use_stream=True
            )
        torch_accelerator_module.is_available = original_is_available

    def test_error_raised_if_supports_group_offloading_false(self):
        self.model._supports_group_offloading = False
        with self.assertRaisesRegex(ValueError, "does not support group offloading"):
            self.model.enable_group_offload(onload_device=torch.device(torch_device))

    def test_error_raised_if_model_offloading_applied_on_group_offloaded_module(self):
        pipe = DummyPipeline(self.model)
        pipe.model.enable_group_offload(torch_device, offload_type="block_level", num_blocks_per_group=3)
        with self.assertRaisesRegex(ValueError, "You are trying to apply model/sequential CPU offloading"):
            pipe.enable_model_cpu_offload()

    def test_error_raised_if_sequential_offloading_applied_on_group_offloaded_module(self):
        pipe = DummyPipeline(self.model)
        pipe.model.enable_group_offload(torch_device, offload_type="block_level", num_blocks_per_group=3)
        with self.assertRaisesRegex(ValueError, "You are trying to apply model/sequential CPU offloading"):
            pipe.enable_sequential_cpu_offload()

    def test_error_raised_if_group_offloading_applied_on_model_offloaded_module(self):
        pipe = DummyPipeline(self.model)
        pipe.enable_model_cpu_offload()
        with self.assertRaisesRegex(ValueError, "Cannot apply group offloading"):
            pipe.model.enable_group_offload(torch_device, offload_type="block_level", num_blocks_per_group=3)

    def test_error_raised_if_group_offloading_applied_on_sequential_offloaded_module(self):
        pipe = DummyPipeline(self.model)
        pipe.enable_sequential_cpu_offload()
        with self.assertRaisesRegex(ValueError, "Cannot apply group offloading"):
            pipe.model.enable_group_offload(torch_device, offload_type="block_level", num_blocks_per_group=3)

    def test_block_level_stream_with_invocation_order_different_from_initialization_order(self):
        if torch.device(torch_device).type not in ["cuda", "xpu"]:
            return
        model = DummyModelWithMultipleBlocks(
            in_features=self.in_features,
            hidden_features=self.hidden_features,
            out_features=self.out_features,
            num_layers=self.num_layers,
            num_single_layers=self.num_layers + 1,
        )
        model.enable_group_offload(torch_device, offload_type="block_level", num_blocks_per_group=1, use_stream=True)

        context = contextlib.nullcontext()
        if compare_versions("diffusers", "<=", "0.33.0"):
            # Will raise a device mismatch RuntimeError mentioning weights are on CPU but input is on device
            context = self.assertRaisesRegex(RuntimeError, "Expected all tensors to be on the same device")

        with context:
            model(self.input)

    @parameterized.expand([("block_level",), ("leaf_level",)])
    def test_block_level_offloading_with_parameter_only_module_group(self, offload_type: str):
        if torch.device(torch_device).type not in ["cuda", "xpu"]:
            return

        def apply_layer_output_tracker_hook(model: DummyModelWithLayerNorm):
            for name, module in model.named_modules():
                registry = HookRegistry.check_if_exists_or_initialize(module)
                hook = LayerOutputTrackerHook()
                registry.register_hook(hook, "layer_output_tracker")

        model_ref = DummyModelWithLayerNorm(128, 256, 128, 2)
        model = DummyModelWithLayerNorm(128, 256, 128, 2)
        model.load_state_dict(model_ref.state_dict(), strict=True)

        model_ref.to(torch_device)
        model.enable_group_offload(torch_device, offload_type=offload_type, num_blocks_per_group=1, use_stream=True)

        apply_layer_output_tracker_hook(model_ref)
        apply_layer_output_tracker_hook(model)

        x = torch.randn(2, 128).to(torch_device)

        out_ref = model_ref(x)
        out = model(x)
        self.assertTrue(torch.allclose(out_ref, out, atol=1e-5), "Outputs do not match.")

        num_repeats = 2
        for i in range(num_repeats):
            out_ref = model_ref(x)
            out = model(x)

        self.assertTrue(torch.allclose(out_ref, out, atol=1e-5), "Outputs do not match after multiple invocations.")

        # Compare every tracked per-layer output against the reference model's first recorded output.
        for (ref_name, ref_module), (name, module) in zip(model_ref.named_modules(), model.named_modules()):
            assert ref_name == name
            ref_outputs = (
                HookRegistry.check_if_exists_or_initialize(ref_module).get_hook("layer_output_tracker").outputs
            )
            outputs = HookRegistry.check_if_exists_or_initialize(module).get_hook("layer_output_tracker").outputs
            cumulated_absmax = 0.0
            for i in range(len(outputs)):
                diff = ref_outputs[0] - outputs[i]
                absdiff = diff.abs()
                absmax = absdiff.max().item()
                cumulated_absmax += absmax
            self.assertLess(
                cumulated_absmax, 1e-5, f"Output differences for {name} exceeded threshold: {cumulated_absmax:.5f}"
            )

    def test_vae_like_model_without_streams(self):
        """Test VAE-like model with block-level offloading but without streams."""
        if torch.device(torch_device).type not in ["cuda", "xpu"]:
            return

        config = self.get_autoencoder_kl_config()
        model = AutoencoderKL(**config)
        model_ref = AutoencoderKL(**config)
        model_ref.load_state_dict(model.state_dict(), strict=True)

        model_ref.to(torch_device)
        model.enable_group_offload(torch_device, offload_type="block_level", num_blocks_per_group=1, use_stream=False)

        x = torch.randn(2, 3, 32, 32).to(torch_device)
        with torch.no_grad():
            out_ref = model_ref(x).sample
            out = model(x).sample

        self.assertTrue(
            torch.allclose(out_ref, out, atol=1e-5), "Outputs do not match for VAE-like model without streams."
        )

    def test_model_with_only_standalone_layers(self):
        """Test that models with only standalone layers (no ModuleList/Sequential) work with block-level offloading."""
        if torch.device(torch_device).type not in ["cuda", "xpu"]:
            return

        model = DummyModelWithStandaloneLayers(in_features=64, hidden_features=128, out_features=64)
        model_ref = DummyModelWithStandaloneLayers(in_features=64, hidden_features=128, out_features=64)
        model_ref.load_state_dict(model.state_dict(), strict=True)

        model_ref.to(torch_device)
        model.enable_group_offload(torch_device, offload_type="block_level", num_blocks_per_group=1, use_stream=True)

        x = torch.randn(2, 64).to(torch_device)
        with torch.no_grad():
            for i in range(2):
                out_ref = model_ref(x)
                out = model(x)
                self.assertTrue(
                    torch.allclose(out_ref, out, atol=1e-5),
                    f"Outputs do not match at iteration {i} for model with standalone layers.",
                )

    @parameterized.expand([("block_level",), ("leaf_level",)])
    def test_standalone_conv_layers_with_both_offload_types(self, offload_type: str):
        """Test that standalone Conv2d layers work correctly with both block-level and leaf-level offloading."""
        if torch.device(torch_device).type not in ["cuda", "xpu"]:
            return

        config = self.get_autoencoder_kl_config()
        model = AutoencoderKL(**config)
        model_ref = AutoencoderKL(**config)
        model_ref.load_state_dict(model.state_dict(), strict=True)

        model_ref.to(torch_device)
        model.enable_group_offload(torch_device, offload_type=offload_type, num_blocks_per_group=1, use_stream=True)

        x = torch.randn(2, 3, 32, 32).to(torch_device)
        with torch.no_grad():
            out_ref = model_ref(x).sample
            out = model(x).sample

        self.assertTrue(
            torch.allclose(out_ref, out, atol=1e-5),
            f"Outputs do not match for standalone Conv layers with {offload_type}.",
        )
def test_multiple_invocations_with_vae_like_model(self): """Test that multiple forward passes work correctly with VAE-like model.""" if torch.device(torch_device).type not in ["cuda", "xpu"]: return config = self.get_autoencoder_kl_config() model = AutoencoderKL(**config) model_ref = AutoencoderKL(**config) model_ref.load_state_dict(model.state_dict(), strict=True) model_ref.to(torch_device) model.enable_group_offload(torch_device, offload_type="block_level", num_blocks_per_group=1, use_stream=True) x = torch.randn(2, 3, 32, 32).to(torch_device) with torch.no_grad(): for i in range(2): out_ref = model_ref(x).sample out = model(x).sample self.assertTrue(torch.allclose(out_ref, out, atol=1e-5), f"Outputs do not match at iteration {i}.") def test_nested_container_parameters_offloading(self): """Test that parameters from non-computational layers in nested containers are handled correctly.""" if torch.device(torch_device).type not in ["cuda", "xpu"]: return model = DummyModelWithDeeplyNestedBlocks(in_features=64, hidden_features=128, out_features=64) model_ref = DummyModelWithDeeplyNestedBlocks(in_features=64, hidden_features=128, out_features=64) model_ref.load_state_dict(model.state_dict(), strict=True) model_ref.to(torch_device) model.enable_group_offload(torch_device, offload_type="block_level", num_blocks_per_group=1, use_stream=True) x = torch.randn(2, 64).to(torch_device) with torch.no_grad(): for i in range(2): out_ref = model_ref(x) out = model(x) self.assertTrue( torch.allclose(out_ref, out, atol=1e-5), f"Outputs do not match at iteration {i} for nested parameters.", ) def get_autoencoder_kl_config(self, block_out_channels=None, norm_num_groups=None): block_out_channels = block_out_channels or [2, 4] norm_num_groups = norm_num_groups or 2 init_dict = { "block_out_channels": block_out_channels, "in_channels": 3, "out_channels": 3, "down_block_types": ["DownEncoderBlock2D"] * len(block_out_channels), "up_block_types": ["UpDecoderBlock2D"] * 
len(block_out_channels), "latent_channels": 4, "norm_num_groups": norm_num_groups, "layers_per_block": 1, } return init_dict
{ "repo_id": "huggingface/diffusers", "file_path": "tests/hooks/test_group_offloading.py", "license": "Apache License 2.0", "lines": 451, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/diffusers:src/diffusers/models/controlnets/multicontrolnet_union.py
import os from typing import Any, Callable import torch from torch import nn from ...utils import logging from ..controlnets.controlnet import ControlNetOutput from ..controlnets.controlnet_union import ControlNetUnionModel from ..modeling_utils import ModelMixin logger = logging.get_logger(__name__) class MultiControlNetUnionModel(ModelMixin): r""" Multiple `ControlNetUnionModel` wrapper class for Multi-ControlNet-Union. This module is a wrapper for multiple instances of the `ControlNetUnionModel`. The `forward()` API is designed to be compatible with `ControlNetUnionModel`. Args: controlnets (`list[ControlNetUnionModel]`): Provides additional conditioning to the unet during the denoising process. You must set multiple `ControlNetUnionModel` as a list. """ def __init__(self, controlnets: list[ControlNetUnionModel] | tuple[ControlNetUnionModel]): super().__init__() self.nets = nn.ModuleList(controlnets) def forward( self, sample: torch.Tensor, timestep: torch.Tensor | float | int, encoder_hidden_states: torch.Tensor, controlnet_cond: list[torch.tensor], control_type: list[torch.Tensor], control_type_idx: list[list[int]], conditioning_scale: list[float], class_labels: torch.Tensor | None = None, timestep_cond: torch.Tensor | None = None, attention_mask: torch.Tensor | None = None, added_cond_kwargs: dict[str, torch.Tensor] | None = None, cross_attention_kwargs: dict[str, Any] | None = None, guess_mode: bool = False, return_dict: bool = True, ) -> ControlNetOutput | tuple: down_block_res_samples, mid_block_res_sample = None, None for i, (image, ctype, ctype_idx, scale, controlnet) in enumerate( zip(controlnet_cond, control_type, control_type_idx, conditioning_scale, self.nets) ): if scale == 0.0: continue down_samples, mid_sample = controlnet( sample=sample, timestep=timestep, encoder_hidden_states=encoder_hidden_states, controlnet_cond=image, control_type=ctype, control_type_idx=ctype_idx, conditioning_scale=scale, class_labels=class_labels, 
timestep_cond=timestep_cond, attention_mask=attention_mask, added_cond_kwargs=added_cond_kwargs, cross_attention_kwargs=cross_attention_kwargs, from_multi=True, guess_mode=guess_mode, return_dict=return_dict, ) # merge samples if down_block_res_samples is None and mid_block_res_sample is None: down_block_res_samples, mid_block_res_sample = down_samples, mid_sample else: down_block_res_samples = [ samples_prev + samples_curr for samples_prev, samples_curr in zip(down_block_res_samples, down_samples) ] mid_block_res_sample += mid_sample return down_block_res_samples, mid_block_res_sample # Copied from diffusers.models.controlnets.multicontrolnet.MultiControlNetModel.save_pretrained with ControlNet->ControlNetUnion def save_pretrained( self, save_directory: str | os.PathLike, is_main_process: bool = True, save_function: Callable = None, safe_serialization: bool = True, variant: str | None = None, ): """ Save a model and its configuration file to a directory, so that it can be re-loaded using the `[`~models.controlnets.multicontrolnet.MultiControlNetUnionModel.from_pretrained`]` class method. Arguments: save_directory (`str` or `os.PathLike`): Directory to which to save. Will be created if it doesn't exist. is_main_process (`bool`, *optional*, defaults to `True`): Whether the process calling this is the main process or not. Useful when in distributed training like TPUs and need to call this function on all processes. In this case, set `is_main_process=True` only on the main process to avoid race conditions. save_function (`Callable`): The function to use to save the state dictionary. Useful on distributed training like TPUs when one need to replace `torch.save` by another method. Can be configured with the environment variable `DIFFUSERS_SAVE_MODE`. safe_serialization (`bool`, *optional*, defaults to `True`): Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`). 
variant (`str`, *optional*): If specified, weights are saved in the format pytorch_model.<variant>.bin. """ for idx, controlnet in enumerate(self.nets): suffix = "" if idx == 0 else f"_{idx}" controlnet.save_pretrained( save_directory + suffix, is_main_process=is_main_process, save_function=save_function, safe_serialization=safe_serialization, variant=variant, ) @classmethod # Copied from diffusers.models.controlnets.multicontrolnet.MultiControlNetModel.from_pretrained with ControlNet->ControlNetUnion def from_pretrained(cls, pretrained_model_path: str | os.PathLike | None, **kwargs): r""" Instantiate a pretrained MultiControlNetUnion model from multiple pre-trained controlnet models. The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated). To train the model, you should first set it back in training mode with `model.train()`. The warning *Weights from XXX not initialized from pretrained model* means that the weights of XXX do not come pretrained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning task. The warning *Weights from XXX not used in YYY* means that the layer XXX is not used by YYY, therefore those weights are discarded. Parameters: pretrained_model_path (`os.PathLike`): A path to a *directory* containing model weights saved using [`~models.controlnets.multicontrolnet.MultiControlNetUnionModel.save_pretrained`], e.g., `./my_model_directory/controlnet`. torch_dtype (`torch.dtype`, *optional*): Override the default `torch.dtype` and load the model under this dtype. output_loading_info(`bool`, *optional*, defaults to `False`): Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages. device_map (`str` or `dict[str, int | str | torch.device]`, *optional*): A map that specifies where each submodule should go. 
It doesn't need to be refined to each parameter/buffer name, once a given module name is inside, every submodule of it will be sent to the same device. To have Accelerate compute the most optimized `device_map` automatically, set `device_map="auto"`. For more information about each option see [designing a device map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map). max_memory (`Dict`, *optional*): A dictionary device identifier to maximum memory. Will default to the maximum memory available for each GPU and the available CPU RAM if unset. low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`): Speed up model loading by not initializing the weights and only loading the pre-trained weights. This also tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model. This is only supported when torch version >= 1.9.0. If you are using an older version of torch, setting this argument to `True` will raise an error. variant (`str`, *optional*): If specified load weights from `variant` filename, *e.g.* pytorch_model.<variant>.bin. `variant` is ignored when using `from_flax`. use_safetensors (`bool`, *optional*, defaults to `None`): If set to `None`, the `safetensors` weights will be downloaded if they're available **and** if the `safetensors` library is installed. If set to `True`, the model will be forcibly loaded from `safetensors` weights. If set to `False`, loading will *not* use `safetensors`. """ idx = 0 controlnets = [] # load controlnet and append to list until no controlnet directory exists anymore # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained` # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ... 
model_path_to_load = pretrained_model_path while os.path.isdir(model_path_to_load): controlnet = ControlNetUnionModel.from_pretrained(model_path_to_load, **kwargs) controlnets.append(controlnet) idx += 1 model_path_to_load = pretrained_model_path + f"_{idx}" logger.info(f"{len(controlnets)} controlnets loaded from {pretrained_model_path}.") if len(controlnets) == 0: raise ValueError( f"No ControlNetUnions found under {os.path.dirname(pretrained_model_path)}. Expected at least {pretrained_model_path + '_0'}." ) return cls(controlnets)
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/models/controlnets/multicontrolnet_union.py", "license": "Apache License 2.0", "lines": 170, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
documentation
huggingface/diffusers:tests/single_file/test_lumina2_transformer.py
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from diffusers import ( Lumina2Transformer2DModel, ) from ..testing_utils import ( enable_full_determinism, ) from .single_file_testing_utils import SingleFileModelTesterMixin enable_full_determinism() class TestLumina2Transformer2DModelSingleFile(SingleFileModelTesterMixin): model_class = Lumina2Transformer2DModel ckpt_path = "https://huggingface.co/Comfy-Org/Lumina_Image_2.0_Repackaged/blob/main/split_files/diffusion_models/lumina_2_model_bf16.safetensors" alternate_keys_ckpt_paths = [ "https://huggingface.co/Comfy-Org/Lumina_Image_2.0_Repackaged/blob/main/split_files/diffusion_models/lumina_2_model_bf16.safetensors" ] repo_id = "Alpha-VLLM/Lumina-Image-2.0" subfolder = "transformer"
{ "repo_id": "huggingface/diffusers", "file_path": "tests/single_file/test_lumina2_transformer.py", "license": "Apache License 2.0", "lines": 30, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/diffusers:src/diffusers/models/transformers/transformer_lumina2.py
# Copyright 2025 Alpha-VLLM Authors and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math from typing import Any import torch import torch.nn as nn import torch.nn.functional as F from ...configuration_utils import ConfigMixin, register_to_config from ...loaders import PeftAdapterMixin from ...loaders.single_file_model import FromOriginalModelMixin from ...utils import apply_lora_scale, logging from ..attention import LuminaFeedForward from ..attention_processor import Attention from ..embeddings import TimestepEmbedding, Timesteps, apply_rotary_emb, get_1d_rotary_pos_embed from ..modeling_outputs import Transformer2DModelOutput from ..modeling_utils import ModelMixin from ..normalization import LuminaLayerNormContinuous, LuminaRMSNormZero, RMSNorm logger = logging.get_logger(__name__) # pylint: disable=invalid-name class Lumina2CombinedTimestepCaptionEmbedding(nn.Module): def __init__( self, hidden_size: int = 4096, cap_feat_dim: int = 2048, frequency_embedding_size: int = 256, norm_eps: float = 1e-5, ) -> None: super().__init__() self.time_proj = Timesteps( num_channels=frequency_embedding_size, flip_sin_to_cos=True, downscale_freq_shift=0.0 ) self.timestep_embedder = TimestepEmbedding( in_channels=frequency_embedding_size, time_embed_dim=min(hidden_size, 1024) ) self.caption_embedder = nn.Sequential( RMSNorm(cap_feat_dim, eps=norm_eps), nn.Linear(cap_feat_dim, hidden_size, bias=True) ) def forward( self, hidden_states: 
torch.Tensor, timestep: torch.Tensor, encoder_hidden_states: torch.Tensor ) -> tuple[torch.Tensor, torch.Tensor]: timestep_proj = self.time_proj(timestep).type_as(hidden_states) time_embed = self.timestep_embedder(timestep_proj) caption_embed = self.caption_embedder(encoder_hidden_states) return time_embed, caption_embed class Lumina2AttnProcessor2_0: r""" Processor for implementing scaled dot-product attention (enabled by default if you're using PyTorch 2.0). This is used in the Lumina2Transformer2DModel model. It applies normalization and RoPE on query and key vectors. """ def __init__(self): if not hasattr(F, "scaled_dot_product_attention"): raise ImportError("AttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.") def __call__( self, attn: Attention, hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor, attention_mask: torch.Tensor | None = None, image_rotary_emb: torch.Tensor | None = None, base_sequence_length: int | None = None, ) -> torch.Tensor: batch_size, sequence_length, _ = hidden_states.shape # Get Query-Key-Value Pair query = attn.to_q(hidden_states) key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) query_dim = query.shape[-1] inner_dim = key.shape[-1] head_dim = query_dim // attn.heads dtype = query.dtype # Get key-value heads kv_heads = inner_dim // head_dim query = query.view(batch_size, -1, attn.heads, head_dim) key = key.view(batch_size, -1, kv_heads, head_dim) value = value.view(batch_size, -1, kv_heads, head_dim) # Apply Query-Key Norm if needed if attn.norm_q is not None: query = attn.norm_q(query) if attn.norm_k is not None: key = attn.norm_k(key) # Apply RoPE if needed if image_rotary_emb is not None: query = apply_rotary_emb(query, image_rotary_emb, use_real=False) key = apply_rotary_emb(key, image_rotary_emb, use_real=False) query, key = query.to(dtype), key.to(dtype) # Apply proportional attention if true if base_sequence_length is not None: softmax_scale = 
math.sqrt(math.log(sequence_length, base_sequence_length)) * attn.scale else: softmax_scale = attn.scale # perform Grouped-qurey Attention (GQA) n_rep = attn.heads // kv_heads if n_rep >= 1: key = key.unsqueeze(3).repeat(1, 1, 1, n_rep, 1).flatten(2, 3) value = value.unsqueeze(3).repeat(1, 1, 1, n_rep, 1).flatten(2, 3) # scaled_dot_product_attention expects attention_mask shape to be # (batch, heads, source_length, target_length) if attention_mask is not None: attention_mask = attention_mask.bool().view(batch_size, 1, 1, -1) query = query.transpose(1, 2) key = key.transpose(1, 2) value = value.transpose(1, 2) hidden_states = F.scaled_dot_product_attention( query, key, value, attn_mask=attention_mask, scale=softmax_scale ) hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) hidden_states = hidden_states.type_as(query) # linear proj hidden_states = attn.to_out[0](hidden_states) hidden_states = attn.to_out[1](hidden_states) return hidden_states class Lumina2TransformerBlock(nn.Module): def __init__( self, dim: int, num_attention_heads: int, num_kv_heads: int, multiple_of: int, ffn_dim_multiplier: float, norm_eps: float, modulation: bool = True, ) -> None: super().__init__() self.head_dim = dim // num_attention_heads self.modulation = modulation self.attn = Attention( query_dim=dim, cross_attention_dim=None, dim_head=dim // num_attention_heads, qk_norm="rms_norm", heads=num_attention_heads, kv_heads=num_kv_heads, eps=1e-5, bias=False, out_bias=False, processor=Lumina2AttnProcessor2_0(), ) self.feed_forward = LuminaFeedForward( dim=dim, inner_dim=4 * dim, multiple_of=multiple_of, ffn_dim_multiplier=ffn_dim_multiplier, ) if modulation: self.norm1 = LuminaRMSNormZero( embedding_dim=dim, norm_eps=norm_eps, norm_elementwise_affine=True, ) else: self.norm1 = RMSNorm(dim, eps=norm_eps) self.ffn_norm1 = RMSNorm(dim, eps=norm_eps) self.norm2 = RMSNorm(dim, eps=norm_eps) self.ffn_norm2 = RMSNorm(dim, eps=norm_eps) def forward( self, 
hidden_states: torch.Tensor, attention_mask: torch.Tensor, image_rotary_emb: torch.Tensor, temb: torch.Tensor | None = None, ) -> torch.Tensor: if self.modulation: norm_hidden_states, gate_msa, scale_mlp, gate_mlp = self.norm1(hidden_states, temb) attn_output = self.attn( hidden_states=norm_hidden_states, encoder_hidden_states=norm_hidden_states, attention_mask=attention_mask, image_rotary_emb=image_rotary_emb, ) hidden_states = hidden_states + gate_msa.unsqueeze(1).tanh() * self.norm2(attn_output) mlp_output = self.feed_forward(self.ffn_norm1(hidden_states) * (1 + scale_mlp.unsqueeze(1))) hidden_states = hidden_states + gate_mlp.unsqueeze(1).tanh() * self.ffn_norm2(mlp_output) else: norm_hidden_states = self.norm1(hidden_states) attn_output = self.attn( hidden_states=norm_hidden_states, encoder_hidden_states=norm_hidden_states, attention_mask=attention_mask, image_rotary_emb=image_rotary_emb, ) hidden_states = hidden_states + self.norm2(attn_output) mlp_output = self.feed_forward(self.ffn_norm1(hidden_states)) hidden_states = hidden_states + self.ffn_norm2(mlp_output) return hidden_states class Lumina2RotaryPosEmbed(nn.Module): def __init__(self, theta: int, axes_dim: list[int], axes_lens: list[int] = (300, 512, 512), patch_size: int = 2): super().__init__() self.theta = theta self.axes_dim = axes_dim self.axes_lens = axes_lens self.patch_size = patch_size self.freqs_cis = self._precompute_freqs_cis(axes_dim, axes_lens, theta) def _precompute_freqs_cis(self, axes_dim: list[int], axes_lens: list[int], theta: int) -> list[torch.Tensor]: freqs_cis = [] freqs_dtype = torch.float32 if torch.backends.mps.is_available() else torch.float64 for i, (d, e) in enumerate(zip(axes_dim, axes_lens)): emb = get_1d_rotary_pos_embed(d, e, theta=self.theta, freqs_dtype=freqs_dtype) freqs_cis.append(emb) return freqs_cis def _get_freqs_cis(self, ids: torch.Tensor) -> torch.Tensor: device = ids.device if ids.device.type == "mps": ids = ids.to("cpu") result = [] for i in 
range(len(self.axes_dim)): freqs = self.freqs_cis[i].to(ids.device) index = ids[:, :, i : i + 1].repeat(1, 1, freqs.shape[-1]).to(torch.int64) result.append(torch.gather(freqs.unsqueeze(0).repeat(index.shape[0], 1, 1), dim=1, index=index)) return torch.cat(result, dim=-1).to(device) def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor): batch_size, channels, height, width = hidden_states.shape p = self.patch_size post_patch_height, post_patch_width = height // p, width // p image_seq_len = post_patch_height * post_patch_width device = hidden_states.device encoder_seq_len = attention_mask.shape[1] l_effective_cap_len = attention_mask.sum(dim=1).tolist() seq_lengths = [cap_seq_len + image_seq_len for cap_seq_len in l_effective_cap_len] max_seq_len = max(seq_lengths) # Create position IDs position_ids = torch.zeros(batch_size, max_seq_len, 3, dtype=torch.int32, device=device) for i, (cap_seq_len, seq_len) in enumerate(zip(l_effective_cap_len, seq_lengths)): # add caption position ids position_ids[i, :cap_seq_len, 0] = torch.arange(cap_seq_len, dtype=torch.int32, device=device) position_ids[i, cap_seq_len:seq_len, 0] = cap_seq_len # add image position ids row_ids = ( torch.arange(post_patch_height, dtype=torch.int32, device=device) .view(-1, 1) .repeat(1, post_patch_width) .flatten() ) col_ids = ( torch.arange(post_patch_width, dtype=torch.int32, device=device) .view(1, -1) .repeat(post_patch_height, 1) .flatten() ) position_ids[i, cap_seq_len:seq_len, 1] = row_ids position_ids[i, cap_seq_len:seq_len, 2] = col_ids # Get combined rotary embeddings freqs_cis = self._get_freqs_cis(position_ids) # create separate rotary embeddings for captions and images cap_freqs_cis = torch.zeros( batch_size, encoder_seq_len, freqs_cis.shape[-1], device=device, dtype=freqs_cis.dtype ) img_freqs_cis = torch.zeros( batch_size, image_seq_len, freqs_cis.shape[-1], device=device, dtype=freqs_cis.dtype ) for i, (cap_seq_len, seq_len) in enumerate(zip(l_effective_cap_len, 
seq_lengths)): cap_freqs_cis[i, :cap_seq_len] = freqs_cis[i, :cap_seq_len] img_freqs_cis[i, :image_seq_len] = freqs_cis[i, cap_seq_len:seq_len] # image patch embeddings hidden_states = ( hidden_states.view(batch_size, channels, post_patch_height, p, post_patch_width, p) .permute(0, 2, 4, 3, 5, 1) .flatten(3) .flatten(1, 2) ) return hidden_states, cap_freqs_cis, img_freqs_cis, freqs_cis, l_effective_cap_len, seq_lengths class Lumina2Transformer2DModel(ModelMixin, ConfigMixin, PeftAdapterMixin, FromOriginalModelMixin): r""" Lumina2NextDiT: Diffusion model with a Transformer backbone. Parameters: sample_size (`int`): The width of the latent images. This is fixed during training since it is used to learn a number of position embeddings. patch_size (`int`, *optional*, (`int`, *optional*, defaults to 2): The size of each patch in the image. This parameter defines the resolution of patches fed into the model. in_channels (`int`, *optional*, defaults to 4): The number of input channels for the model. Typically, this matches the number of channels in the input images. hidden_size (`int`, *optional*, defaults to 4096): The dimensionality of the hidden layers in the model. This parameter determines the width of the model's hidden representations. num_layers (`int`, *optional*, default to 32): The number of layers in the model. This defines the depth of the neural network. num_attention_heads (`int`, *optional*, defaults to 32): The number of attention heads in each attention layer. This parameter specifies how many separate attention mechanisms are used. num_kv_heads (`int`, *optional*, defaults to 8): The number of key-value heads in the attention mechanism, if different from the number of attention heads. If None, it defaults to num_attention_heads. multiple_of (`int`, *optional*, defaults to 256): A factor that the hidden size should be a multiple of. This can help optimize certain hardware configurations. 
ffn_dim_multiplier (`float`, *optional*): A multiplier for the dimensionality of the feed-forward network. If None, it uses a default value based on the model configuration. norm_eps (`float`, *optional*, defaults to 1e-5): A small value added to the denominator for numerical stability in normalization layers. scaling_factor (`float`, *optional*, defaults to 1.0): A scaling factor applied to certain parameters or layers in the model. This can be used for adjusting the overall scale of the model's operations. """ _supports_gradient_checkpointing = True _no_split_modules = ["Lumina2TransformerBlock"] _skip_layerwise_casting_patterns = ["x_embedder", "norm"] @register_to_config def __init__( self, sample_size: int = 128, patch_size: int = 2, in_channels: int = 16, out_channels: int | None = None, hidden_size: int = 2304, num_layers: int = 26, num_refiner_layers: int = 2, num_attention_heads: int = 24, num_kv_heads: int = 8, multiple_of: int = 256, ffn_dim_multiplier: float | None = None, norm_eps: float = 1e-5, scaling_factor: float = 1.0, axes_dim_rope: tuple[int, int, int] = (32, 32, 32), axes_lens: tuple[int, int, int] = (300, 512, 512), cap_feat_dim: int = 1024, ) -> None: super().__init__() self.out_channels = out_channels or in_channels # 1. Positional, patch & conditional embeddings self.rope_embedder = Lumina2RotaryPosEmbed( theta=10000, axes_dim=axes_dim_rope, axes_lens=axes_lens, patch_size=patch_size ) self.x_embedder = nn.Linear(in_features=patch_size * patch_size * in_channels, out_features=hidden_size) self.time_caption_embed = Lumina2CombinedTimestepCaptionEmbedding( hidden_size=hidden_size, cap_feat_dim=cap_feat_dim, norm_eps=norm_eps ) # 2. 
Noise and context refinement blocks self.noise_refiner = nn.ModuleList( [ Lumina2TransformerBlock( hidden_size, num_attention_heads, num_kv_heads, multiple_of, ffn_dim_multiplier, norm_eps, modulation=True, ) for _ in range(num_refiner_layers) ] ) self.context_refiner = nn.ModuleList( [ Lumina2TransformerBlock( hidden_size, num_attention_heads, num_kv_heads, multiple_of, ffn_dim_multiplier, norm_eps, modulation=False, ) for _ in range(num_refiner_layers) ] ) # 3. Transformer blocks self.layers = nn.ModuleList( [ Lumina2TransformerBlock( hidden_size, num_attention_heads, num_kv_heads, multiple_of, ffn_dim_multiplier, norm_eps, modulation=True, ) for _ in range(num_layers) ] ) # 4. Output norm & projection self.norm_out = LuminaLayerNormContinuous( embedding_dim=hidden_size, conditioning_embedding_dim=min(hidden_size, 1024), elementwise_affine=False, eps=1e-6, bias=True, out_dim=patch_size * patch_size * self.out_channels, ) self.gradient_checkpointing = False @apply_lora_scale("attention_kwargs") def forward( self, hidden_states: torch.Tensor, timestep: torch.Tensor, encoder_hidden_states: torch.Tensor, encoder_attention_mask: torch.Tensor, attention_kwargs: dict[str, Any] | None = None, return_dict: bool = True, ) -> torch.Tensor | Transformer2DModelOutput: # 1. Condition, positional & patch embedding batch_size, _, height, width = hidden_states.shape temb, encoder_hidden_states = self.time_caption_embed(hidden_states, timestep, encoder_hidden_states) ( hidden_states, context_rotary_emb, noise_rotary_emb, rotary_emb, encoder_seq_lengths, seq_lengths, ) = self.rope_embedder(hidden_states, encoder_attention_mask) hidden_states = self.x_embedder(hidden_states) # 2. Context & noise refinement for layer in self.context_refiner: encoder_hidden_states = layer(encoder_hidden_states, encoder_attention_mask, context_rotary_emb) for layer in self.noise_refiner: hidden_states = layer(hidden_states, None, noise_rotary_emb, temb) # 3. 
Joint Transformer blocks max_seq_len = max(seq_lengths) use_mask = len(set(seq_lengths)) > 1 attention_mask = hidden_states.new_zeros(batch_size, max_seq_len, dtype=torch.bool) joint_hidden_states = hidden_states.new_zeros(batch_size, max_seq_len, self.config.hidden_size) for i, (encoder_seq_len, seq_len) in enumerate(zip(encoder_seq_lengths, seq_lengths)): attention_mask[i, :seq_len] = True joint_hidden_states[i, :encoder_seq_len] = encoder_hidden_states[i, :encoder_seq_len] joint_hidden_states[i, encoder_seq_len:seq_len] = hidden_states[i] hidden_states = joint_hidden_states for layer in self.layers: if torch.is_grad_enabled() and self.gradient_checkpointing: hidden_states = self._gradient_checkpointing_func( layer, hidden_states, attention_mask if use_mask else None, rotary_emb, temb ) else: hidden_states = layer(hidden_states, attention_mask if use_mask else None, rotary_emb, temb) # 4. Output norm & projection hidden_states = self.norm_out(hidden_states, temb) # 5. Unpatchify p = self.config.patch_size output = [] for i, (encoder_seq_len, seq_len) in enumerate(zip(encoder_seq_lengths, seq_lengths)): output.append( hidden_states[i][encoder_seq_len:seq_len] .view(height // p, width // p, p, p, self.out_channels) .permute(4, 0, 2, 1, 3) .flatten(3, 4) .flatten(1, 2) ) output = torch.stack(output, dim=0) if not return_dict: return (output,) return Transformer2DModelOutput(sample=output)
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/models/transformers/transformer_lumina2.py", "license": "Apache License 2.0", "lines": 452, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:src/diffusers/pipelines/lumina2/pipeline_lumina2.py
# Copyright 2025 Alpha-VLLM and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import inspect
from typing import Any, Callable

import numpy as np
import torch
from transformers import Gemma2PreTrainedModel, GemmaTokenizer, GemmaTokenizerFast

from ...image_processor import VaeImageProcessor
from ...loaders import Lumina2LoraLoaderMixin
from ...models import AutoencoderKL
from ...models.transformers.transformer_lumina2 import Lumina2Transformer2DModel
from ...schedulers import FlowMatchEulerDiscreteScheduler
from ...utils import (
    deprecate,
    is_torch_xla_available,
    logging,
    replace_example_docstring,
)
from ...utils.torch_utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


if is_torch_xla_available():
    import torch_xla.core.xla_model as xm

    XLA_AVAILABLE = True
else:
    XLA_AVAILABLE = False

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> import torch
        >>> from diffusers import Lumina2Pipeline

        >>> pipe = Lumina2Pipeline.from_pretrained("Alpha-VLLM/Lumina-Image-2.0", torch_dtype=torch.bfloat16)
        >>> # Enable memory optimizations.
        >>> pipe.enable_model_cpu_offload()

        >>> prompt = "Upper body of a young woman in a Victorian-era outfit with brass goggles and leather straps. Background shows an industrial revolution cityscape with smoky skies and tall, metal structures"
        >>> image = pipe(prompt).images[0]
        ```
"""


# Copied from diffusers.pipelines.flux.pipeline_flux.calculate_shift
def calculate_shift(
    image_seq_len,
    base_seq_len: int = 256,
    max_seq_len: int = 4096,
    base_shift: float = 0.5,
    max_shift: float = 1.15,
):
    """Linearly interpolate the flow-matching shift `mu` from the image sequence length.

    Maps `base_seq_len -> base_shift` and `max_seq_len -> max_shift`; values outside
    that range are extrapolated on the same line.
    """
    m = (max_shift - base_shift) / (max_seq_len - base_seq_len)
    b = base_shift - m * base_seq_len
    mu = image_seq_len * m + b
    return mu


# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps
def retrieve_timesteps(
    scheduler,
    num_inference_steps: int | None = None,
    device: str | torch.device | None = None,
    timesteps: list[int] | None = None,
    sigmas: list[float] | None = None,
    **kwargs,
):
    r"""
    Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
    custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.

    Args:
        scheduler (`SchedulerMixin`):
            The scheduler to get timesteps from.
        num_inference_steps (`int`):
            The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps`
            must be `None`.
        device (`str` or `torch.device`, *optional*):
            The device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
        timesteps (`list[int]`, *optional*):
            Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed,
            `num_inference_steps` and `sigmas` must be `None`.
        sigmas (`list[float]`, *optional*):
            Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed,
            `num_inference_steps` and `timesteps` must be `None`.

    Returns:
        `tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
        second element is the number of inference steps.

    Raises:
        ValueError: If both `timesteps` and `sigmas` are given, or if the scheduler's `set_timesteps`
            does not accept the custom schedule argument that was passed.
    """
    if timesteps is not None and sigmas is not None:
        raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values")
    if timesteps is not None:
        # Not every scheduler supports a user-supplied timestep schedule; detect via the signature.
        accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
        if not accepts_timesteps:
            raise ValueError(
                f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
                f" timestep schedules. Please check whether you are using the correct scheduler."
            )
        scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
        timesteps = scheduler.timesteps
        num_inference_steps = len(timesteps)
    elif sigmas is not None:
        accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
        if not accept_sigmas:
            raise ValueError(
                f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
                f" sigmas schedules. Please check whether you are using the correct scheduler."
            )
        scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs)
        timesteps = scheduler.timesteps
        num_inference_steps = len(timesteps)
    else:
        scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
        timesteps = scheduler.timesteps
    return timesteps, num_inference_steps


class Lumina2Pipeline(DiffusionPipeline, Lumina2LoraLoaderMixin):
    r"""
    Pipeline for text-to-image generation using Lumina-T2I.

    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
    library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)

    Args:
        vae ([`AutoencoderKL`]):
            Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
        text_encoder ([`Gemma2PreTrainedModel`]):
            Frozen Gemma2 text-encoder.
        tokenizer (`GemmaTokenizer` or `GemmaTokenizerFast`):
            Gemma tokenizer.
        transformer ([`Transformer2DModel`]):
            A text conditioned `Transformer2DModel` to denoise the encoded image latents.
        scheduler ([`SchedulerMixin`]):
            A scheduler to be used in combination with `transformer` to denoise the encoded image latents.
    """

    # No optional sub-models: every component passed to __init__ is required.
    _optional_components = []
    # Tensors that callback_on_step_end is allowed to read/override each step.
    _callback_tensor_inputs = ["latents", "prompt_embeds"]
    # Order in which models are moved on/off the accelerator for CPU offloading.
    model_cpu_offload_seq = "text_encoder->transformer->vae"

    def __init__(
        self,
        transformer: Lumina2Transformer2DModel,
        scheduler: FlowMatchEulerDiscreteScheduler,
        vae: AutoencoderKL,
        text_encoder: Gemma2PreTrainedModel,
        tokenizer: GemmaTokenizer | GemmaTokenizerFast,
    ):
        super().__init__()

        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            transformer=transformer,
            scheduler=scheduler,
        )
        # Spatial downsampling factor of the VAE (pixels -> latents). Hard-coded rather
        # than derived from the VAE config.
        self.vae_scale_factor = 8
        self.default_sample_size = (
            self.transformer.config.sample_size
            if hasattr(self, "transformer") and self.transformer is not None
            else 128
        )
        self.default_image_size = self.default_sample_size * self.vae_scale_factor
        # Default system prompt prepended to every user prompt in encode_prompt (see
        # the " <Prompt Start> " join there); callers may override via `system_prompt`.
        self.system_prompt = "You are an assistant designed to generate superior images with the superior degree of image-text alignment based on textual prompts or user prompts."
        # *2 accounts for the transformer patch size on top of VAE downsampling, so
        # requested height/width are validated against the full compression factor.
        self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor * 2)
        if getattr(self, "tokenizer", None) is not None:
            # Right padding keeps real tokens left-aligned for the attention mask.
            self.tokenizer.padding_side = "right"

    def _get_gemma_prompt_embeds(
        self,
        prompt: str | list[str],
        device: torch.device | None = None,
        max_sequence_length: int = 256,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        """Tokenize `prompt` and run the Gemma2 encoder.

        Returns `(prompt_embeds, prompt_attention_mask)` where the embeddings are the
        second-to-last hidden state of the encoder, cast to the text-encoder (or
        transformer) dtype and moved to `device`.
        """
        device = device or self._execution_device
        prompt = [prompt] if isinstance(prompt, str) else prompt

        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=max_sequence_length,
            truncation=True,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids.to(device)
        # Re-tokenize without truncation purely to detect (and warn about) dropped text.
        untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids.to(device)

        if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
            removed_text = self.tokenizer.batch_decode(untruncated_ids[:, max_sequence_length - 1 : -1])
            logger.warning(
                "The following part of your input was truncated because Gemma can only handle sequences up to"
                f" {max_sequence_length} tokens: {removed_text}"
            )

        prompt_attention_mask = text_inputs.attention_mask.to(device)
        prompt_embeds = self.text_encoder(
            text_input_ids, attention_mask=prompt_attention_mask, output_hidden_states=True
        )
        # Penultimate layer features are used as conditioning, not the final layer.
        prompt_embeds = prompt_embeds.hidden_states[-2]

        if self.text_encoder is not None:
            dtype = self.text_encoder.dtype
        elif self.transformer is not None:
            dtype = self.transformer.dtype
        else:
            dtype = None

        prompt_embeds = prompt_embeds.to(dtype=dtype, device=device)

        _, seq_len, _ = prompt_embeds.shape

        return prompt_embeds, prompt_attention_mask

    # Adapted from diffusers.pipelines.deepfloyd_if.pipeline_if.encode_prompt
    def encode_prompt(
        self,
        prompt: str | list[str],
        do_classifier_free_guidance: bool = True,
        negative_prompt: str | list[str] = None,
        num_images_per_prompt: int = 1,
        device: torch.device | None = None,
        prompt_embeds: torch.Tensor | None = None,
        negative_prompt_embeds: torch.Tensor | None = None,
        prompt_attention_mask: torch.Tensor | None = None,
        negative_prompt_attention_mask: torch.Tensor | None = None,
        system_prompt: str | None = None,
        max_sequence_length: int = 256,
    ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
        r"""
        Encodes the prompt into text encoder hidden states.

        Args:
            prompt (`str` or `list[str]`, *optional*):
                prompt to be encoded
            negative_prompt (`str` or `list[str]`, *optional*):
                The prompt not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds`
                instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). For
                Lumina-T2I, this should be "".
            do_classifier_free_guidance (`bool`, *optional*, defaults to `True`):
                whether to use classifier free guidance or not
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                number of images that should be generated per prompt
            device: (`torch.device`, *optional*):
                torch device to place the resulting embeddings on
            prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                provided, text embeddings will be generated from `prompt` input argument.
            negative_prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated negative text embeddings. For Lumina-T2I, it's should be the embeddings of the "" string.
            system_prompt (`str`, *optional*):
                System prompt prepended to each prompt as `"<system> <Prompt Start> <prompt>"`; defaults to
                `self.system_prompt` when `None`.
            max_sequence_length (`int`, defaults to `256`):
                Maximum sequence length to use for the prompt.

        Returns:
            `tuple`: `(prompt_embeds, prompt_attention_mask, negative_prompt_embeds,
            negative_prompt_attention_mask)`; the negative pair is whatever was passed in (possibly `None`)
            when classifier-free guidance is disabled.
        """
        if device is None:
            device = self._execution_device

        prompt = [prompt] if isinstance(prompt, str) else prompt
        if prompt is not None:
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        if system_prompt is None:
            system_prompt = self.system_prompt
        if prompt is not None:
            # The " <Prompt Start> " marker separates the system prompt from the user prompt.
            prompt = [system_prompt + " <Prompt Start> " + p for p in prompt]

        if prompt_embeds is None:
            prompt_embeds, prompt_attention_mask = self._get_gemma_prompt_embeds(
                prompt=prompt,
                device=device,
                max_sequence_length=max_sequence_length,
            )

        batch_size, seq_len, _ = prompt_embeds.shape
        # duplicate text embeddings and attention mask for each generation per prompt, using mps friendly method
        prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
        prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
        prompt_attention_mask = prompt_attention_mask.repeat(num_images_per_prompt, 1)
        prompt_attention_mask = prompt_attention_mask.view(batch_size * num_images_per_prompt, -1)

        # Get negative embeddings for classifier free guidance
        if do_classifier_free_guidance and negative_prompt_embeds is None:
            negative_prompt = negative_prompt if negative_prompt is not None else ""

            # Normalize str to list
            negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt

            if prompt is not None and type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                negative_prompt = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )

            negative_prompt_embeds, negative_prompt_attention_mask = self._get_gemma_prompt_embeds(
                prompt=negative_prompt,
                device=device,
                max_sequence_length=max_sequence_length,
            )

            batch_size, seq_len, _ = negative_prompt_embeds.shape
            # duplicate text embeddings and attention mask for each generation per prompt, using mps friendly method
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
            negative_prompt_attention_mask = negative_prompt_attention_mask.repeat(num_images_per_prompt, 1)
            negative_prompt_attention_mask = negative_prompt_attention_mask.view(
                batch_size * num_images_per_prompt, -1
            )

        return prompt_embeds, prompt_attention_mask, negative_prompt_embeds, negative_prompt_attention_mask

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
    def prepare_extra_step_kwargs(self, generator, eta):
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://huggingface.co/papers/2010.02502
        # and should be between [0, 1]

        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        # check if the scheduler accepts generator
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs["generator"] = generator
        return extra_step_kwargs

    def check_inputs(
        self,
        prompt,
        height,
        width,
        negative_prompt,
        prompt_embeds=None,
        negative_prompt_embeds=None,
        prompt_attention_mask=None,
        negative_prompt_attention_mask=None,
        callback_on_step_end_tensor_inputs=None,
        max_sequence_length=None,
    ):
        """Validate user-supplied call arguments; raises ValueError/TypeError on any inconsistency.

        Checks divisibility of height/width by the full compression factor, mutual exclusivity
        of prompt vs. prompt_embeds (and their negatives), the presence of attention masks
        alongside precomputed embeddings, and shape agreement between positive/negative pairs.
        """
        if height % (self.vae_scale_factor * 2) != 0 or width % (self.vae_scale_factor * 2) != 0:
            raise ValueError(
                f"`height` and `width` have to be divisible by {self.vae_scale_factor * 2} but are {height} and {width}."
            )

        if callback_on_step_end_tensor_inputs is not None and not all(
            k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
        ):
            raise ValueError(
                f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
            )

        if prompt is not None and prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
                " only forward one of the two."
            )
        elif prompt is None and prompt_embeds is None:
            raise ValueError(
                "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
            )
        elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if prompt is not None and negative_prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `prompt`: {prompt} and `negative_prompt_embeds`:"
                f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
            )

        if negative_prompt is not None and negative_prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
                f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
            )

        if prompt_embeds is not None and prompt_attention_mask is None:
            raise ValueError("Must provide `prompt_attention_mask` when specifying `prompt_embeds`.")

        if negative_prompt_embeds is not None and negative_prompt_attention_mask is None:
            raise ValueError("Must provide `negative_prompt_attention_mask` when specifying `negative_prompt_embeds`.")

        if prompt_embeds is not None and negative_prompt_embeds is not None:
            if prompt_embeds.shape != negative_prompt_embeds.shape:
                raise ValueError(
                    "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
                    f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
                    f" {negative_prompt_embeds.shape}."
                )
            if prompt_attention_mask.shape != negative_prompt_attention_mask.shape:
                raise ValueError(
                    "`prompt_attention_mask` and `negative_prompt_attention_mask` must have the same shape when passed directly, but"
                    f" got: `prompt_attention_mask` {prompt_attention_mask.shape} != `negative_prompt_attention_mask`"
                    f" {negative_prompt_attention_mask.shape}."
                )

        if max_sequence_length is not None and max_sequence_length > 512:
            raise ValueError(f"`max_sequence_length` cannot be greater than 512 but is {max_sequence_length}")

    def enable_vae_slicing(self):
        r"""
        Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
        compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
        """
        depr_message = f"Calling `enable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_slicing()`."
        deprecate(
            "enable_vae_slicing",
            "0.40.0",
            depr_message,
        )
        self.vae.enable_slicing()

    def disable_vae_slicing(self):
        r"""
        Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to
        computing decoding in one step.
        """
        depr_message = f"Calling `disable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_slicing()`."
        deprecate(
            "disable_vae_slicing",
            "0.40.0",
            depr_message,
        )
        self.vae.disable_slicing()

    def enable_vae_tiling(self):
        r"""
        Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
        compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
        processing larger images.
        """
        depr_message = f"Calling `enable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_tiling()`."
        deprecate(
            "enable_vae_tiling",
            "0.40.0",
            depr_message,
        )
        self.vae.enable_tiling()

    def disable_vae_tiling(self):
        r"""
        Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to
        computing decoding in one step.
        """
        depr_message = f"Calling `disable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_tiling()`."
        deprecate(
            "disable_vae_tiling",
            "0.40.0",
            depr_message,
        )
        self.vae.disable_tiling()

    def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
        """Sample (or pass through) initial noise latents of shape (B, C, H_lat, W_lat)."""
        # VAE applies 8x compression on images but we must also account for packing which requires
        # latent height and width to be divisible by 2.
        height = 2 * (int(height) // (self.vae_scale_factor * 2))
        width = 2 * (int(width) // (self.vae_scale_factor * 2))

        shape = (batch_size, num_channels_latents, height, width)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            # Caller-provided latents are trusted as-is (no shape check here); only moved to device.
            latents = latents.to(device)

        return latents

    @property
    def guidance_scale(self):
        # Set per-call in __call__; exposed read-only between calls.
        return self._guidance_scale

    @property
    def attention_kwargs(self):
        return self._attention_kwargs

    # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
    # of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1`
    # corresponds to doing no classifier free guidance.
    @property
    def do_classifier_free_guidance(self):
        return self._guidance_scale > 1

    @property
    def num_timesteps(self):
        return self._num_timesteps

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        prompt: str | list[str] = None,
        width: int | None = None,
        height: int | None = None,
        num_inference_steps: int = 30,
        guidance_scale: float = 4.0,
        negative_prompt: str | list[str] = None,
        sigmas: list[float] = None,
        num_images_per_prompt: int | None = 1,
        generator: torch.Generator | list[torch.Generator] | None = None,
        latents: torch.Tensor | None = None,
        prompt_embeds: torch.Tensor | None = None,
        negative_prompt_embeds: torch.Tensor | None = None,
        prompt_attention_mask: torch.Tensor | None = None,
        negative_prompt_attention_mask: torch.Tensor | None = None,
        output_type: str | None = "pil",
        return_dict: bool = True,
        attention_kwargs: dict[str, Any] | None = None,
        callback_on_step_end: Callable[[int, int], None] | None = None,
        callback_on_step_end_tensor_inputs: list[str] = ["latents"],
        system_prompt: str | None = None,
        cfg_trunc_ratio: float = 1.0,
        cfg_normalization: bool = True,
        max_sequence_length: int = 256,
    ) -> ImagePipelineOutput | tuple:
        """
        Function invoked when calling the pipeline for generation.

        Args:
            prompt (`str` or `list[str]`, *optional*):
                The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`.
                instead.
            negative_prompt (`str` or `list[str]`, *optional*):
                The prompt or prompts not to guide the image generation. If not defined, one has to pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
                less than `1`).
            num_inference_steps (`int`, *optional*, defaults to 30):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            sigmas (`list[float]`, *optional*):
                Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in
                their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed
                will be used.
            guidance_scale (`float`, *optional*, defaults to 4.0):
                Guidance scale as defined in [Classifier-Free Diffusion
                Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2.
                of [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting
                `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to
                the text `prompt`, usually at the expense of lower image quality.
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            height (`int`, *optional*, defaults to self.unet.config.sample_size):
                The height in pixels of the generated image.
            width (`int`, *optional*, defaults to self.unet.config.sample_size):
                The width in pixels of the generated image.
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) in the DDIM paper: https://huggingface.co/papers/2010.02502. Only
                applies to [`schedulers.DDIMScheduler`], will be ignored for others.
            generator (`torch.Generator` or `list[torch.Generator]`, *optional*):
                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
                to make generation deterministic.
            latents (`torch.Tensor`, *optional*):
                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
                tensor will be generated by sampling using the supplied random `generator`.
            prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                provided, text embeddings will be generated from `prompt` input argument.
            prompt_attention_mask (`torch.Tensor`, *optional*):
                Pre-generated attention mask for text embeddings.
            negative_prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated negative text embeddings. For Lumina-T2I this negative prompt should be "". If not
                provided, negative_prompt_embeds will be generated from `negative_prompt` input argument.
            negative_prompt_attention_mask (`torch.Tensor`, *optional*):
                Pre-generated attention mask for negative text embeddings.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generate image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.stable_diffusion.IFPipelineOutput`] instead of a plain tuple.
            attention_kwargs:
                A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
                `self.processor` in
                [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
            callback_on_step_end (`Callable`, *optional*):
                A function that calls at the end of each denoising steps during the inference. The function is called
                with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
                callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
                `callback_on_step_end_tensor_inputs`.
            callback_on_step_end_tensor_inputs (`list`, *optional*):
                The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
                will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
                `._callback_tensor_inputs` attribute of your pipeline class.
            system_prompt (`str`, *optional*):
                The system prompt to use for the image generation.
            cfg_trunc_ratio (`float`, *optional*, defaults to `1.0`):
                The ratio of the timestep interval to apply normalization-based guidance scale.
            cfg_normalization (`bool`, *optional*, defaults to `True`):
                Whether to apply normalization-based guidance scale.
            max_sequence_length (`int`, defaults to `256`):
                Maximum sequence length to use with the `prompt`.

        Examples:

        Returns:
            [`~pipelines.ImagePipelineOutput`] or `tuple`:
                If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is
                returned where the first element is a list with the generated images
        """
        height = height or self.default_sample_size * self.vae_scale_factor
        width = width or self.default_sample_size * self.vae_scale_factor

        self._guidance_scale = guidance_scale
        self._attention_kwargs = attention_kwargs

        # 1. Check inputs. Raise error if not correct
        self.check_inputs(
            prompt,
            height,
            width,
            negative_prompt,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            prompt_attention_mask=prompt_attention_mask,
            negative_prompt_attention_mask=negative_prompt_attention_mask,
            max_sequence_length=max_sequence_length,
            callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs,
        )

        # 2. Define call parameters
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        device = self._execution_device

        # 3. Encode input prompt
        (
            prompt_embeds,
            prompt_attention_mask,
            negative_prompt_embeds,
            negative_prompt_attention_mask,
        ) = self.encode_prompt(
            prompt,
            self.do_classifier_free_guidance,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            device=device,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            prompt_attention_mask=prompt_attention_mask,
            negative_prompt_attention_mask=negative_prompt_attention_mask,
            max_sequence_length=max_sequence_length,
            system_prompt=system_prompt,
        )

        # 4. Prepare latents.
        latent_channels = self.transformer.config.in_channels
        latents = self.prepare_latents(
            batch_size * num_images_per_prompt,
            latent_channels,
            height,
            width,
            prompt_embeds.dtype,
            device,
            generator,
            latents,
        )

        # 5. Prepare timesteps
        sigmas = np.linspace(1.0, 1 / num_inference_steps, num_inference_steps) if sigmas is None else sigmas
        # NOTE(review): latents are (B, C, H, W) per prepare_latents, so shape[1] is the channel
        # count, not a token sequence length as calculate_shift's parameter name suggests.
        # Kept as-is because changing it changes the sampled mu and therefore all outputs —
        # confirm against upstream before altering.
        image_seq_len = latents.shape[1]
        mu = calculate_shift(
            image_seq_len,
            self.scheduler.config.get("base_image_seq_len", 256),
            self.scheduler.config.get("max_image_seq_len", 4096),
            self.scheduler.config.get("base_shift", 0.5),
            self.scheduler.config.get("max_shift", 1.15),
        )
        # On XLA the timestep schedule is kept on CPU; tensors are moved per-step instead.
        if XLA_AVAILABLE:
            timestep_device = "cpu"
        else:
            timestep_device = device
        timesteps, num_inference_steps = retrieve_timesteps(
            self.scheduler,
            num_inference_steps,
            timestep_device,
            sigmas=sigmas,
            mu=mu,
        )
        num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
        self._num_timesteps = len(timesteps)

        # 6. Denoising loop
        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                # compute whether apply classifier-free truncation on this timestep
                do_classifier_free_truncation = (i + 1) / num_inference_steps > cfg_trunc_ratio

                # reverse the timestep since Lumina uses t=0 as the noise and t=1 as the image
                current_timestep = 1 - t / self.scheduler.config.num_train_timesteps
                # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
                current_timestep = current_timestep.expand(latents.shape[0])

                noise_pred_cond = self.transformer(
                    hidden_states=latents,
                    timestep=current_timestep,
                    encoder_hidden_states=prompt_embeds,
                    encoder_attention_mask=prompt_attention_mask,
                    return_dict=False,
                    attention_kwargs=self.attention_kwargs,
                )[0]

                # perform normalization-based guidance scale on a truncated timestep interval
                if self.do_classifier_free_guidance and not do_classifier_free_truncation:
                    noise_pred_uncond = self.transformer(
                        hidden_states=latents,
                        timestep=current_timestep,
                        encoder_hidden_states=negative_prompt_embeds,
                        encoder_attention_mask=negative_prompt_attention_mask,
                        return_dict=False,
                        attention_kwargs=self.attention_kwargs,
                    )[0]
                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_cond - noise_pred_uncond)
                    # apply normalization after classifier-free guidance
                    if cfg_normalization:
                        # Rescale the guided prediction to the conditional prediction's per-position norm.
                        cond_norm = torch.norm(noise_pred_cond, dim=-1, keepdim=True)
                        noise_norm = torch.norm(noise_pred, dim=-1, keepdim=True)
                        noise_pred = noise_pred * (cond_norm / noise_norm)
                else:
                    noise_pred = noise_pred_cond

                # compute the previous noisy sample x_t -> x_t-1
                latents_dtype = latents.dtype
                # Negate because the model predicts velocity in the image->noise direction,
                # while the scheduler steps noise->image.
                noise_pred = -noise_pred
                latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0]

                if latents.dtype != latents_dtype:
                    if torch.backends.mps.is_available():
                        # some platforms (eg. apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272
                        latents = latents.to(latents_dtype)

                if callback_on_step_end is not None:
                    callback_kwargs = {}
                    for k in callback_on_step_end_tensor_inputs:
                        callback_kwargs[k] = locals()[k]
                    callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)

                    latents = callback_outputs.pop("latents", latents)
                    prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)

                # call the callback, if provided
                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                    progress_bar.update()

                if XLA_AVAILABLE:
                    xm.mark_step()

        if not output_type == "latent":
            # Undo the VAE's latent normalization before decoding.
            latents = (latents / self.vae.config.scaling_factor) + self.vae.config.shift_factor
            image = self.vae.decode(latents, return_dict=False)[0]
            image = self.image_processor.postprocess(image, output_type=output_type)
        else:
            image = latents

        # Offload all models
        self.maybe_free_model_hooks()

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)


class Lumina2Text2ImgPipeline(Lumina2Pipeline):
    """Deprecated alias of [`Lumina2Pipeline`]; emits a deprecation warning on construction."""

    def __init__(
        self,
        transformer: Lumina2Transformer2DModel,
        scheduler: FlowMatchEulerDiscreteScheduler,
        vae: AutoencoderKL,
        text_encoder: Gemma2PreTrainedModel,
        tokenizer: GemmaTokenizer | GemmaTokenizerFast,
    ):
        deprecation_message = "`Lumina2Text2ImgPipeline` has been renamed to `Lumina2Pipeline` and will be removed in a future version. Please use `Lumina2Pipeline` instead."
        deprecate("diffusers.pipelines.lumina2.pipeline_lumina2.Lumina2Text2ImgPipeline", "0.34", deprecation_message)
        super().__init__(
            transformer=transformer,
            scheduler=scheduler,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
        )
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/pipelines/lumina2/pipeline_lumina2.py", "license": "Apache License 2.0", "lines": 723, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:tests/models/transformers/test_models_transformer_lumina2.py
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import torch

from diffusers import Lumina2Transformer2DModel

from ...testing_utils import (
    enable_full_determinism,
    torch_device,
)
from ..test_modeling_common import ModelTesterMixin


enable_full_determinism()


class Lumina2Transformer2DModelTransformerTests(ModelTesterMixin, unittest.TestCase):
    """Shared-mixin model tests for `Lumina2Transformer2DModel` with a tiny dummy config."""

    model_class = Lumina2Transformer2DModel
    main_input_name = "hidden_states"
    uses_custom_attn_processor = True

    @property
    def dummy_input(self):
        """Build a minimal random forward-pass input dict on `torch_device`."""
        batch_size = 2  # N
        num_channels = 4  # C
        height = width = 16  # H, W
        embedding_dim = 32  # D
        sequence_length = 16  # L

        hidden_states = torch.randn((batch_size, num_channels, height, width)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device)
        timestep = torch.rand(size=(batch_size,)).to(torch_device)
        # All text tokens are marked valid (mask of ones).
        attention_mask = torch.ones(size=(batch_size, sequence_length), dtype=torch.bool).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "encoder_hidden_states": encoder_hidden_states,
            "timestep": timestep,
            "encoder_attention_mask": attention_mask,
        }

    @property
    def input_shape(self):
        # (C, H, W) of the latent input, matching dummy_input.
        return (4, 16, 16)

    @property
    def output_shape(self):
        # The transformer is shape-preserving on the latent: output matches input_shape.
        return (4, 16, 16)

    def prepare_init_args_and_inputs_for_common(self):
        """Return (model init kwargs, forward inputs) used by the ModelTesterMixin suite."""
        init_dict = {
            "sample_size": 16,
            "patch_size": 2,
            "in_channels": 4,
            "hidden_size": 24,
            "num_layers": 2,
            "num_refiner_layers": 1,
            "num_attention_heads": 3,
            "num_kv_heads": 1,
            "multiple_of": 2,
            "ffn_dim_multiplier": None,
            "norm_eps": 1e-5,
            "scaling_factor": 1.0,
            "axes_dim_rope": (4, 2, 2),
            "axes_lens": (128, 128, 128),
            "cap_feat_dim": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_gradient_checkpointing_is_applied(self):
        # The mixin verifies checkpointing is enabled on exactly these module classes.
        expected_set = {"Lumina2Transformer2DModel"}
        super().test_gradient_checkpointing_is_applied(expected_set=expected_set)
{ "repo_id": "huggingface/diffusers", "file_path": "tests/models/transformers/test_models_transformer_lumina2.py", "license": "Apache License 2.0", "lines": 73, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/diffusers:tests/pipelines/lumina2/test_pipeline_lumina2.py
import unittest

import torch
from transformers import AutoTokenizer, Gemma2Config, Gemma2Model

from diffusers import (
    AutoencoderKL,
    FlowMatchEulerDiscreteScheduler,
    Lumina2Pipeline,
    Lumina2Transformer2DModel,
)

from ..test_pipelines_common import PipelineTesterMixin


class Lumina2PipelineFastTests(unittest.TestCase, PipelineTesterMixin):
    """Fast, CPU-friendly tests for ``Lumina2Pipeline`` built from tiny sub-models."""

    pipeline_class = Lumina2Pipeline
    params = frozenset(
        [
            "prompt",
            "height",
            "width",
            "guidance_scale",
            "negative_prompt",
            "prompt_embeds",
            "negative_prompt_embeds",
        ]
    )
    batch_params = frozenset(["prompt", "negative_prompt"])
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback_on_step_end",
            "callback_on_step_end_tensor_inputs",
        ]
    )
    supports_dduf = False
    test_xformers_attention = False
    test_layerwise_casting = True

    def get_dummy_components(self):
        """Instantiate the smallest possible set of pipeline components.

        Each seeded constructor call is kept in its original order so that
        weight initialization stays deterministic across test runs.
        """
        torch.manual_seed(0)
        denoiser = Lumina2Transformer2DModel(
            sample_size=4,
            patch_size=2,
            in_channels=4,
            hidden_size=8,
            num_layers=2,
            num_attention_heads=1,
            num_kv_heads=1,
            multiple_of=16,
            ffn_dim_multiplier=None,
            norm_eps=1e-5,
            scaling_factor=1.0,
            axes_dim_rope=[4, 2, 2],
            cap_feat_dim=8,
        )

        torch.manual_seed(0)
        autoencoder = AutoencoderKL(
            sample_size=32,
            in_channels=3,
            out_channels=3,
            block_out_channels=(4,),
            layers_per_block=1,
            latent_channels=4,
            norm_num_groups=1,
            use_quant_conv=False,
            use_post_quant_conv=False,
            shift_factor=0.0609,
            scaling_factor=1.5035,
        )

        noise_scheduler = FlowMatchEulerDiscreteScheduler()
        gemma_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/dummy-gemma")

        torch.manual_seed(0)
        encoder_config = Gemma2Config(
            head_dim=4,
            hidden_size=8,
            intermediate_size=8,
            num_attention_heads=2,
            num_hidden_layers=2,
            num_key_value_heads=2,
            sliding_window=2,
        )
        caption_encoder = Gemma2Model(encoder_config)

        return {
            "transformer": denoiser,
            "vae": autoencoder.eval(),
            "scheduler": noise_scheduler,
            "text_encoder": caption_encoder,
            "tokenizer": gemma_tokenizer,
        }

    def get_dummy_inputs(self, device, seed=0):
        """Return a seeded, reproducible call-argument dict for the pipeline."""
        # MPS does not support device-local generators, so fall back to the
        # global RNG there; everywhere else pin the generator to CPU.
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device="cpu").manual_seed(seed)

        return {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 5.0,
            "height": 32,
            "width": 32,
            "output_type": "np",
        }
{ "repo_id": "huggingface/diffusers", "file_path": "tests/pipelines/lumina2/test_pipeline_lumina2.py", "license": "Apache License 2.0", "lines": 104, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/diffusers:examples/community/mixture_tiling_sdxl.py
# Copyright 2025 The DEVAIEXP Team and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect from enum import Enum from typing import Any, Dict, List, Optional, Tuple, Union import torch from transformers import ( CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer, ) from diffusers.image_processor import VaeImageProcessor from diffusers.loaders import ( FromSingleFileMixin, StableDiffusionXLLoraLoaderMixin, TextualInversionLoaderMixin, ) from diffusers.models import AutoencoderKL, UNet2DConditionModel from diffusers.models.lora import adjust_lora_scale_text_encoder from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin from diffusers.pipelines.stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput from diffusers.schedulers import KarrasDiffusionSchedulers, LMSDiscreteScheduler from diffusers.utils import ( USE_PEFT_BACKEND, deprecate, is_invisible_watermark_available, is_torch_xla_available, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers, ) from diffusers.utils.torch_utils import randn_tensor try: from ligo.segments import segment except ImportError: raise ImportError("Please install transformers and ligo-segments to use the mixture pipeline") if is_invisible_watermark_available(): from diffusers.pipelines.stable_diffusion_xl.watermark import StableDiffusionXLWatermarker if is_torch_xla_available(): import torch_xla.core.xla_model as xm 
XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py >>> import torch >>> from diffusers import StableDiffusionXLPipeline >>> pipe = StableDiffusionXLPipeline.from_pretrained( ... "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16 ... ) >>> pipe = pipe.to("cuda") >>> prompt = "a photo of an astronaut riding a horse on mars" >>> image = pipe(prompt).images[0] ``` """ def _tile2pixel_indices(tile_row, tile_col, tile_width, tile_height, tile_row_overlap, tile_col_overlap): """Given a tile row and column numbers returns the range of pixels affected by that tiles in the overall image Returns a tuple with: - Starting coordinates of rows in pixel space - Ending coordinates of rows in pixel space - Starting coordinates of columns in pixel space - Ending coordinates of columns in pixel space """ px_row_init = 0 if tile_row == 0 else tile_row * (tile_height - tile_row_overlap) px_row_end = px_row_init + tile_height px_col_init = 0 if tile_col == 0 else tile_col * (tile_width - tile_col_overlap) px_col_end = px_col_init + tile_width return px_row_init, px_row_end, px_col_init, px_col_end def _pixel2latent_indices(px_row_init, px_row_end, px_col_init, px_col_end): """Translates coordinates in pixel space to coordinates in latent space""" return px_row_init // 8, px_row_end // 8, px_col_init // 8, px_col_end // 8 def _tile2latent_indices(tile_row, tile_col, tile_width, tile_height, tile_row_overlap, tile_col_overlap): """Given a tile row and column numbers returns the range of latents affected by that tiles in the overall image Returns a tuple with: - Starting coordinates of rows in latent space - Ending coordinates of rows in latent space - Starting coordinates of columns in latent space - Ending coordinates of columns in latent space """ px_row_init, px_row_end, px_col_init, px_col_end = _tile2pixel_indices( tile_row, tile_col, tile_width, tile_height, 
tile_row_overlap, tile_col_overlap ) return _pixel2latent_indices(px_row_init, px_row_end, px_col_init, px_col_end) def _tile2latent_exclusive_indices( tile_row, tile_col, tile_width, tile_height, tile_row_overlap, tile_col_overlap, rows, columns ): """Given a tile row and column numbers returns the range of latents affected only by that tile in the overall image Returns a tuple with: - Starting coordinates of rows in latent space - Ending coordinates of rows in latent space - Starting coordinates of columns in latent space - Ending coordinates of columns in latent space """ row_init, row_end, col_init, col_end = _tile2latent_indices( tile_row, tile_col, tile_width, tile_height, tile_row_overlap, tile_col_overlap ) row_segment = segment(row_init, row_end) col_segment = segment(col_init, col_end) # Iterate over the rest of tiles, clipping the region for the current tile for row in range(rows): for column in range(columns): if row != tile_row and column != tile_col: clip_row_init, clip_row_end, clip_col_init, clip_col_end = _tile2latent_indices( row, column, tile_width, tile_height, tile_row_overlap, tile_col_overlap ) row_segment = row_segment - segment(clip_row_init, clip_row_end) col_segment = col_segment - segment(clip_col_init, clip_col_end) # return row_init, row_end, col_init, col_end return row_segment[0], row_segment[1], col_segment[0], col_segment[1] def _get_crops_coords_list(num_rows, num_cols, output_width): """ Generates a list of lists of `crops_coords_top_left` tuples for focusing on different horizontal parts of an image, and repeats this list for the specified number of rows in the output structure. This function calculates `crops_coords_top_left` tuples to create horizontal focus variations (like left, center, right focus) based on `output_width` and `num_cols` (which represents the number of horizontal focus points/columns). 
It then repeats the *list* of these horizontal focus tuples `num_rows` times to create the final list of lists output structure. Args: num_rows (int): The desired number of rows in the output list of lists. This determines how many times the list of horizontal focus variations will be repeated. num_cols (int): The number of horizontal focus points (columns) to generate. This determines how many horizontal focus variations are created based on dividing the `output_width`. output_width (int): The desired width of the output image. Returns: list[list[tuple[int, int]]]: A list of lists of tuples. Each inner list contains `num_cols` tuples of `(ctop, cleft)`, representing horizontal focus points. The outer list contains `num_rows` such inner lists. """ crops_coords_list = [] if num_cols <= 0: crops_coords_list = [] elif num_cols == 1: crops_coords_list = [(0, 0)] else: section_width = output_width / num_cols for i in range(num_cols): cleft = int(round(i * section_width)) crops_coords_list.append((0, cleft)) result_list = [] for _ in range(num_rows): result_list.append(list(crops_coords_list)) return result_list # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): r""" Rescales `noise_cfg` tensor based on `guidance_rescale` to improve image quality and fix overexposure. Based on Section 3.4 from [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://huggingface.co/papers/2305.08891). Args: noise_cfg (`torch.Tensor`): The predicted noise tensor for the guided diffusion process. noise_pred_text (`torch.Tensor`): The predicted noise tensor for the text-guided diffusion process. guidance_rescale (`float`, *optional*, defaults to 0.0): A rescale factor applied to the noise predictions. Returns: noise_cfg (`torch.Tensor`): The rescaled noise prediction tensor. 
""" std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) # rescale the results from guidance (fixes overexposure) noise_pred_rescaled = noise_cfg * (std_text / std_cfg) # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg return noise_cfg # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps def retrieve_timesteps( scheduler, num_inference_steps: Optional[int] = None, device: Optional[Union[str, torch.device]] = None, timesteps: Optional[List[int]] = None, sigmas: Optional[List[float]] = None, **kwargs, ): r""" Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. Args: scheduler (`SchedulerMixin`): The scheduler to get timesteps from. num_inference_steps (`int`): The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` must be `None`. device (`str` or `torch.device`, *optional*): The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. timesteps (`List[int]`, *optional*): Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, `num_inference_steps` and `sigmas` must be `None`. sigmas (`List[float]`, *optional*): Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, `num_inference_steps` and `timesteps` must be `None`. Returns: `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the second element is the number of inference steps. 
""" if timesteps is not None and sigmas is not None: raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") if timesteps is not None: accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" timestep schedules. Please check whether you are using the correct scheduler." ) scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif sigmas is not None: accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accept_sigmas: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" sigmas schedules. Please check whether you are using the correct scheduler." ) scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return timesteps, num_inference_steps class StableDiffusionXLTilingPipeline( DiffusionPipeline, StableDiffusionMixin, FromSingleFileMixin, StableDiffusionXLLoraLoaderMixin, TextualInversionLoaderMixin, ): r""" Pipeline for text-to-image generation using Stable Diffusion XL. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) 
The pipeline also inherits the following loading methods: - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files - [`~loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`] for loading LoRA weights - [`~loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights`] for saving LoRA weights Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. text_encoder ([`CLIPTextModel`]): Frozen text-encoder. Stable Diffusion XL uses the text portion of [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. text_encoder_2 ([` CLIPTextModelWithProjection`]): Second frozen text-encoder. Stable Diffusion XL uses the text and pool portion of [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection), specifically the [laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k) variant. tokenizer (`CLIPTokenizer`): Tokenizer of class [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). tokenizer_2 (`CLIPTokenizer`): Second Tokenizer of class [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. 
force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `"True"`): Whether the negative prompt embeddings shall be forced to always be set to 0. Also see the config of `stabilityai/stable-diffusion-xl-base-1-0`. add_watermarker (`bool`, *optional*): Whether to use the [invisible_watermark library](https://github.com/ShieldMnt/invisible-watermark/) to watermark output images. If not defined, it will default to True if the package is installed, otherwise no watermarker will be used. """ model_cpu_offload_seq = "text_encoder->text_encoder_2->image_encoder->unet->vae" _optional_components = [ "tokenizer", "tokenizer_2", "text_encoder", "text_encoder_2", ] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, text_encoder_2: CLIPTextModelWithProjection, tokenizer: CLIPTokenizer, tokenizer_2: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: KarrasDiffusionSchedulers, force_zeros_for_empty_prompt: bool = True, add_watermarker: Optional[bool] = None, ): super().__init__() self.register_modules( vae=vae, text_encoder=text_encoder, text_encoder_2=text_encoder_2, tokenizer=tokenizer, tokenizer_2=tokenizer_2, unet=unet, scheduler=scheduler, ) self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8 self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.default_sample_size = ( self.unet.config.sample_size if hasattr(self, "unet") and self.unet is not None and hasattr(self.unet.config, "sample_size") else 128 ) add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available() if add_watermarker: self.watermark = StableDiffusionXLWatermarker() else: self.watermark = None class SeedTilesMode(Enum): """Modes in which the latents of a particular tile can be re-seeded""" FULL = "full" EXCLUSIVE = "exclusive" def encode_prompt( self, prompt: str, 
prompt_2: str | None = None, device: Optional[torch.device] = None, num_images_per_prompt: int = 1, do_classifier_free_guidance: bool = True, negative_prompt: str | None = None, negative_prompt_2: str | None = None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, pooled_prompt_embeds: Optional[torch.Tensor] = None, negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, lora_scale: Optional[float] = None, clip_skip: Optional[int] = None, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded prompt_2 (`str` or `List[str]`, *optional*): The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is used in both text-encoders device: (`torch.device`): torch device num_images_per_prompt (`int`): number of images that should be generated per prompt do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). negative_prompt_2 (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. 
pooled_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, pooled text embeddings will be generated from `prompt` input argument. negative_pooled_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` input argument. lora_scale (`float`, *optional*): A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. """ device = device or self._execution_device # set lora scale so that monkey patched LoRA # function of text encoder can correctly access it if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin): self._lora_scale = lora_scale # dynamically adjust the LoRA scale if self.text_encoder is not None: if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if self.text_encoder_2 is not None: if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale) else: scale_lora_layers(self.text_encoder_2, lora_scale) prompt = [prompt] if isinstance(prompt, str) else prompt if prompt is not None: batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] # Define tokenizers and text encoders tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2] text_encoders = ( [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2] ) if prompt_embeds is None: prompt_2 = prompt_2 or prompt prompt_2 = 
[prompt_2] if isinstance(prompt_2, str) else prompt_2 # textual inversion: process multi-vector tokens if necessary prompt_embeds_list = [] prompts = [prompt, prompt_2] for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders): if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, tokenizer) text_inputs = tokenizer( prompt, padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( text_input_ids, untruncated_ids ): removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1]) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {tokenizer.model_max_length} tokens: {removed_text}" ) prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True) # We are only ALWAYS interested in the pooled output of the final text encoder if pooled_prompt_embeds is None and prompt_embeds[0].ndim == 2: pooled_prompt_embeds = prompt_embeds[0] if clip_skip is None: prompt_embeds = prompt_embeds.hidden_states[-2] else: # "2" because SDXL always indexes from the penultimate layer. 
prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)] prompt_embeds_list.append(prompt_embeds) prompt_embeds = torch.concat(prompt_embeds_list, dim=-1) # get unconditional embeddings for classifier free guidance zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt: negative_prompt_embeds = torch.zeros_like(prompt_embeds) negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds) elif do_classifier_free_guidance and negative_prompt_embeds is None: negative_prompt = negative_prompt or "" negative_prompt_2 = negative_prompt_2 or negative_prompt # normalize str to list negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt negative_prompt_2 = ( batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2 ) uncond_tokens: List[str] if prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." 
) else: uncond_tokens = [negative_prompt, negative_prompt_2] negative_prompt_embeds_list = [] for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders): if isinstance(self, TextualInversionLoaderMixin): negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer) max_length = prompt_embeds.shape[1] uncond_input = tokenizer( negative_prompt, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) negative_prompt_embeds = text_encoder( uncond_input.input_ids.to(device), output_hidden_states=True, ) # We are only ALWAYS interested in the pooled output of the final text encoder if negative_pooled_prompt_embeds is None and negative_prompt_embeds[0].ndim == 2: negative_pooled_prompt_embeds = negative_prompt_embeds[0] negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2] negative_prompt_embeds_list.append(negative_prompt_embeds) negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1) if self.text_encoder_2 is not None: prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) else: prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=device) bs_embed, seq_len, _ = prompt_embeds.shape # duplicate text embeddings for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance: # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] if self.text_encoder_2 is not None: negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) else: negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.unet.dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = 
negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( bs_embed * num_images_per_prompt, -1 ) if do_classifier_free_guidance: negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( bs_embed * num_images_per_prompt, -1 ) if self.text_encoder is not None: if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: # Retrieve the original scale by scaling back the LoRA layers unscale_lora_layers(self.text_encoder, lora_scale) if self.text_encoder_2 is not None: if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: # Retrieve the original scale by scaling back the LoRA layers unscale_lora_layers(self.text_encoder_2, lora_scale) return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://huggingface.co/papers/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs def check_inputs(self, prompt, height, width, grid_cols, seed_tiles_mode, tiles_mode): if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if not isinstance(prompt, list) or not all(isinstance(row, list) for row in prompt): raise ValueError(f"`prompt` has to be a list of lists but is {type(prompt)}") if not all(len(row) == grid_cols for row in prompt): raise ValueError("All prompt rows must have the same number of prompt columns") if not isinstance(seed_tiles_mode, str) and ( not isinstance(seed_tiles_mode, list) or not all(isinstance(row, list) for row in seed_tiles_mode) ): raise ValueError(f"`seed_tiles_mode` has to be a string or list of lists but is {type(prompt)}") if any(mode not in tiles_mode for row in seed_tiles_mode for mode in row): raise ValueError(f"Seed tiles mode must be one of {tiles_mode}") def _get_add_time_ids( self, original_size, crops_coords_top_left, target_size, dtype, text_encoder_projection_dim=None ): add_time_ids = list(original_size + crops_coords_top_left + target_size) passed_add_embed_dim = ( self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim ) expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features if expected_add_embed_dim != 
passed_add_embed_dim: raise ValueError( f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`." ) add_time_ids = torch.tensor([add_time_ids], dtype=dtype) return add_time_ids def _gaussian_weights(self, tile_width, tile_height, nbatches, device, dtype): """Generates a gaussian mask of weights for tile contributions""" import numpy as np from numpy import exp, pi, sqrt latent_width = tile_width // 8 latent_height = tile_height // 8 var = 0.01 midpoint = (latent_width - 1) / 2 # -1 because index goes from 0 to latent_width - 1 x_probs = [ exp(-(x - midpoint) * (x - midpoint) / (latent_width * latent_width) / (2 * var)) / sqrt(2 * pi * var) for x in range(latent_width) ] midpoint = latent_height / 2 y_probs = [ exp(-(y - midpoint) * (y - midpoint) / (latent_height * latent_height) / (2 * var)) / sqrt(2 * pi * var) for y in range(latent_height) ] weights_np = np.outer(y_probs, x_probs) weights_torch = torch.tensor(weights_np, device=device) weights_torch = weights_torch.to(dtype) return torch.tile(weights_torch, (nbatches, self.unet.config.in_channels, 1, 1)) def upcast_vae(self): deprecate("upcast_vae", "1.0.0", "`upcast_vae` is deprecated. Please use `pipe.vae.to(torch.float32)`") self.vae.to(dtype=torch.float32) # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding def get_guidance_scale_embedding( self, w: torch.Tensor, embedding_dim: int = 512, dtype: torch.dtype = torch.float32 ) -> torch.Tensor: """ See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298 Args: w (`torch.Tensor`): Generate embedding vectors with a specified guidance scale to subsequently enrich timestep embeddings. 
embedding_dim (`int`, *optional*, defaults to 512): Dimension of the embeddings to generate. dtype (`torch.dtype`, *optional*, defaults to `torch.float32`): Data type of the generated embeddings. Returns: `torch.Tensor`: Embedding vectors with shape `(len(w), embedding_dim)`. """ assert len(w.shape) == 1 w = w * 1000.0 half_dim = embedding_dim // 2 emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) emb = w.to(dtype)[:, None] * emb[None, :] emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) if embedding_dim % 2 == 1: # zero pad emb = torch.nn.functional.pad(emb, (0, 1)) assert emb.shape == (w.shape[0], embedding_dim) return emb @property def guidance_scale(self): return self._guidance_scale @property def clip_skip(self): return self._clip_skip # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1` # corresponds to doing no classifier free guidance. 
    @property
    def do_classifier_free_guidance(self):
        # CFG is active only when the guidance weight exceeds 1 AND the UNet was not
        # trained with a guidance-embedding input (`time_cond_proj_dim`, used by
        # guidance-distilled models such as LCM, which need no explicit CFG).
        return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None

    @property
    def cross_attention_kwargs(self):
        return self._cross_attention_kwargs

    @property
    def num_timesteps(self):
        return self._num_timesteps

    @property
    def interrupt(self):
        return self._interrupt

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        prompt: Union[str, List[str]] = None,
        height: Optional[int] = None,
        width: Optional[int] = None,
        num_inference_steps: int = 50,
        guidance_scale: float = 5.0,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: str | None = "pil",
        return_dict: bool = True,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        original_size: Optional[Tuple[int, int]] = None,
        crops_coords_top_left: Optional[List[List[Tuple[int, int]]]] = None,
        target_size: Optional[Tuple[int, int]] = None,
        negative_original_size: Optional[Tuple[int, int]] = None,
        negative_crops_coords_top_left: Optional[List[List[Tuple[int, int]]]] = None,
        negative_target_size: Optional[Tuple[int, int]] = None,
        clip_skip: Optional[int] = None,
        tile_height: Optional[int] = 1024,
        tile_width: Optional[int] = 1024,
        tile_row_overlap: Optional[int] = 128,
        tile_col_overlap: Optional[int] = 128,
        guidance_scale_tiles: Optional[List[List[float]]] = None,
        seed_tiles: Optional[List[List[int]]] = None,
        seed_tiles_mode: Optional[Union[str, List[List[str]]]] = "full",
        seed_reroll_regions: Optional[List[Tuple[int, int, int, int, int]]] = None,
        **kwargs,
    ):
        r"""
        Function invoked when calling the pipeline for generation.

        Args:
            prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`.
                instead. For this tiling pipeline, `prompt` is a 2D grid (list of rows, each row a list of
                per-tile prompts); the grid shape determines the tile layout.
            height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
                The height in pixels of the generated image. This is set to 1024 by default for the best results.
                Anything below 512 pixels won't work well for
                [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
                and checkpoints that are not specifically fine-tuned on low resolutions.
            width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
                The width in pixels of the generated image. This is set to 1024 by default for the best results.
                Anything below 512 pixels won't work well for
                [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
                and checkpoints that are not specifically fine-tuned on low resolutions.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            guidance_scale (`float`, *optional*, defaults to 5.0):
                Guidance scale as defined in [Classifier-Free Diffusion
                Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2.
                of [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting
                `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to
                the text `prompt`, usually at the expense of lower image quality.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. If not defined, one has to pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale`
                is less than `1`).
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) in the DDIM paper: https://huggingface.co/papers/2010.02502. Only
                applies to [`schedulers.DDIMScheduler`], will be ignored for others.
            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
                to make generation deterministic.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generate image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] instead
                of a plain tuple.
            cross_attention_kwargs (`dict`, *optional*):
                A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
                `self.processor` in
                [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
            original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
                If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled.
                `original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as
                explained in section 2.2 of
                [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
            crops_coords_top_left (`List[List[Tuple[int, int]]]`, *optional*, defaults to (0, 0)):
                `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the
                position `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by
                setting `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section
                2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
            target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
                For most cases, `target_size` should be set to the desired height and width of the generated image. If
                not specified it will default to `(height, width)`. Part of SDXL's micro-conditioning as explained in
                section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
            negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
                To negatively condition the generation process based on a specific image resolution. Part of SDXL's
                micro-conditioning as explained in section 2.2 of
                [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
                information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
            negative_crops_coords_top_left (`List[List[Tuple[int, int]]]`, *optional*, defaults to (0, 0)):
                To negatively condition the generation process based on a specific crop coordinates. Part of SDXL's
                micro-conditioning as explained in section 2.2 of
                [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
                information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
            negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
                To negatively condition the generation process based on a target image resolution. It should be as
                same as the `target_size` for most cases. Part of SDXL's micro-conditioning as explained in section
                2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
                information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
            tile_height (`int`, *optional*, defaults to 1024):
                Height of each grid tile in pixels.
            tile_width (`int`, *optional*, defaults to 1024):
                Width of each grid tile in pixels.
            tile_row_overlap (`int`, *optional*, defaults to 128):
                Number of overlapping pixels between tiles in consecutive rows.
            tile_col_overlap (`int`, *optional*, defaults to 128):
                Number of overlapping pixels between tiles in consecutive columns.
            guidance_scale_tiles (`List[List[float]]`, *optional*):
                Specific weights for classifier-free guidance in each tile. If `None`, the value provided in
                `guidance_scale` will be used.
            seed_tiles (`List[List[int]]`, *optional*):
                Specific seeds for the initialization latents in each tile. These will override the latents generated
                for the whole canvas using the standard `generator` parameter.
            seed_tiles_mode (`Union[str, List[List[str]]]`, *optional*, defaults to `"full"`):
                Mode for seeding tiles, can be `"full"` or `"exclusive"`. If `"full"`, all the latents affected by the
                tile will be overridden. If `"exclusive"`, only the latents that are exclusively affected by this tile
                (and no other tiles) will be overridden.
            seed_reroll_regions (`List[Tuple[int, int, int, int, int]]`, *optional*):
                A list of tuples in the form of `(start_row, end_row, start_column, end_column, seed)` defining
                regions in pixel space for which the latents will be overridden using the given seed. Takes priority
                over `seed_tiles`.
            **kwargs (`Dict[str, Any]`, *optional*):
                Additional optional keyword arguments to be passed to the `unet.__call__` and `scheduler.step`
                functions.

        Examples:

        Returns:
            [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] or `tuple`:
            [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] if `return_dict` is True, otherwise a
            `tuple`. When returning a tuple, the first element is a list with the generated images.
        """

        # 0. Default height and width to unet
        height = height or self.default_sample_size * self.vae_scale_factor
        width = width or self.default_sample_size * self.vae_scale_factor

        original_size = original_size or (height, width)
        target_size = target_size or (height, width)
        negative_original_size = negative_original_size or (height, width)
        negative_target_size = negative_target_size or (height, width)

        self._guidance_scale = guidance_scale
        self._clip_skip = clip_skip
        self._cross_attention_kwargs = cross_attention_kwargs
        self._interrupt = False

        # The 2D prompt grid defines the tile layout: one prompt per tile.
        grid_rows = len(prompt)
        grid_cols = len(prompt[0])

        tiles_mode = [mode.value for mode in self.SeedTilesMode]

        # A single string mode is broadcast to every tile of the grid.
        if isinstance(seed_tiles_mode, str):
            seed_tiles_mode = [[seed_tiles_mode for _ in range(len(row))] for row in prompt]

        # 1. Check inputs. Raise error if not correct
        self.check_inputs(
            prompt,
            height,
            width,
            grid_cols,
            seed_tiles_mode,
            tiles_mode,
        )

        if seed_reroll_regions is None:
            seed_reroll_regions = []

        batch_size = 1  # tiling pipeline generates one canvas per call

        device = self._execution_device

        # update crops coords list
        crops_coords_top_left = _get_crops_coords_list(grid_rows, grid_cols, tile_width)
        if negative_original_size is not None and negative_target_size is not None:
            negative_crops_coords_top_left = _get_crops_coords_list(grid_rows, grid_cols, tile_width)

        # update height and width tile size and tile overlap size
        # (full-canvas size implied by the tile grid; overlapping columns/rows are counted once)
        height = tile_height + (grid_rows - 1) * (tile_height - tile_row_overlap)
        width = tile_width + (grid_cols - 1) * (tile_width - tile_col_overlap)

        # 3. Encode input prompt — one embedding tuple per tile of the grid.
        lora_scale = (
            self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None
        )
        text_embeddings = [
            [
                self.encode_prompt(
                    prompt=col,
                    device=device,
                    num_images_per_prompt=num_images_per_prompt,
                    do_classifier_free_guidance=self.do_classifier_free_guidance,
                    negative_prompt=negative_prompt,
                    prompt_embeds=None,
                    negative_prompt_embeds=None,
                    pooled_prompt_embeds=None,
                    negative_pooled_prompt_embeds=None,
                    lora_scale=lora_scale,
                    clip_skip=self.clip_skip,
                )
                for col in row
            ]
            for row in prompt
        ]

        # 3. Prepare latents for the whole canvas (tiles share this latent tensor).
        latents_shape = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
        dtype = text_embeddings[0][0][0].dtype
        latents = randn_tensor(latents_shape, generator=generator, device=device, dtype=dtype)

        # 3.1 overwrite latents for specific tiles if provided
        if seed_tiles is not None:
            for row in range(grid_rows):
                for col in range(grid_cols):
                    if (seed_tile := seed_tiles[row][col]) is not None:
                        mode = seed_tiles_mode[row][col]
                        if mode == self.SeedTilesMode.FULL.value:
                            row_init, row_end, col_init, col_end = _tile2latent_indices(
                                row, col, tile_width, tile_height, tile_row_overlap, tile_col_overlap
                            )
                        else:
                            # "exclusive": only the latent region not shared with any neighbour tile
                            row_init, row_end, col_init, col_end = _tile2latent_exclusive_indices(
                                row,
                                col,
                                tile_width,
                                tile_height,
                                tile_row_overlap,
                                tile_col_overlap,
                                grid_rows,
                                grid_cols,
                            )
                        tile_generator = torch.Generator(device).manual_seed(seed_tile)
                        tile_shape = (latents_shape[0], latents_shape[1], row_end - row_init, col_end - col_init)
                        latents[:, :, row_init:row_end, col_init:col_end] = torch.randn(
                            tile_shape, generator=tile_generator, device=device
                        )

        # 3.2 overwrite again for seed reroll regions (takes priority over seed_tiles)
        for row_init, row_end, col_init, col_end, seed_reroll in seed_reroll_regions:
            row_init, row_end, col_init, col_end = _pixel2latent_indices(
                row_init, row_end, col_init, col_end
            )  # to latent space coordinates
            reroll_generator = torch.Generator(device).manual_seed(seed_reroll)
            region_shape = (latents_shape[0], latents_shape[1], row_end - row_init, col_end - col_init)
            latents[:, :, row_init:row_end, col_init:col_end] = torch.randn(
                region_shape, generator=reroll_generator, device=device
            )

        # 4. Prepare timesteps
        accepts_offset = "offset" in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
        extra_set_kwargs = {}
        if accepts_offset:
            extra_set_kwargs["offset"] = 1
        timesteps, num_inference_steps = retrieve_timesteps(
            self.scheduler, num_inference_steps, device, None, None, **extra_set_kwargs
        )

        # if we use LMSDiscreteScheduler, let's make sure latents are multiplied by sigmas
        if isinstance(self.scheduler, LMSDiscreteScheduler):
            latents = latents * self.scheduler.sigmas[0]

        # 5. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
        extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)

        # 6. Prepare added time ids & embeddings
        # text_embeddings order: prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds
        embeddings_and_added_time = []
        for row in range(grid_rows):
            addition_embed_type_row = []
            for col in range(grid_cols):
                # extract generated values
                prompt_embeds = text_embeddings[row][col][0]
                negative_prompt_embeds = text_embeddings[row][col][1]
                pooled_prompt_embeds = text_embeddings[row][col][2]
                negative_pooled_prompt_embeds = text_embeddings[row][col][3]

                add_text_embeds = pooled_prompt_embeds
                if self.text_encoder_2 is None:
                    text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1])
                else:
                    text_encoder_projection_dim = self.text_encoder_2.config.projection_dim
                add_time_ids = self._get_add_time_ids(
                    original_size,
                    crops_coords_top_left[row][col],
                    target_size,
                    dtype=prompt_embeds.dtype,
                    text_encoder_projection_dim=text_encoder_projection_dim,
                )
                if negative_original_size is not None and negative_target_size is not None:
                    negative_add_time_ids = self._get_add_time_ids(
                        negative_original_size,
                        negative_crops_coords_top_left[row][col],
                        negative_target_size,
                        dtype=prompt_embeds.dtype,
                        text_encoder_projection_dim=text_encoder_projection_dim,
                    )
                else:
                    negative_add_time_ids = add_time_ids

                # For CFG the negative (unconditional) batch is stacked in front of the
                # conditional one; the UNet output is later split with `.chunk(2)`.
                if self.do_classifier_free_guidance:
                    prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
                    add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0)
                    add_time_ids = torch.cat([negative_add_time_ids, add_time_ids], dim=0)

                prompt_embeds = prompt_embeds.to(device)
                add_text_embeds = add_text_embeds.to(device)
                add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1)
                addition_embed_type_row.append((prompt_embeds, add_text_embeds, add_time_ids))
            embeddings_and_added_time.append(addition_embed_type_row)

        num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)

        # 7. Mask for tile weights strength
        tile_weights = self._gaussian_weights(tile_width, tile_height, batch_size, device, torch.float32)

        # 8. Denoising loop
        self._num_timesteps = len(timesteps)
        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                # Diffuse each tile
                noise_preds = []
                for row in range(grid_rows):
                    noise_preds_row = []
                    for col in range(grid_cols):
                        if self.interrupt:
                            continue
                        px_row_init, px_row_end, px_col_init, px_col_end = _tile2latent_indices(
                            row, col, tile_width, tile_height, tile_row_overlap, tile_col_overlap
                        )
                        tile_latents = latents[:, :, px_row_init:px_row_end, px_col_init:px_col_end]
                        # expand the latents if we are doing classifier free guidance
                        latent_model_input = (
                            torch.cat([tile_latents] * 2) if self.do_classifier_free_guidance else tile_latents
                        )
                        latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

                        # predict the noise residual
                        added_cond_kwargs = {
                            "text_embeds": embeddings_and_added_time[row][col][1],
                            "time_ids": embeddings_and_added_time[row][col][2],
                        }
                        # autocast only when the prompt-embed dtype differs from the UNet's
                        with torch.amp.autocast(device.type, dtype=dtype, enabled=dtype != self.unet.dtype):
                            noise_pred = self.unet(
                                latent_model_input,
                                t,
                                encoder_hidden_states=embeddings_and_added_time[row][col][0],
                                cross_attention_kwargs=self.cross_attention_kwargs,
                                added_cond_kwargs=added_cond_kwargs,
                                return_dict=False,
                            )[0]

                        # perform guidance
                        if self.do_classifier_free_guidance:
                            noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                            # per-tile guidance weight overrides the global one when given
                            guidance = (
                                guidance_scale
                                if guidance_scale_tiles is None or guidance_scale_tiles[row][col] is None
                                else guidance_scale_tiles[row][col]
                            )
                            noise_pred_tile = noise_pred_uncond + guidance * (noise_pred_text - noise_pred_uncond)
                            noise_preds_row.append(noise_pred_tile)
                    noise_preds.append(noise_preds_row)

                # Stitch noise predictions for all tiles
                noise_pred = torch.zeros(latents.shape, device=device)
                contributors = torch.zeros(latents.shape, device=device)

                # Add each tile contribution to overall latents
                for row in range(grid_rows):
                    for col in range(grid_cols):
                        px_row_init, px_row_end, px_col_init, px_col_end = _tile2latent_indices(
                            row, col, tile_width, tile_height, tile_row_overlap, tile_col_overlap
                        )
                        noise_pred[:, :, px_row_init:px_row_end, px_col_init:px_col_end] += (
                            noise_preds[row][col] * tile_weights
                        )
                        contributors[:, :, px_row_init:px_row_end, px_col_init:px_col_end] += tile_weights

                # Average overlapping areas with more than 1 contributor
                noise_pred /= contributors
                noise_pred = noise_pred.to(dtype)

                # compute the previous noisy sample x_t -> x_t-1
                latents_dtype = latents.dtype
                latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
                if latents.dtype != latents_dtype:
                    if torch.backends.mps.is_available():
                        # some platforms (eg. apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272
                        latents = latents.to(latents_dtype)

                # update progress bar
                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                    progress_bar.update()

                if XLA_AVAILABLE:
                    xm.mark_step()

        if not output_type == "latent":
            # make sure the VAE is in float32 mode, as it overflows in float16
            needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast

            if needs_upcasting:
                self.upcast_vae()
                latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
            elif latents.dtype != self.vae.dtype:
                if torch.backends.mps.is_available():
                    # some platforms (eg. apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272
                    self.vae = self.vae.to(latents.dtype)

            # unscale/denormalize the latents
            # denormalize with the mean and std if available and not None
            has_latents_mean = hasattr(self.vae.config, "latents_mean") and self.vae.config.latents_mean is not None
            has_latents_std = hasattr(self.vae.config, "latents_std") and self.vae.config.latents_std is not None
            if has_latents_mean and has_latents_std:
                latents_mean = (
                    torch.tensor(self.vae.config.latents_mean).view(1, 4, 1, 1).to(latents.device, latents.dtype)
                )
                latents_std = (
                    torch.tensor(self.vae.config.latents_std).view(1, 4, 1, 1).to(latents.device, latents.dtype)
                )
                latents = latents * latents_std / self.vae.config.scaling_factor + latents_mean
            else:
                latents = latents / self.vae.config.scaling_factor

            image = self.vae.decode(latents, return_dict=False)[0]

            # cast back to fp16 if needed
            if needs_upcasting:
                self.vae.to(dtype=torch.float16)
        else:
            image = latents

        if not output_type == "latent":
            # apply watermark if available
            if self.watermark is not None:
                image = self.watermark.apply_watermark(image)

            image = self.image_processor.postprocess(image, output_type=output_type)

        # Offload all models
        self.maybe_free_model_hooks()

        if not return_dict:
            return (image,)

        return StableDiffusionXLPipelineOutput(images=image)
{ "repo_id": "huggingface/diffusers", "file_path": "examples/community/mixture_tiling_sdxl.py", "license": "Apache License 2.0", "lines": 1054, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:scripts/convert_omnigen_to_diffusers.py
import argparse
import os

import torch
from huggingface_hub import snapshot_download
from safetensors.torch import load_file
from transformers import AutoTokenizer

from diffusers import AutoencoderKL, FlowMatchEulerDiscreteScheduler, OmniGenPipeline, OmniGenTransformer2DModel


def main(args):
    """Convert an original OmniGen checkpoint into diffusers `OmniGenPipeline` format.

    Downloads the checkpoint when `--origin_ckpt_path` is a Hub repo id rather than a
    local path, remaps the state-dict keys to the diffusers `OmniGenTransformer2DModel`
    layout, and saves the assembled pipeline to `--dump_path`.
    """
    # checkpoint from https://huggingface.co/Shitao/OmniGen-v1
    if not os.path.exists(args.origin_ckpt_path):
        print("Model not found, downloading...")
        cache_folder = os.getenv("HF_HUB_CACHE")
        args.origin_ckpt_path = snapshot_download(
            repo_id=args.origin_ckpt_path,
            cache_dir=cache_folder,
            ignore_patterns=["flax_model.msgpack", "rust_model.ot", "tf_model.h5", "model.pt"],
        )
        print(f"Downloaded model to {args.origin_ckpt_path}")

    ckpt = os.path.join(args.origin_ckpt_path, "model.safetensors")
    ckpt = load_file(ckpt, device="cpu")

    # One-to-one renames from the original checkpoint layout to the diffusers module names.
    mapping_dict = {
        "pos_embed": "patch_embedding.pos_embed",
        "x_embedder.proj.weight": "patch_embedding.output_image_proj.weight",
        "x_embedder.proj.bias": "patch_embedding.output_image_proj.bias",
        "input_x_embedder.proj.weight": "patch_embedding.input_image_proj.weight",
        "input_x_embedder.proj.bias": "patch_embedding.input_image_proj.bias",
        "final_layer.adaLN_modulation.1.weight": "norm_out.linear.weight",
        "final_layer.adaLN_modulation.1.bias": "norm_out.linear.bias",
        "final_layer.linear.weight": "proj_out.weight",
        "final_layer.linear.bias": "proj_out.bias",
        "time_token.mlp.0.weight": "time_token.linear_1.weight",
        "time_token.mlp.0.bias": "time_token.linear_1.bias",
        "time_token.mlp.2.weight": "time_token.linear_2.weight",
        "time_token.mlp.2.bias": "time_token.linear_2.bias",
        "t_embedder.mlp.0.weight": "t_embedder.linear_1.weight",
        "t_embedder.mlp.0.bias": "t_embedder.linear_1.bias",
        "t_embedder.mlp.2.weight": "t_embedder.linear_2.weight",
        "t_embedder.mlp.2.bias": "t_embedder.linear_2.bias",
        "llm.embed_tokens.weight": "embed_tokens.weight",
    }

    converted_state_dict = {}
    for k, v in ckpt.items():
        if k in mapping_dict:
            converted_state_dict[mapping_dict[k]] = v
        elif "qkv" in k:
            # Fused qkv projection is split row-wise into separate q/k/v weights.
            # assumes keys of the form `llm.layers.<i>.self_attn.qkv_proj.weight`,
            # so `k.split('.')[2]` is the layer index — TODO confirm against checkpoint
            to_q, to_k, to_v = v.chunk(3)
            converted_state_dict[f"layers.{k.split('.')[2]}.self_attn.to_q.weight"] = to_q
            converted_state_dict[f"layers.{k.split('.')[2]}.self_attn.to_k.weight"] = to_k
            converted_state_dict[f"layers.{k.split('.')[2]}.self_attn.to_v.weight"] = to_v
        elif "o_proj" in k:
            converted_state_dict[f"layers.{k.split('.')[2]}.self_attn.to_out.0.weight"] = v
        else:
            # Remaining keys are prefixed with "llm."; strip the 4-character prefix.
            converted_state_dict[k[4:]] = v

    # rope_scaling factors below are hand-copied from the original model config
    # (Phi-3-style "su" scaled RoPE); do not edit the literal values.
    transformer = OmniGenTransformer2DModel(
        rope_scaling={
            "long_factor": [
                1.0299999713897705,
                1.0499999523162842,
                1.0499999523162842,
                1.0799999237060547,
                1.2299998998641968,
                1.2299998998641968,
                1.2999999523162842,
                1.4499999284744263,
                1.5999999046325684,
                1.6499998569488525,
                1.8999998569488525,
                2.859999895095825,
                3.68999981880188,
                5.419999599456787,
                5.489999771118164,
                5.489999771118164,
                9.09000015258789,
                11.579999923706055,
                15.65999984741211,
                15.769999504089355,
                15.789999961853027,
                18.360000610351562,
                21.989999771118164,
                23.079999923706055,
                30.009998321533203,
                32.35000228881836,
                32.590003967285156,
                35.56000518798828,
                39.95000457763672,
                53.840003967285156,
                56.20000457763672,
                57.95000457763672,
                59.29000473022461,
                59.77000427246094,
                59.920005798339844,
                61.190006256103516,
                61.96000671386719,
                62.50000762939453,
                63.3700065612793,
                63.48000717163086,
                63.48000717163086,
                63.66000747680664,
                63.850006103515625,
                64.08000946044922,
                64.760009765625,
                64.80001068115234,
                64.81001281738281,
                64.81001281738281,
            ],
            "short_factor": [
                1.05,
                1.05,
                1.05,
                1.1,
                1.1,
                1.1,
                1.2500000000000002,
                1.2500000000000002,
                1.4000000000000004,
                1.4500000000000004,
                1.5500000000000005,
                1.8500000000000008,
                1.9000000000000008,
                2.000000000000001,
                2.000000000000001,
                2.000000000000001,
                2.000000000000001,
                2.000000000000001,
                2.000000000000001,
                2.000000000000001,
                2.000000000000001,
                2.000000000000001,
                2.000000000000001,
                2.000000000000001,
                2.000000000000001,
                2.000000000000001,
                2.000000000000001,
                2.000000000000001,
                2.000000000000001,
                2.000000000000001,
                2.000000000000001,
                2.000000000000001,
                2.1000000000000005,
                2.1000000000000005,
                2.2,
                2.3499999999999996,
                2.3499999999999996,
                2.3499999999999996,
                2.3499999999999996,
                2.3999999999999995,
                2.3999999999999995,
                2.6499999999999986,
                2.6999999999999984,
                2.8999999999999977,
                2.9499999999999975,
                3.049999999999997,
                3.049999999999997,
                3.049999999999997,
            ],
            "type": "su",
        },
        patch_size=2,
        in_channels=4,
        pos_embed_max_size=192,
    )
    transformer.load_state_dict(converted_state_dict, strict=True)
    transformer.to(torch.bfloat16)

    num_model_params = sum(p.numel() for p in transformer.parameters())
    print(f"Total number of transformer parameters: {num_model_params}")

    scheduler = FlowMatchEulerDiscreteScheduler(invert_sigmas=True, num_train_timesteps=1)

    vae = AutoencoderKL.from_pretrained(os.path.join(args.origin_ckpt_path, "vae"), torch_dtype=torch.float32)

    tokenizer = AutoTokenizer.from_pretrained(args.origin_ckpt_path)

    pipeline = OmniGenPipeline(tokenizer=tokenizer, transformer=transformer, vae=vae, scheduler=scheduler)
    pipeline.save_pretrained(args.dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--origin_ckpt_path",
        default="Shitao/OmniGen-v1",
        type=str,
        required=False,
        help="Path to the checkpoint to convert.",
    )
    parser.add_argument(
        "--dump_path", default="OmniGen-v1-diffusers", type=str, required=False, help="Path to the output pipeline."
    )

    args = parser.parse_args()
    main(args)
{ "repo_id": "huggingface/diffusers", "file_path": "scripts/convert_omnigen_to_diffusers.py", "license": "Apache License 2.0", "lines": 184, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
huggingface/diffusers:src/diffusers/models/transformers/transformer_omnigen.py
# Copyright 2025 OmniGen team and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import math

import torch
import torch.nn as nn
import torch.nn.functional as F

from ...configuration_utils import ConfigMixin, register_to_config
from ...utils import logging
from ..attention_processor import Attention
from ..embeddings import TimestepEmbedding, Timesteps, get_2d_sincos_pos_embed
from ..modeling_outputs import Transformer2DModelOutput
from ..modeling_utils import ModelMixin
from ..normalization import AdaLayerNorm, RMSNorm


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class OmniGenFeedForward(nn.Module):
    """Gated (SwiGLU-style) feed-forward block: a fused gate/up projection, SiLU gating,
    and a down projection back to `hidden_size`. All projections are bias-free."""

    def __init__(self, hidden_size: int, intermediate_size: int):
        super().__init__()
        # Single linear producing both the gate and the up projection, split with chunk().
        self.gate_up_proj = nn.Linear(hidden_size, 2 * intermediate_size, bias=False)
        self.down_proj = nn.Linear(intermediate_size, hidden_size, bias=False)

        self.activation_fn = nn.SiLU()

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        up_states = self.gate_up_proj(hidden_states)

        gate, up_states = up_states.chunk(2, dim=-1)
        # SwiGLU: up projection modulated by the SiLU-activated gate.
        up_states = up_states * self.activation_fn(gate)

        return self.down_proj(up_states)


class OmniGenPatchEmbed(nn.Module):
    """Patchifies latent images into token sequences and adds 2D sin-cos positional
    embeddings. Separate conv projections are kept for "input" (conditioning) images
    and the "output" (denoised) image."""

    def __init__(
        self,
        patch_size: int = 2,
        in_channels: int = 4,
        embed_dim: int = 768,
        bias: bool = True,
        interpolation_scale: float = 1,
        pos_embed_max_size: int = 192,
        base_size: int = 64,
    ):
        super().__init__()

        self.output_image_proj = nn.Conv2d(
            in_channels, embed_dim, kernel_size=(patch_size, patch_size), stride=patch_size, bias=bias
        )
        self.input_image_proj = nn.Conv2d(
            in_channels, embed_dim, kernel_size=(patch_size, patch_size), stride=patch_size, bias=bias
        )

        self.patch_size = patch_size
        self.interpolation_scale = interpolation_scale
        self.pos_embed_max_size = pos_embed_max_size

        # Precompute the full (pos_embed_max_size x pos_embed_max_size) grid once;
        # forward crops the centre region matching the actual input resolution.
        pos_embed = get_2d_sincos_pos_embed(
            embed_dim,
            self.pos_embed_max_size,
            base_size=base_size,
            interpolation_scale=self.interpolation_scale,
            output_type="pt",
        )
        # persistent=True: the checkpoint stores this buffer (mapped from "pos_embed").
        self.register_buffer("pos_embed", pos_embed.float().unsqueeze(0), persistent=True)

    def _cropped_pos_embed(self, height, width):
        """Center-crop the precomputed positional-embedding grid to the patchified input size."""
        if self.pos_embed_max_size is None:
            raise ValueError("`pos_embed_max_size` must be set for cropping.")

        height = height // self.patch_size
        width = width // self.patch_size
        if height > self.pos_embed_max_size:
            raise ValueError(
                f"Height ({height}) cannot be greater than `pos_embed_max_size`: {self.pos_embed_max_size}."
            )
        if width > self.pos_embed_max_size:
            raise ValueError(
                f"Width ({width}) cannot be greater than `pos_embed_max_size`: {self.pos_embed_max_size}."
            )

        top = (self.pos_embed_max_size - height) // 2
        left = (self.pos_embed_max_size - width) // 2
        spatial_pos_embed = self.pos_embed.reshape(1, self.pos_embed_max_size, self.pos_embed_max_size, -1)
        spatial_pos_embed = spatial_pos_embed[:, top : top + height, left : left + width, :]
        spatial_pos_embed = spatial_pos_embed.reshape(1, -1, spatial_pos_embed.shape[-1])
        return spatial_pos_embed

    def _patch_embeddings(self, hidden_states: torch.Tensor, is_input_image: bool) -> torch.Tensor:
        # Conv-patchify, then flatten spatial dims to a token sequence: (B, C, H, W) -> (B, H*W/p^2, D).
        if is_input_image:
            hidden_states = self.input_image_proj(hidden_states)
        else:
            hidden_states = self.output_image_proj(hidden_states)
        hidden_states = hidden_states.flatten(2).transpose(1, 2)
        return hidden_states

    def forward(
        self, hidden_states: torch.Tensor, is_input_image: bool, padding_latent: torch.Tensor = None
    ) -> torch.Tensor:
        # A list input means multiple (possibly differently sized) latents, each
        # patchified independently and optionally right-padded along the token dim.
        if isinstance(hidden_states, list):
            if padding_latent is None:
                padding_latent = [None] * len(hidden_states)
            patched_latents = []
            for sub_latent, padding in zip(hidden_states, padding_latent):
                height, width = sub_latent.shape[-2:]
                sub_latent = self._patch_embeddings(sub_latent, is_input_image)
                pos_embed = self._cropped_pos_embed(height, width)
                sub_latent = sub_latent + pos_embed
                if padding is not None:
                    sub_latent = torch.cat([sub_latent, padding.to(sub_latent.device)], dim=-2)
                patched_latents.append(sub_latent)
        else:
            height, width = hidden_states.shape[-2:]
            pos_embed = self._cropped_pos_embed(height, width)
            hidden_states = self._patch_embeddings(hidden_states, is_input_image)
            patched_latents = hidden_states + pos_embed

        return patched_latents


class OmniGenSuScaledRotaryEmbedding(nn.Module):
    """Phi-3-style "su" scaled rotary embedding: separate frequency rescaling factors
    are applied for sequences shorter/longer than the original training context."""

    def __init__(
        self, dim, max_position_embeddings=131072, original_max_position_embeddings=4096, base=10000, rope_scaling=None
    ):
        super().__init__()
        self.dim = dim
        self.max_position_embeddings = max_position_embeddings
        self.base = base

        inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float() / self.dim))
        self.register_buffer("inv_freq",
tensor=inv_freq, persistent=False)

        self.short_factor = rope_scaling["short_factor"]
        self.long_factor = rope_scaling["long_factor"]
        self.original_max_position_embeddings = original_max_position_embeddings

    def forward(self, hidden_states, position_ids):
        seq_len = torch.max(position_ids) + 1
        # Long sequences use the "long" rescaling factors, otherwise the "short" ones.
        if seq_len > self.original_max_position_embeddings:
            ext_factors = torch.tensor(self.long_factor, dtype=torch.float32, device=hidden_states.device)
        else:
            ext_factors = torch.tensor(self.short_factor, dtype=torch.float32, device=hidden_states.device)

        inv_freq_shape = (
            torch.arange(0, self.dim, 2, dtype=torch.int64, device=hidden_states.device).float() / self.dim
        )
        # NOTE: this re-assigns the `inv_freq` buffer on every forward with the
        # factor-scaled frequencies; the buffer registered in __init__ only provides
        # the initial value.
        self.inv_freq = 1.0 / (ext_factors * self.base**inv_freq_shape)

        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
        position_ids_expanded = position_ids[:, None, :].float()

        # Force float32 since bfloat16 loses precision on long contexts
        # See https://github.com/huggingface/transformers/pull/29285
        device_type = hidden_states.device.type
        device_type = device_type if isinstance(device_type, str) and device_type != "mps" else "cpu"
        with torch.autocast(device_type=device_type, enabled=False):
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            emb = torch.cat((freqs, freqs), dim=-1)[0]

            # Magnitude scaling compensates for the context extension (log-scaled).
            scale = self.max_position_embeddings / self.original_max_position_embeddings
            if scale <= 1.0:
                scaling_factor = 1.0
            else:
                scaling_factor = math.sqrt(1 + math.log(scale) / math.log(self.original_max_position_embeddings))

            cos = emb.cos() * scaling_factor
            sin = emb.sin() * scaling_factor
        return cos, sin


class OmniGenAttnProcessor2_0:
    r"""
    Processor for implementing scaled dot-product attention (enabled by default if you're using PyTorch 2.0). This is
    used in the OmniGen model.
    """

    def __init__(self):
        if not hasattr(F, "scaled_dot_product_attention"):
            raise ImportError("AttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.")

    def __call__(
        self,
        attn: Attention,
        hidden_states: torch.Tensor,
        encoder_hidden_states: torch.Tensor,
        attention_mask: torch.Tensor | None = None,
        image_rotary_emb: torch.Tensor | None = None,
    ) -> torch.Tensor:
        batch_size, sequence_length, _ = hidden_states.shape

        # Get Query-Key-Value Pair
        query = attn.to_q(hidden_states)
        key = attn.to_k(encoder_hidden_states)
        value = attn.to_v(encoder_hidden_states)

        bsz, q_len, query_dim = query.size()
        inner_dim = key.shape[-1]
        head_dim = query_dim // attn.heads
        # Get key-value heads — derived from the k/v projection width, so grouped-query
        # attention (kv_heads < attn.heads) is supported.
        kv_heads = inner_dim // head_dim

        query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
        key = key.view(batch_size, -1, kv_heads, head_dim).transpose(1, 2)
        value = value.view(batch_size, -1, kv_heads, head_dim).transpose(1, 2)

        # Apply RoPE if needed
        if image_rotary_emb is not None:
            from ..embeddings import apply_rotary_emb

            query = apply_rotary_emb(query, image_rotary_emb, use_real_unbind_dim=-2)
            key = apply_rotary_emb(key, image_rotary_emb, use_real_unbind_dim=-2)

        hidden_states = F.scaled_dot_product_attention(query, key, value, attn_mask=attention_mask)
        hidden_states = hidden_states.transpose(1, 2).type_as(query)
        hidden_states = hidden_states.reshape(bsz, q_len, attn.out_dim)
        hidden_states = attn.to_out[0](hidden_states)
        return hidden_states


class OmniGenBlock(nn.Module):
    """Pre-norm transformer block: RMSNorm -> self-attention -> residual, then
    RMSNorm -> gated feed-forward -> residual."""

    def __init__(
        self,
        hidden_size: int,
        num_attention_heads: int,
        num_key_value_heads: int,
        intermediate_size: int,
        rms_norm_eps: float,
    ) -> None:
        super().__init__()

        self.input_layernorm = RMSNorm(hidden_size, eps=rms_norm_eps)
        self.self_attn = Attention(
            query_dim=hidden_size,
            cross_attention_dim=hidden_size,
            dim_head=hidden_size // num_attention_heads,
            heads=num_attention_heads,
            kv_heads=num_key_value_heads,
            bias=False,
            out_dim=hidden_size,
            out_bias=False,
            processor=OmniGenAttnProcessor2_0(),
        )
        self.post_attention_layernorm = RMSNorm(hidden_size, eps=rms_norm_eps)
        self.mlp = OmniGenFeedForward(hidden_size, intermediate_size)

    def forward(
        self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, image_rotary_emb: torch.Tensor
    ) -> torch.Tensor:
        # 1. Attention (self-attention: the normed states serve as both query and context)
        norm_hidden_states = self.input_layernorm(hidden_states)
        attn_output = self.self_attn(
            hidden_states=norm_hidden_states,
            encoder_hidden_states=norm_hidden_states,
            attention_mask=attention_mask,
            image_rotary_emb=image_rotary_emb,
        )
        hidden_states = hidden_states + attn_output

        # 2. Feed Forward
        norm_hidden_states = self.post_attention_layernorm(hidden_states)
        ff_output = self.mlp(norm_hidden_states)
        hidden_states = hidden_states + ff_output

        return hidden_states


class OmniGenTransformer2DModel(ModelMixin, ConfigMixin):
    """
    The Transformer model introduced in OmniGen (https://huggingface.co/papers/2409.11340).

    Parameters:
        in_channels (`int`, defaults to `4`):
            The number of channels in the input.
        patch_size (`int`, defaults to `2`):
            The size of the spatial patches to use in the patch embedding layer.
        hidden_size (`int`, defaults to `3072`):
            The dimensionality of the hidden layers in the model.
        rms_norm_eps (`float`, defaults to `1e-5`):
            Eps for RMSNorm layer.
        num_attention_heads (`int`, defaults to `32`):
            The number of heads to use for multi-head attention.
        num_key_value_heads (`int`, defaults to `32`):
            The number of heads to use for keys and values in multi-head attention.
        intermediate_size (`int`, defaults to `8192`):
            Dimension of the hidden layer in FeedForward layers.
        num_layers (`int`, default to `32`):
            The number of layers of transformer blocks to use.
        pad_token_id (`int`, default to `32000`):
            The id of the padding token.
        vocab_size (`int`, default to `32064`):
            The size of the vocabulary of the embedding vocabulary.
        rope_base (`int`, default to `10000`):
            The default theta value to use when creating RoPE.
rope_scaling (`dict`, optional): The scaling factors for the RoPE. Must contain `short_factor` and `long_factor`. pos_embed_max_size (`int`, default to `192`): The maximum size of the positional embeddings. time_step_dim (`int`, default to `256`): Output dimension of timestep embeddings. flip_sin_to_cos (`bool`, default to `True`): Whether to flip the sin and cos in the positional embeddings when preparing timestep embeddings. downscale_freq_shift (`int`, default to `0`): The frequency shift to use when downscaling the timestep embeddings. timestep_activation_fn (`str`, default to `silu`): The activation function to use for the timestep embeddings. """ _supports_gradient_checkpointing = True _no_split_modules = ["OmniGenBlock"] _skip_layerwise_casting_patterns = ["patch_embedding", "embed_tokens", "norm"] @register_to_config def __init__( self, in_channels: int = 4, patch_size: int = 2, hidden_size: int = 3072, rms_norm_eps: float = 1e-5, num_attention_heads: int = 32, num_key_value_heads: int = 32, intermediate_size: int = 8192, num_layers: int = 32, pad_token_id: int = 32000, vocab_size: int = 32064, max_position_embeddings: int = 131072, original_max_position_embeddings: int = 4096, rope_base: int = 10000, rope_scaling: dict = None, pos_embed_max_size: int = 192, time_step_dim: int = 256, flip_sin_to_cos: bool = True, downscale_freq_shift: int = 0, timestep_activation_fn: str = "silu", ): super().__init__() self.in_channels = in_channels self.out_channels = in_channels self.patch_embedding = OmniGenPatchEmbed( patch_size=patch_size, in_channels=in_channels, embed_dim=hidden_size, pos_embed_max_size=pos_embed_max_size, ) self.time_proj = Timesteps(time_step_dim, flip_sin_to_cos, downscale_freq_shift) self.time_token = TimestepEmbedding(time_step_dim, hidden_size, timestep_activation_fn) self.t_embedder = TimestepEmbedding(time_step_dim, hidden_size, timestep_activation_fn) self.embed_tokens = nn.Embedding(vocab_size, hidden_size, pad_token_id) self.rope = 
OmniGenSuScaledRotaryEmbedding( hidden_size // num_attention_heads, max_position_embeddings=max_position_embeddings, original_max_position_embeddings=original_max_position_embeddings, base=rope_base, rope_scaling=rope_scaling, ) self.layers = nn.ModuleList( [ OmniGenBlock(hidden_size, num_attention_heads, num_key_value_heads, intermediate_size, rms_norm_eps) for _ in range(num_layers) ] ) self.norm = RMSNorm(hidden_size, eps=rms_norm_eps) self.norm_out = AdaLayerNorm(hidden_size, norm_elementwise_affine=False, norm_eps=1e-6, chunk_dim=1) self.proj_out = nn.Linear(hidden_size, patch_size * patch_size * self.out_channels, bias=True) self.gradient_checkpointing = False def _get_multimodal_embeddings( self, input_ids: torch.Tensor, input_img_latents: list[torch.Tensor], input_image_sizes: dict ) -> torch.Tensor | None: if input_ids is None: return None input_img_latents = [x.to(self.dtype) for x in input_img_latents] condition_tokens = self.embed_tokens(input_ids) input_img_inx = 0 input_image_tokens = self.patch_embedding(input_img_latents, is_input_image=True) for b_inx in input_image_sizes.keys(): for start_inx, end_inx in input_image_sizes[b_inx]: # replace the placeholder in text tokens with the image embedding. condition_tokens[b_inx, start_inx:end_inx] = input_image_tokens[input_img_inx].to( condition_tokens.dtype ) input_img_inx += 1 return condition_tokens def forward( self, hidden_states: torch.Tensor, timestep: int | float | torch.FloatTensor, input_ids: torch.Tensor, input_img_latents: list[torch.Tensor], input_image_sizes: dict[int, list[int]], attention_mask: torch.Tensor, position_ids: torch.Tensor, return_dict: bool = True, ) -> Transformer2DModelOutput | tuple[torch.Tensor]: batch_size, num_channels, height, width = hidden_states.shape p = self.config.patch_size post_patch_height, post_patch_width = height // p, width // p # 1. 
Patch & Timestep & Conditional Embedding hidden_states = self.patch_embedding(hidden_states, is_input_image=False) num_tokens_for_output_image = hidden_states.size(1) timestep_proj = self.time_proj(timestep).type_as(hidden_states) time_token = self.time_token(timestep_proj).unsqueeze(1) temb = self.t_embedder(timestep_proj) condition_tokens = self._get_multimodal_embeddings(input_ids, input_img_latents, input_image_sizes) if condition_tokens is not None: hidden_states = torch.cat([condition_tokens, time_token, hidden_states], dim=1) else: hidden_states = torch.cat([time_token, hidden_states], dim=1) seq_length = hidden_states.size(1) position_ids = position_ids.view(-1, seq_length).long() # 2. Attention mask preprocessing if attention_mask is not None and attention_mask.dim() == 3: dtype = hidden_states.dtype min_dtype = torch.finfo(dtype).min attention_mask = (1 - attention_mask) * min_dtype attention_mask = attention_mask.unsqueeze(1).type_as(hidden_states) # 3. Rotary position embedding image_rotary_emb = self.rope(hidden_states, position_ids) # 4. Transformer blocks for block in self.layers: if torch.is_grad_enabled() and self.gradient_checkpointing: hidden_states = self._gradient_checkpointing_func( block, hidden_states, attention_mask, image_rotary_emb ) else: hidden_states = block(hidden_states, attention_mask=attention_mask, image_rotary_emb=image_rotary_emb) # 5. Output norm & projection hidden_states = self.norm(hidden_states) hidden_states = hidden_states[:, -num_tokens_for_output_image:] hidden_states = self.norm_out(hidden_states, temb=temb) hidden_states = self.proj_out(hidden_states) hidden_states = hidden_states.reshape(batch_size, post_patch_height, post_patch_width, p, p, -1) output = hidden_states.permute(0, 5, 1, 3, 2, 4).flatten(4, 5).flatten(2, 3) if not return_dict: return (output,) return Transformer2DModelOutput(sample=output)
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/models/transformers/transformer_omnigen.py", "license": "Apache License 2.0", "lines": 398, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:src/diffusers/pipelines/omnigen/pipeline_omnigen.py
# Copyright 2025 OmniGen team and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect from typing import Callable import numpy as np import torch from transformers import LlamaTokenizer from ...image_processor import PipelineImageInput, VaeImageProcessor from ...models.autoencoders import AutoencoderKL from ...models.transformers import OmniGenTransformer2DModel from ...schedulers import FlowMatchEulerDiscreteScheduler from ...utils import deprecate, is_torch_xla_available, is_torchvision_available, logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput if is_torchvision_available(): from .processor_omnigen import OmniGenMultiModalProcessor if is_torch_xla_available(): XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py >>> import torch >>> from diffusers import OmniGenPipeline >>> pipe = OmniGenPipeline.from_pretrained("Shitao/OmniGen-v1-diffusers", torch_dtype=torch.bfloat16) >>> pipe.to("cuda") >>> prompt = "A cat holding a sign that says hello world" >>> # Depending on the variant being used, the pipeline call will slightly vary. >>> # Refer to the pipeline documentation for more details. 
>>> image = pipe(prompt, num_inference_steps=50, guidance_scale=2.5).images[0] >>> image.save("output.png") ``` """ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps def retrieve_timesteps( scheduler, num_inference_steps: int | None = None, device: str | torch.device | None = None, timesteps: list[int] | None = None, sigmas: list[float] | None = None, **kwargs, ): r""" Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. Args: scheduler (`SchedulerMixin`): The scheduler to get timesteps from. num_inference_steps (`int`): The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` must be `None`. device (`str` or `torch.device`, *optional*): The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. timesteps (`list[int]`, *optional*): Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, `num_inference_steps` and `sigmas` must be `None`. sigmas (`list[float]`, *optional*): Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, `num_inference_steps` and `timesteps` must be `None`. Returns: `tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the second element is the number of inference steps. """ if timesteps is not None and sigmas is not None: raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") if timesteps is not None: accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" timestep schedules. 
Please check whether you are using the correct scheduler." ) scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif sigmas is not None: accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accept_sigmas: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" sigmas schedules. Please check whether you are using the correct scheduler." ) scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return timesteps, num_inference_steps class OmniGenPipeline( DiffusionPipeline, ): r""" The OmniGen pipeline for multimodal-to-image generation. Reference: https://huggingface.co/papers/2409.11340 Args: transformer ([`OmniGenTransformer2DModel`]): Autoregressive Transformer architecture for OmniGen. scheduler ([`FlowMatchEulerDiscreteScheduler`]): A scheduler to be used in combination with `transformer` to denoise the encoded image latents. vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. tokenizer (`LlamaTokenizer`): Text tokenizer of class. [LlamaTokenizer](https://huggingface.co/docs/transformers/main/model_doc/llama#transformers.LlamaTokenizer). 
""" model_cpu_offload_seq = "transformer->vae" _optional_components = [] _callback_tensor_inputs = ["latents"] def __init__( self, transformer: OmniGenTransformer2DModel, scheduler: FlowMatchEulerDiscreteScheduler, vae: AutoencoderKL, tokenizer: LlamaTokenizer, ): super().__init__() self.register_modules( vae=vae, tokenizer=tokenizer, transformer=transformer, scheduler=scheduler, ) self.vae_scale_factor = ( 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) is not None else 8 ) # OmniGen latents are turned into 2x2 patches and packed. This means the latent width and height has to be divisible # by the patch size. So the vae scale factor is multiplied by the patch size to account for this self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor * 2) self.multimodal_processor = OmniGenMultiModalProcessor(tokenizer, max_image_size=1024) self.tokenizer_max_length = ( self.tokenizer.model_max_length if hasattr(self, "tokenizer") and self.tokenizer is not None else 120000 ) self.default_sample_size = 128 def encode_input_images( self, input_pixel_values: list[torch.Tensor], device: torch.device | None = None, dtype: torch.dtype | None = None, ): """ get the continue embedding of input images by VAE Args: input_pixel_values: normalized pixel of input images device: Returns: torch.Tensor """ device = device or self._execution_device dtype = dtype or self.vae.dtype input_img_latents = [] for img in input_pixel_values: img = self.vae.encode(img.to(device, dtype)).latent_dist.sample().mul_(self.vae.config.scaling_factor) input_img_latents.append(img) return input_img_latents def check_inputs( self, prompt, input_images, height, width, use_input_image_size_as_output, callback_on_step_end_tensor_inputs=None, ): if input_images is not None: if len(input_images) != len(prompt): raise ValueError( f"The number of prompts: {len(prompt)} does not match the number of input images: {len(input_images)}." 
) for i in range(len(input_images)): if input_images[i] is not None: if not all(f"<img><|image_{k + 1}|></img>" in prompt[i] for k in range(len(input_images[i]))): raise ValueError( f"prompt `{prompt[i]}` doesn't have enough placeholders for the input images `{input_images[i]}`" ) if height % (self.vae_scale_factor * 2) != 0 or width % (self.vae_scale_factor * 2) != 0: logger.warning( f"`height` and `width` have to be divisible by {self.vae_scale_factor * 2} but are {height} and {width}. Dimensions will be resized accordingly" ) if use_input_image_size_as_output: if input_images is None or input_images[0] is None: raise ValueError( "`use_input_image_size_as_output` is set to True, but no input image was found. If you are performing a text-to-image task, please set it to False." ) if callback_on_step_end_tensor_inputs is not None and not all( k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs ): raise ValueError( f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" ) def enable_vae_slicing(self): r""" Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. """ depr_message = f"Calling `enable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_slicing()`." deprecate( "enable_vae_slicing", "0.40.0", depr_message, ) self.vae.enable_slicing() def disable_vae_slicing(self): r""" Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to computing decoding in one step. """ depr_message = f"Calling `disable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. 
Please use `pipe.vae.disable_slicing()`." deprecate( "disable_vae_slicing", "0.40.0", depr_message, ) self.vae.disable_slicing() def enable_vae_tiling(self): r""" Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow processing larger images. """ depr_message = f"Calling `enable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_tiling()`." deprecate( "enable_vae_tiling", "0.40.0", depr_message, ) self.vae.enable_tiling() def disable_vae_tiling(self): r""" Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to computing decoding in one step. """ depr_message = f"Calling `disable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_tiling()`." deprecate( "disable_vae_tiling", "0.40.0", depr_message, ) self.vae.disable_tiling() # Copied from diffusers.pipelines.stable_diffusion_3.pipeline_stable_diffusion_3.StableDiffusion3Pipeline.prepare_latents def prepare_latents( self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None, ): if latents is not None: return latents.to(device=device, dtype=dtype) shape = ( batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor, ) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
) latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) return latents @property def guidance_scale(self): return self._guidance_scale @property def num_timesteps(self): return self._num_timesteps @property def interrupt(self): return self._interrupt @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, prompt: str | list[str], input_images: PipelineImageInput | list[PipelineImageInput] = None, height: int | None = None, width: int | None = None, num_inference_steps: int = 50, max_input_image_size: int = 1024, timesteps: list[int] = None, guidance_scale: float = 2.5, img_guidance_scale: float = 1.6, use_input_image_size_as_output: bool = False, num_images_per_prompt: int | None = 1, generator: torch.Generator | list[torch.Generator] | None = None, latents: torch.Tensor | None = None, output_type: str | None = "pil", return_dict: bool = True, callback_on_step_end: Callable[[int, int], None] | None = None, callback_on_step_end_tensor_inputs: list[str] = ["latents"], ): r""" Function invoked when calling the pipeline for generation. Args: prompt (`str` or `list[str]`, *optional*): The prompt or prompts to guide the image generation. If the input includes images, need to add placeholders `<img><|image_i|></img>` in the prompt to indicate the position of the i-th images. input_images (`PipelineImageInput` or `list[PipelineImageInput]`, *optional*): The list of input images. We will replace the "<|image_i|>" in prompt with the i-th image in list. height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): The height in pixels of the generated image. This is set to 1024 by default for the best results. width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): The width in pixels of the generated image. This is set to 1024 by default for the best results. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. 
More denoising steps usually lead to a higher quality image at the expense of slower inference. max_input_image_size (`int`, *optional*, defaults to 1024): the maximum size of input image, which will be used to crop the input image to the maximum size timesteps (`list[int]`, *optional*): Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed will be used. Must be in descending order. guidance_scale (`float`, *optional*, defaults to 2.5): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. img_guidance_scale (`float`, *optional*, defaults to 1.6): Defined as equation 3 in [Instrucpix2pix](https://huggingface.co/papers/2211.09800). use_input_image_size_as_output (bool, defaults to False): whether to use the input image size as the output image size, which can be used for single-image input, e.g., image editing task num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. generator (`torch.Generator` or `list[torch.Generator]`, *optional*): One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor will be generated by sampling using the supplied random `generator`. 
output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.flux.FluxPipelineOutput`] instead of a plain tuple. callback_on_step_end (`Callable`, *optional*): A function that calls at the end of each denoising steps during the inference. The function is called with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by `callback_on_step_end_tensor_inputs`. callback_on_step_end_tensor_inputs (`list`, *optional*): The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the `._callback_tensor_inputs` attribute of your pipeline class. Examples: Returns: [`~pipelines.ImagePipelineOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated images. """ height = height or self.default_sample_size * self.vae_scale_factor width = width or self.default_sample_size * self.vae_scale_factor num_cfg = 2 if input_images is not None else 1 use_img_cfg = True if input_images is not None else False if isinstance(prompt, str): prompt = [prompt] input_images = [input_images] # 1. Check inputs. Raise error if not correct self.check_inputs( prompt, input_images, height, width, use_input_image_size_as_output, callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, ) self._guidance_scale = guidance_scale self._interrupt = False # 2. Define call parameters batch_size = len(prompt) device = self._execution_device # 3. 
process multi-modal instructions if max_input_image_size != self.multimodal_processor.max_image_size: self.multimodal_processor.reset_max_image_size(max_image_size=max_input_image_size) processed_data = self.multimodal_processor( prompt, input_images, height=height, width=width, use_img_cfg=use_img_cfg, use_input_image_size_as_output=use_input_image_size_as_output, num_images_per_prompt=num_images_per_prompt, ) processed_data["input_ids"] = processed_data["input_ids"].to(device) processed_data["attention_mask"] = processed_data["attention_mask"].to(device) processed_data["position_ids"] = processed_data["position_ids"].to(device) # 4. Encode input images input_img_latents = self.encode_input_images(processed_data["input_pixel_values"], device=device) # 5. Prepare timesteps sigmas = np.linspace(1, 0, num_inference_steps + 1)[:num_inference_steps] if XLA_AVAILABLE: timestep_device = "cpu" else: timestep_device = device timesteps, num_inference_steps = retrieve_timesteps( self.scheduler, num_inference_steps, timestep_device, timesteps, sigmas=sigmas ) self._num_timesteps = len(timesteps) # 6. Prepare latents transformer_dtype = self.transformer.dtype if use_input_image_size_as_output: height, width = processed_data["input_pixel_values"][0].shape[-2:] latent_channels = self.transformer.config.in_channels latents = self.prepare_latents( batch_size * num_images_per_prompt, latent_channels, height, width, torch.float32, device, generator, latents, ) # 8. 
Denoising loop with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * (num_cfg + 1)) latent_model_input = latent_model_input.to(transformer_dtype) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML timestep = t.expand(latent_model_input.shape[0]) noise_pred = self.transformer( hidden_states=latent_model_input, timestep=timestep, input_ids=processed_data["input_ids"], input_img_latents=input_img_latents, input_image_sizes=processed_data["input_image_sizes"], attention_mask=processed_data["attention_mask"], position_ids=processed_data["position_ids"], return_dict=False, )[0] if num_cfg == 2: cond, uncond, img_cond = torch.split(noise_pred, len(noise_pred) // 3, dim=0) noise_pred = uncond + img_guidance_scale * (img_cond - uncond) + guidance_scale * (cond - img_cond) else: cond, uncond = torch.split(noise_pred, len(noise_pred) // 2, dim=0) noise_pred = uncond + guidance_scale * (cond - uncond) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0] if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop("latents", latents) progress_bar.update() if not output_type == "latent": latents = latents.to(self.vae.dtype) latents = latents / self.vae.config.scaling_factor image = self.vae.decode(latents, return_dict=False)[0] image = self.image_processor.postprocess(image, output_type=output_type) else: image = latents # Offload all models self.maybe_free_model_hooks() if not return_dict: return (image,) return ImagePipelineOutput(images=image)
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/pipelines/omnigen/pipeline_omnigen.py", "license": "Apache License 2.0", "lines": 477, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:src/diffusers/pipelines/omnigen/processor_omnigen.py
# Copyright 2025 OmniGen team and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import re

import numpy as np
import torch
from PIL import Image

from ...utils import is_torchvision_available


if is_torchvision_available():
    from torchvision import transforms


def crop_image(pil_image, max_image_size):
    """
    Crop the image so that its height and width does not exceed `max_image_size`, while ensuring both the height and
    width are multiples of 16.
    """
    # Repeatedly halve while the short side is still at least twice the cap (cheap BOX downscale).
    while min(*pil_image.size) >= 2 * max_image_size:
        pil_image = pil_image.resize(tuple(x // 2 for x in pil_image.size), resample=Image.BOX)

    # Final bicubic downscale so the long side fits within `max_image_size`.
    if max(*pil_image.size) > max_image_size:
        scale = max_image_size / max(*pil_image.size)
        pil_image = pil_image.resize(tuple(round(x * scale) for x in pil_image.size), resample=Image.BICUBIC)

    # Upscale tiny images so that cropping to a multiple of 16 below is possible.
    if min(*pil_image.size) < 16:
        scale = 16 / min(*pil_image.size)
        pil_image = pil_image.resize(tuple(round(x * scale) for x in pil_image.size), resample=Image.BICUBIC)

    # Center-crop the remainders so height and width become exact multiples of 16.
    arr = np.array(pil_image)
    crop_y1 = (arr.shape[0] % 16) // 2
    crop_y2 = arr.shape[0] % 16 - crop_y1
    crop_x1 = (arr.shape[1] % 16) // 2
    crop_x2 = arr.shape[1] % 16 - crop_x1
    arr = arr[crop_y1 : arr.shape[0] - crop_y2, crop_x1 : arr.shape[1] - crop_x2]
    return Image.fromarray(arr)


class OmniGenMultiModalProcessor:
    """
    Joint text/image preprocessor for OmniGen. Tokenizes prompts containing `<|image_i|>` placeholder tags, converts
    the referenced images to normalized tensors, and collates positive / negative / image-CFG branches into one batch.
    """

    def __init__(self, text_tokenizer, max_image_size: int = 1024):
        self.text_tokenizer = text_tokenizer
        self.max_image_size = max_image_size

        # Crop to multiples of 16, convert to tensor, then map pixel values to [-1, 1].
        self.image_transform = transforms.Compose(
            [
                transforms.Lambda(lambda pil_image: crop_image(pil_image, max_image_size)),
                transforms.ToTensor(),
                transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], inplace=True),
            ]
        )

        self.collator = OmniGenCollator()

    def reset_max_image_size(self, max_image_size):
        # Rebuild the transform pipeline so the Lambda captures the new size.
        self.max_image_size = max_image_size
        self.image_transform = transforms.Compose(
            [
                transforms.Lambda(lambda pil_image: crop_image(pil_image, max_image_size)),
                transforms.ToTensor(),
                transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], inplace=True),
            ]
        )

    def process_image(self, image):
        # Accept either a filesystem path or an already-loaded PIL image.
        if isinstance(image, str):
            image = Image.open(image).convert("RGB")
        return self.image_transform(image)

    def process_multi_modal_prompt(self, text, input_images):
        """
        Tokenize `text`, splicing a run of placeholder token ids (0) where each `<|image_i|>` tag appears, and record
        the [start, end) span of every spliced run in `image_sizes`.
        """
        text = self.add_prefix_instruction(text)
        if input_images is None or len(input_images) == 0:
            # Text-only prompt: no pixel values or image spans to record.
            model_inputs = self.text_tokenizer(text)
            return {"input_ids": model_inputs.input_ids, "pixel_values": None, "image_sizes": None}

        pattern = r"<\|image_\d+\|>"
        prompt_chunks = [self.text_tokenizer(chunk).input_ids for chunk in re.split(pattern, text)]

        for i in range(1, len(prompt_chunks)):
            # Strip the BOS token (id 1) the tokenizer prepends to every non-leading chunk.
            if prompt_chunks[i][0] == 1:
                prompt_chunks[i] = prompt_chunks[i][1:]

        image_tags = re.findall(pattern, text)
        # Extract the integer index i from each "<|image_i|>" tag.
        image_ids = [int(s.split("|")[1].split("_")[-1]) for s in image_tags]

        unique_image_ids = sorted(set(image_ids))
        assert unique_image_ids == list(range(1, len(unique_image_ids) + 1)), (
            f"image_ids must start from 1, and must be continuous int, e.g. [1, 2, 3], cannot be {unique_image_ids}"
        )
        # total images must be the same as the number of image tags
        assert len(unique_image_ids) == len(input_images), (
            f"total images must be the same as the number of image tags, got {len(unique_image_ids)} image tags and {len(input_images)} images"
        )

        # Re-order images to match tag order; a repeated tag duplicates its image.
        input_images = [input_images[x - 1] for x in image_ids]

        all_input_ids = []
        img_inx = []
        for i in range(len(prompt_chunks)):
            all_input_ids.extend(prompt_chunks[i])
            if i != len(prompt_chunks) - 1:
                start_inx = len(all_input_ids)
                # One placeholder token per 16x16 pixel patch of the processed image.
                size = input_images[i].size(-2) * input_images[i].size(-1) // 16 // 16
                img_inx.append([start_inx, start_inx + size])
                all_input_ids.extend([0] * size)

        return {"input_ids": all_input_ids, "pixel_values": input_images, "image_sizes": img_inx}

    def add_prefix_instruction(self, prompt):
        # Wrap the raw prompt in OmniGen's chat template. Note the assistant turn is
        # appended after the <|end|> suffix, in this exact order.
        user_prompt = "<|user|>\n"
        generation_prompt = "Generate an image according to the following instructions\n"
        assistant_prompt = "<|assistant|>\n<|diffusion|>"
        prompt_suffix = "<|end|>\n"
        prompt = f"{user_prompt}{generation_prompt}{prompt}{prompt_suffix}{assistant_prompt}"
        return prompt

    def __call__(
        self,
        instructions: list[str],
        input_images: list[list[str]] = None,
        height: int = 1024,
        width: int = 1024,
        negative_prompt: str = "low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers.",
        use_img_cfg: bool = True,
        separate_cfg_input: bool = False,
        use_input_image_size_as_output: bool = False,
        num_images_per_prompt: int = 1,
    ) -> dict:
        """
        Build collated model inputs for a batch of instructions.

        For each instruction, produces the positive branch, a text-only negative branch, and (when `use_img_cfg`) an
        image-only CFG branch, then hands all tuples to the collator. Each entry is repeated `num_images_per_prompt`
        times with its target output size.
        """
        if isinstance(instructions, str):
            instructions = [instructions]
            input_images = [input_images]

        input_data = []
        for i in range(len(instructions)):
            cur_instruction = instructions[i]
            cur_input_images = None if input_images is None else input_images[i]
            if cur_input_images is not None and len(cur_input_images) > 0:
                cur_input_images = [self.process_image(x) for x in cur_input_images]
            else:
                cur_input_images = None
                # No images supplied, so the prompt must not reference one.
                assert "<img><|image_1|></img>" not in cur_instruction

            mllm_input = self.process_multi_modal_prompt(cur_instruction, cur_input_images)

            neg_mllm_input, img_cfg_mllm_input = None, None
            # The negative branch is always text-only.
            neg_mllm_input = self.process_multi_modal_prompt(negative_prompt, None)
            if use_img_cfg:
                if cur_input_images is not None and len(cur_input_images) >= 1:
                    # Image-CFG branch: keep the input images but drop the text instruction.
                    img_cfg_prompt = [f"<img><|image_{i + 1}|></img>" for i in range(len(cur_input_images))]
                    img_cfg_mllm_input = self.process_multi_modal_prompt(" ".join(img_cfg_prompt), cur_input_images)
                else:
                    img_cfg_mllm_input = neg_mllm_input

            for _ in range(num_images_per_prompt):
                if use_input_image_size_as_output:
                    # Use the first input image's (H, W) as the output size.
                    input_data.append(
                        (
                            mllm_input,
                            neg_mllm_input,
                            img_cfg_mllm_input,
                            [mllm_input["pixel_values"][0].size(-2), mllm_input["pixel_values"][0].size(-1)],
                        )
                    )
                else:
                    input_data.append((mllm_input, neg_mllm_input, img_cfg_mllm_input, [height, width]))

        return self.collator(input_data)


class OmniGenCollator:
    """
    Pads and stacks a batch of (positive, negative, image-CFG) processor outputs into model-ready tensors: padded
    input ids, position ids, a per-sample 2D attention mask, padding image embeddings, and pixel values.
    """

    def __init__(self, pad_token_id=2, hidden_size=3072):
        self.pad_token_id = pad_token_id
        self.hidden_size = hidden_size

    def create_position(self, attention_mask, num_tokens_for_output_images):
        # Left-padded position ids: zeros over the padding, then a contiguous range
        # covering text tokens + output-image tokens + the time-embedding token.
        position_ids = []
        text_length = attention_mask.size(-1)
        img_length = max(num_tokens_for_output_images)
        for mask in attention_mask:
            temp_l = torch.sum(mask)
            temp_position = [0] * (text_length - temp_l) + list(
                range(temp_l + img_length + 1)
            )  # we add a time embedding into the sequence, so add one more token
            position_ids.append(temp_position)
        return torch.LongTensor(position_ids)

    def create_mask(self, attention_mask, num_tokens_for_output_images):
        """
        OmniGen applies causal attention to each element in the sequence, but applies bidirectional attention within
        each image sequence References: [OmniGen](https://huggingface.co/papers/2409.11340)
        """
        extended_mask = []
        padding_images = []
        text_length = attention_mask.size(-1)
        img_length = max(num_tokens_for_output_images)
        seq_len = text_length + img_length + 1  # we add a time embedding into the sequence, so add one more token
        inx = 0
        for mask in attention_mask:
            temp_l = torch.sum(mask)
            pad_l = text_length - temp_l

            # Causal (lower-triangular) mask over text tokens + the time token.
            temp_mask = torch.tril(torch.ones(size=(temp_l + 1, temp_l + 1)))

            # Text/time tokens cannot attend to output-image tokens...
            image_mask = torch.zeros(size=(temp_l + 1, img_length))
            temp_mask = torch.cat([temp_mask, image_mask], dim=-1)

            # ...while output-image tokens attend to everything (bidirectional within the image).
            image_mask = torch.ones(size=(img_length, temp_l + img_length + 1))
            temp_mask = torch.cat([temp_mask, image_mask], dim=0)

            if pad_l > 0:
                # Left padding: block attention to padded columns, add filler rows on top.
                pad_mask = torch.zeros(size=(temp_l + 1 + img_length, pad_l))
                temp_mask = torch.cat([pad_mask, temp_mask], dim=-1)

                pad_mask = torch.ones(size=(pad_l, seq_len))
                temp_mask = torch.cat([pad_mask, temp_mask], dim=0)

            # Zero out the columns of image slots beyond this sample's true output-image
            # length and prepare zero embeddings to fill those padded slots.
            true_img_length = num_tokens_for_output_images[inx]
            pad_img_length = img_length - true_img_length
            if pad_img_length > 0:
                temp_mask[:, -pad_img_length:] = 0
                temp_padding_imgs = torch.zeros(size=(1, pad_img_length, self.hidden_size))
            else:
                temp_padding_imgs = None

            extended_mask.append(temp_mask.unsqueeze(0))
            padding_images.append(temp_padding_imgs)
            inx += 1
        return torch.cat(extended_mask, dim=0), padding_images

    def adjust_attention_for_input_images(self, attention_mask, image_sizes):
        # Make attention bidirectional within each *input* image span of each sample.
        for b_inx in image_sizes.keys():
            for start_inx, end_inx in image_sizes[b_inx]:
                attention_mask[b_inx][start_inx:end_inx, start_inx:end_inx] = 1
        return attention_mask

    def pad_input_ids(self, input_ids, image_sizes):
        # Left-pad every sequence to the batch maximum and shift each recorded
        # input-image span by the amount of padding added to its sample.
        max_l = max([len(x) for x in input_ids])
        padded_ids = []
        attention_mask = []

        for i in range(len(input_ids)):
            temp_ids = input_ids[i]
            temp_l = len(temp_ids)
            pad_l = max_l - temp_l
            if pad_l == 0:
                attention_mask.append([1] * max_l)
                padded_ids.append(temp_ids)
            else:
                attention_mask.append([0] * pad_l + [1] * temp_l)
                padded_ids.append([self.pad_token_id] * pad_l + temp_ids)

            if i in image_sizes:
                new_inx = []
                for old_inx in image_sizes[i]:
                    new_inx.append([x + pad_l for x in old_inx])
                image_sizes[i] = new_inx

        return torch.LongTensor(padded_ids), torch.LongTensor(attention_mask), image_sizes

    def process_mllm_input(self, mllm_inputs, target_img_size):
        # One output-image token per 16x16 pixel patch of the target size.
        num_tokens_for_output_images = []
        for img_size in target_img_size:
            num_tokens_for_output_images.append(img_size[0] * img_size[1] // 16 // 16)

        # Gather per-sample input images and the spans they occupy, keyed by batch index.
        pixel_values, image_sizes = [], {}
        b_inx = 0
        for x in mllm_inputs:
            if x["pixel_values"] is not None:
                pixel_values.extend(x["pixel_values"])
                for size in x["image_sizes"]:
                    if b_inx not in image_sizes:
                        image_sizes[b_inx] = [size]
                    else:
                        image_sizes[b_inx].append(size)
            b_inx += 1
        pixel_values = [x.unsqueeze(0) for x in pixel_values]

        input_ids = [x["input_ids"] for x in mllm_inputs]
        padded_input_ids, attention_mask, image_sizes = self.pad_input_ids(input_ids, image_sizes)
        position_ids = self.create_position(attention_mask, num_tokens_for_output_images)
        attention_mask, padding_images = self.create_mask(attention_mask, num_tokens_for_output_images)
        attention_mask = self.adjust_attention_for_input_images(attention_mask, image_sizes)

        return padded_input_ids, position_ids, attention_mask, padding_images, pixel_values, image_sizes

    def __call__(self, features):
        mllm_inputs = [f[0] for f in features]
        cfg_mllm_inputs = [f[1] for f in features]
        img_cfg_mllm_input = [f[2] for f in features]
        target_img_size = [f[3] for f in features]

        # Concatenate CFG branches into one batch: [positive; negative(; image-CFG)].
        if img_cfg_mllm_input[0] is not None:
            mllm_inputs = mllm_inputs + cfg_mllm_inputs + img_cfg_mllm_input
            target_img_size = target_img_size + target_img_size + target_img_size
        else:
            mllm_inputs = mllm_inputs + cfg_mllm_inputs
            target_img_size = target_img_size + target_img_size

        (
            all_padded_input_ids,
            all_position_ids,
            all_attention_mask,
            all_padding_images,
            all_pixel_values,
            all_image_sizes,
        ) = self.process_mllm_input(mllm_inputs, target_img_size)

        data = {
            "input_ids": all_padded_input_ids,
            "attention_mask": all_attention_mask,
            "position_ids": all_position_ids,
            "input_pixel_values": all_pixel_values,
            "input_image_sizes": all_image_sizes,
        }
        return data
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/pipelines/omnigen/processor_omnigen.py", "license": "Apache License 2.0", "lines": 273, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:tests/models/transformers/test_models_transformer_omnigen.py
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import torch

from diffusers import OmniGenTransformer2DModel

from ...testing_utils import enable_full_determinism, torch_device
from ..test_modeling_common import ModelTesterMixin


enable_full_determinism()


class OmniGenTransformerTests(ModelTesterMixin, unittest.TestCase):
    """Runs the common `ModelTesterMixin` model tests against a tiny `OmniGenTransformer2DModel`."""

    model_class = OmniGenTransformer2DModel
    main_input_name = "hidden_states"
    uses_custom_attn_processor = True
    model_split_percents = [0.1, 0.1, 0.1]

    @property
    def dummy_input(self):
        # Small synthetic batch: noisy latents, per-sample timesteps, token ids, one
        # reference-image latent, and the matching attention/position tensors.
        batch_size = 2
        num_channels = 4
        height = 8
        width = 8
        sequence_length = 24

        hidden_states = torch.randn((batch_size, num_channels, height, width)).to(torch_device)
        timestep = torch.rand(size=(batch_size,), dtype=hidden_states.dtype).to(torch_device)
        input_ids = torch.randint(0, 10, (batch_size, sequence_length)).to(torch_device)
        input_img_latents = [torch.randn((1, num_channels, height, width)).to(torch_device)]
        # One input-image span starting at position 0; length = number of 2x2 latent patches.
        input_image_sizes = {0: [[0, 0 + height * width // 2 // 2]]}

        # Full sequence = text tokens + 1 time-embedding token + output-image patch tokens.
        attn_seq_length = sequence_length + 1 + height * width // 2 // 2
        attention_mask = torch.ones((batch_size, attn_seq_length, attn_seq_length)).to(torch_device)
        position_ids = torch.LongTensor([list(range(attn_seq_length))] * batch_size).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": timestep,
            "input_ids": input_ids,
            "input_img_latents": input_img_latents,
            "input_image_sizes": input_image_sizes,
            "attention_mask": attention_mask,
            "position_ids": position_ids,
        }

    @property
    def input_shape(self):
        # (channels, height, width) of the latent input.
        return (4, 8, 8)

    @property
    def output_shape(self):
        # Output latent has the same (channels, height, width) as the input.
        return (4, 8, 8)

    def prepare_init_args_and_inputs_for_common(self):
        # Tiny model configuration so the shared tests run quickly on CPU.
        init_dict = {
            "hidden_size": 16,
            "num_attention_heads": 4,
            "num_key_value_heads": 4,
            "intermediate_size": 32,
            "num_layers": 20,
            "pad_token_id": 0,
            "vocab_size": 1000,
            "in_channels": 4,
            "time_step_dim": 4,
            "rope_scaling": {"long_factor": list(range(1, 3)), "short_factor": list(range(1, 3))},
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_gradient_checkpointing_is_applied(self):
        # Gradient checkpointing is expected to be applied on the top-level transformer class.
        expected_set = {"OmniGenTransformer2DModel"}
        super().test_gradient_checkpointing_is_applied(expected_set=expected_set)
{ "repo_id": "huggingface/diffusers", "file_path": "tests/models/transformers/test_models_transformer_omnigen.py", "license": "Apache License 2.0", "lines": 73, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/diffusers:tests/pipelines/omnigen/test_pipeline_omnigen.py
import gc
import unittest

import numpy as np
import torch
from transformers import AutoTokenizer

from diffusers import AutoencoderKL, FlowMatchEulerDiscreteScheduler, OmniGenPipeline, OmniGenTransformer2DModel

from ...testing_utils import (
    Expectations,
    backend_empty_cache,
    numpy_cosine_similarity_distance,
    require_torch_accelerator,
    slow,
    torch_device,
)
from ..test_pipelines_common import PipelineTesterMixin


class OmniGenPipelineFastTests(unittest.TestCase, PipelineTesterMixin):
    """Fast, CPU-sized smoke tests for `OmniGenPipeline` built from tiny model components."""

    pipeline_class = OmniGenPipeline
    params = frozenset(["prompt", "guidance_scale"])
    batch_params = frozenset(["prompt"])
    test_xformers_attention = False
    test_layerwise_casting = True

    def get_dummy_components(self):
        # Re-seed before each component so its random weights are reproducible.
        torch.manual_seed(0)
        transformer = OmniGenTransformer2DModel(
            hidden_size=16,
            num_attention_heads=4,
            num_key_value_heads=4,
            intermediate_size=32,
            num_layers=1,
            in_channels=4,
            time_step_dim=4,
            rope_scaling={"long_factor": list(range(1, 3)), "short_factor": list(range(1, 3))},
        )

        torch.manual_seed(0)
        vae = AutoencoderKL(
            sample_size=32,
            in_channels=3,
            out_channels=3,
            block_out_channels=(4, 4, 4, 4),
            layers_per_block=1,
            latent_channels=4,
            norm_num_groups=1,
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
        )

        scheduler = FlowMatchEulerDiscreteScheduler(invert_sigmas=True, num_train_timesteps=1)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/llama-tokenizer")

        components = {
            "transformer": transformer,
            "vae": vae,
            "scheduler": scheduler,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        # MPS does not support device-local generators, so fall back to the global seed there.
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device="cpu").manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 1,
            "guidance_scale": 3.0,
            "output_type": "np",
            "height": 16,
            "width": 16,
        }
        return inputs

    def test_inference(self):
        pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device)
        inputs = self.get_dummy_inputs(torch_device)

        generated_image = pipe(**inputs).images[0]

        # Only the output shape is checked here; numeric values are covered by the slow tests.
        self.assertEqual(generated_image.shape, (16, 16, 3))


@slow
@require_torch_accelerator
class OmniGenPipelineSlowTests(unittest.TestCase):
    """Integration tests against the released OmniGen checkpoint; requires an accelerator."""

    pipeline_class = OmniGenPipeline
    repo_id = "shitao/OmniGen-v1-diffusers"

    def setUp(self):
        super().setUp()
        gc.collect()
        backend_empty_cache(torch_device)

    def tearDown(self):
        super().tearDown()
        gc.collect()
        backend_empty_cache(torch_device)

    def get_inputs(self, device, seed=0):
        # MPS does not support device-local generators, so fall back to the global seed there.
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device="cpu").manual_seed(seed)

        return {
            "prompt": "A photo of a cat",
            "num_inference_steps": 2,
            "guidance_scale": 2.5,
            "output_type": "np",
            "generator": generator,
        }

    def test_omnigen_inference(self):
        pipe = self.pipeline_class.from_pretrained(self.repo_id, torch_dtype=torch.bfloat16)
        pipe.enable_model_cpu_offload()

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images[0]
        image_slice = image[0, :10, :10]

        # Reference pixel slices recorded per (backend, compute-capability) pair.
        expected_slices = Expectations(
            {
                ("xpu", 3): np.array(
                    [
                        [0.05859375, 0.05859375, 0.04492188],
                        [0.04882812, 0.04101562, 0.03320312],
                        [0.04882812, 0.04296875, 0.03125],
                        [0.04296875, 0.0390625, 0.03320312],
                        [0.04296875, 0.03710938, 0.03125],
                        [0.04492188, 0.0390625, 0.03320312],
                        [0.04296875, 0.03710938, 0.03125],
                        [0.04101562, 0.03710938, 0.02734375],
                        [0.04101562, 0.03515625, 0.02734375],
                        [0.04101562, 0.03515625, 0.02929688],
                    ],
                    dtype=np.float32,
                ),
                ("cuda", 7): np.array(
                    [
                        [0.1783447, 0.16772744, 0.14339337],
                        [0.17066911, 0.15521264, 0.13757327],
                        [0.17072496, 0.15531206, 0.13524258],
                        [0.16746324, 0.1564025, 0.13794944],
                        [0.16490817, 0.15258026, 0.13697758],
                        [0.16971767, 0.15826806, 0.13928896],
                        [0.16782972, 0.15547255, 0.13783783],
                        [0.16464645, 0.15281534, 0.13522372],
                        [0.16535294, 0.15301755, 0.13526791],
                        [0.16365296, 0.15092957, 0.13443318],
                    ],
                    dtype=np.float32,
                ),
                ("cuda", 8): np.array(
                    [
                        [0.0546875, 0.05664062, 0.04296875],
                        [0.046875, 0.04101562, 0.03320312],
                        [0.05078125, 0.04296875, 0.03125],
                        [0.04296875, 0.04101562, 0.03320312],
                        [0.0390625, 0.03710938, 0.02929688],
                        [0.04296875, 0.03710938, 0.03125],
                        [0.0390625, 0.03710938, 0.02929688],
                        [0.0390625, 0.03710938, 0.02734375],
                        [0.0390625, 0.03320312, 0.02734375],
                        [0.0390625, 0.03320312, 0.02734375],
                    ],
                    dtype=np.float32,
                ),
            }
        )
        expected_slice = expected_slices.get_expectation()

        # Cosine-similarity distance is robust to small per-element numeric drift across hardware.
        max_diff = numpy_cosine_similarity_distance(expected_slice.flatten(), image_slice.flatten())

        assert max_diff < 1e-4
{ "repo_id": "huggingface/diffusers", "file_path": "tests/pipelines/omnigen/test_pipeline_omnigen.py", "license": "Apache License 2.0", "lines": 156, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/diffusers:src/diffusers/hooks/pyramid_attention_broadcast.py
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import re
from dataclasses import dataclass
from typing import Any, Callable

import torch

from ..models.attention import AttentionModuleMixin
from ..models.attention_processor import Attention, MochiAttention
from ..utils import logging
from ._common import (
    _ATTENTION_CLASSES,
    _CROSS_TRANSFORMER_BLOCK_IDENTIFIERS,
    _SPATIAL_TRANSFORMER_BLOCK_IDENTIFIERS,
    _TEMPORAL_TRANSFORMER_BLOCK_IDENTIFIERS,
)
from .hooks import HookRegistry, ModelHook


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

_PYRAMID_ATTENTION_BROADCAST_HOOK = "pyramid_attention_broadcast"


@dataclass
class PyramidAttentionBroadcastConfig:
    r"""
    Configuration for Pyramid Attention Broadcast.

    Args:
        spatial_attention_block_skip_range (`int`, *optional*, defaults to `None`):
            The number of times a specific spatial attention broadcast is skipped before computing the attention
            states to re-use. If this is set to the value `N`, the attention computation will be skipped `N - 1` times
            (i.e., old attention states will be reused) before computing the new attention states again.
        temporal_attention_block_skip_range (`int`, *optional*, defaults to `None`):
            The number of times a specific temporal attention broadcast is skipped before computing the attention
            states to re-use. If this is set to the value `N`, the attention computation will be skipped `N - 1` times
            (i.e., old attention states will be reused) before computing the new attention states again.
        cross_attention_block_skip_range (`int`, *optional*, defaults to `None`):
            The number of times a specific cross-attention broadcast is skipped before computing the attention states
            to re-use. If this is set to the value `N`, the attention computation will be skipped `N - 1` times (i.e.,
            old attention states will be reused) before computing the new attention states again.
        spatial_attention_timestep_skip_range (`tuple[int, int]`, defaults to `(100, 800)`):
            The range of timesteps to skip in the spatial attention layer. The attention computations will be
            conditionally skipped if the current timestep is within the specified range.
        temporal_attention_timestep_skip_range (`tuple[int, int]`, defaults to `(100, 800)`):
            The range of timesteps to skip in the temporal attention layer. The attention computations will be
            conditionally skipped if the current timestep is within the specified range.
        cross_attention_timestep_skip_range (`tuple[int, int]`, defaults to `(100, 800)`):
            The range of timesteps to skip in the cross-attention layer. The attention computations will be
            conditionally skipped if the current timestep is within the specified range.
        spatial_attention_block_identifiers (`tuple[str, ...]`):
            The identifiers to match against the layer names to determine if the layer is a spatial attention layer.
        temporal_attention_block_identifiers (`tuple[str, ...]`):
            The identifiers to match against the layer names to determine if the layer is a temporal attention layer.
        cross_attention_block_identifiers (`tuple[str, ...]`):
            The identifiers to match against the layer names to determine if the layer is a cross-attention layer.
    """

    spatial_attention_block_skip_range: int | None = None
    temporal_attention_block_skip_range: int | None = None
    cross_attention_block_skip_range: int | None = None

    spatial_attention_timestep_skip_range: tuple[int, int] = (100, 800)
    temporal_attention_timestep_skip_range: tuple[int, int] = (100, 800)
    cross_attention_timestep_skip_range: tuple[int, int] = (100, 800)

    spatial_attention_block_identifiers: tuple[str, ...] = _SPATIAL_TRANSFORMER_BLOCK_IDENTIFIERS
    temporal_attention_block_identifiers: tuple[str, ...] = _TEMPORAL_TRANSFORMER_BLOCK_IDENTIFIERS
    cross_attention_block_identifiers: tuple[str, ...] = _CROSS_TRANSFORMER_BLOCK_IDENTIFIERS

    # Annotation widened to admit the `None` default, consistent with the `int | None` fields above.
    current_timestep_callback: Callable[[], int] | None = None

    # TODO(aryan): add PAB for MLP layers (very limited speedup from testing with original codebase
    # so not added for now)

    def __repr__(self) -> str:
        return (
            f"PyramidAttentionBroadcastConfig(\n"
            f"  spatial_attention_block_skip_range={self.spatial_attention_block_skip_range},\n"
            f"  temporal_attention_block_skip_range={self.temporal_attention_block_skip_range},\n"
            f"  cross_attention_block_skip_range={self.cross_attention_block_skip_range},\n"
            f"  spatial_attention_timestep_skip_range={self.spatial_attention_timestep_skip_range},\n"
            f"  temporal_attention_timestep_skip_range={self.temporal_attention_timestep_skip_range},\n"
            f"  cross_attention_timestep_skip_range={self.cross_attention_timestep_skip_range},\n"
            f"  spatial_attention_block_identifiers={self.spatial_attention_block_identifiers},\n"
            f"  temporal_attention_block_identifiers={self.temporal_attention_block_identifiers},\n"
            f"  cross_attention_block_identifiers={self.cross_attention_block_identifiers},\n"
            f"  current_timestep_callback={self.current_timestep_callback}\n"
            ")"
        )


class PyramidAttentionBroadcastState:
    r"""
    State for Pyramid Attention Broadcast.

    Attributes:
        iteration (`int`):
            The current iteration of the Pyramid Attention Broadcast. It is necessary to ensure that `reset_state` is
            called before starting a new inference forward pass for PAB to work correctly.
        cache (`Any`):
            The cached output from the previous forward pass. This is used to re-use the attention states when the
            attention computation is skipped. It is either a tensor or a tuple of tensors, depending on the module.
    """

    def __init__(self) -> None:
        self.iteration = 0
        self.cache = None

    def reset(self):
        self.iteration = 0
        self.cache = None

    def __repr__(self):
        # The cache may be a tensor OR a tuple of tensors (see the class docstring);
        # only read `.shape`/`.dtype` when it is actually a tensor so repr never raises.
        if self.cache is None:
            cache_repr = "None"
        elif isinstance(self.cache, torch.Tensor):
            cache_repr = f"Tensor(shape={self.cache.shape}, dtype={self.cache.dtype})"
        else:
            cache_repr = f"{type(self.cache).__name__}(len={len(self.cache)})"
        return f"PyramidAttentionBroadcastState(iteration={self.iteration}, cache={cache_repr})"


class PyramidAttentionBroadcastHook(ModelHook):
    r"""A hook that applies Pyramid Attention Broadcast to a given module."""

    _is_stateful = True

    def __init__(
        self, timestep_skip_range: tuple[int, int], block_skip_range: int, current_timestep_callback: Callable[[], int]
    ) -> None:
        super().__init__()

        self.timestep_skip_range = timestep_skip_range
        self.block_skip_range = block_skip_range
        self.current_timestep_callback = current_timestep_callback

    def initialize_hook(self, module):
        self.state = PyramidAttentionBroadcastState()
        return module

    def new_forward(self, module: torch.nn.Module, *args, **kwargs) -> Any:
        # Skip-eligibility: the current timestep must fall strictly inside the configured range.
        is_within_timestep_range = (
            self.timestep_skip_range[0] < self.current_timestep_callback() < self.timestep_skip_range[1]
        )

        # Recompute attention on the first call, outside the skip range, or every
        # `block_skip_range`-th iteration; otherwise re-broadcast the cached output.
        should_compute_attention = (
            self.state.cache is None
            or self.state.iteration == 0
            or not is_within_timestep_range
            or self.state.iteration % self.block_skip_range == 0
        )

        if should_compute_attention:
            output = self.fn_ref.original_forward(*args, **kwargs)
        else:
            output = self.state.cache

        self.state.cache = output
        self.state.iteration += 1
        return output

    def reset_state(self, module: torch.nn.Module) -> None:
        self.state.reset()
        return module


def apply_pyramid_attention_broadcast(module: torch.nn.Module, config: PyramidAttentionBroadcastConfig):
    r"""
    Apply [Pyramid Attention Broadcast](https://huggingface.co/papers/2408.12588) to a given pipeline.

    PAB is an attention approximation method that leverages the similarity in attention states between timesteps to
    reduce the computational cost of attention computation. The key takeaway from the paper is that the attention
    similarity in the cross-attention layers between timesteps is high, followed by less similarity in the temporal
    and spatial layers. This allows for the skipping of attention computation in the cross-attention layers more
    frequently than in the temporal and spatial layers. Applying PAB will, therefore, speedup the inference process.

    Args:
        module (`torch.nn.Module`):
            The module to apply Pyramid Attention Broadcast to.
        config (`PyramidAttentionBroadcastConfig | None`, `optional`, defaults to `None`):
            The configuration to use for Pyramid Attention Broadcast.

    Example:

    ```python
    >>> import torch
    >>> from diffusers import CogVideoXPipeline, PyramidAttentionBroadcastConfig, apply_pyramid_attention_broadcast
    >>> from diffusers.utils import export_to_video

    >>> pipe = CogVideoXPipeline.from_pretrained("THUDM/CogVideoX-5b", torch_dtype=torch.bfloat16)
    >>> pipe.to("cuda")

    >>> config = PyramidAttentionBroadcastConfig(
    ...     spatial_attention_block_skip_range=2,
    ...     spatial_attention_timestep_skip_range=(100, 800),
    ...     current_timestep_callback=lambda: pipe.current_timestep,
    ... )
    >>> apply_pyramid_attention_broadcast(pipe.transformer, config)
    ```
    """
    if config.current_timestep_callback is None:
        raise ValueError(
            "The `current_timestep_callback` function must be provided in the configuration to apply Pyramid Attention Broadcast."
        )

    if (
        config.spatial_attention_block_skip_range is None
        and config.temporal_attention_block_skip_range is None
        and config.cross_attention_block_skip_range is None
    ):
        logger.warning(
            "Pyramid Attention Broadcast requires one or more of `spatial_attention_block_skip_range`, `temporal_attention_block_skip_range` "
            "or `cross_attention_block_skip_range` parameters to be set to an integer, not `None`. Defaulting to using `spatial_attention_block_skip_range=2`. "
            "To avoid this warning, please set one of the above parameters."
        )
        config.spatial_attention_block_skip_range = 2

    for name, submodule in module.named_modules():
        if not isinstance(submodule, (*_ATTENTION_CLASSES, AttentionModuleMixin)):
            # PAB has been implemented specific to Diffusers' Attention classes. However, this does not mean that PAB
            # cannot be applied to this layer. For custom layers, users can extend this functionality and implement
            # their own PAB logic similar to `_apply_pyramid_attention_broadcast_on_attention_class`.
            continue
        _apply_pyramid_attention_broadcast_on_attention_class(name, submodule, config)


def _apply_pyramid_attention_broadcast_on_attention_class(
    name: str, module: Attention, config: PyramidAttentionBroadcastConfig
) -> bool:
    # Classify the layer as spatial / temporal self-attention or cross-attention by
    # matching the configured name identifiers and the module's `is_cross_attention` flag.
    is_spatial_self_attention = (
        any(re.search(identifier, name) is not None for identifier in config.spatial_attention_block_identifiers)
        and config.spatial_attention_block_skip_range is not None
        and not getattr(module, "is_cross_attention", False)
    )
    is_temporal_self_attention = (
        any(re.search(identifier, name) is not None for identifier in config.temporal_attention_block_identifiers)
        and config.temporal_attention_block_skip_range is not None
        and not getattr(module, "is_cross_attention", False)
    )
    is_cross_attention = (
        any(re.search(identifier, name) is not None for identifier in config.cross_attention_block_identifiers)
        and config.cross_attention_block_skip_range is not None
        and getattr(module, "is_cross_attention", False)
    )

    block_skip_range, timestep_skip_range, block_type = None, None, None
    if is_spatial_self_attention:
        block_skip_range = config.spatial_attention_block_skip_range
        timestep_skip_range = config.spatial_attention_timestep_skip_range
        block_type = "spatial"
    elif is_temporal_self_attention:
        block_skip_range = config.temporal_attention_block_skip_range
        timestep_skip_range = config.temporal_attention_timestep_skip_range
        block_type = "temporal"
    elif is_cross_attention:
        block_skip_range = config.cross_attention_block_skip_range
        timestep_skip_range = config.cross_attention_timestep_skip_range
        block_type = "cross"

    if block_skip_range is None or timestep_skip_range is None:
        logger.info(
            f'Unable to apply Pyramid Attention Broadcast to the selected layer: "{name}" because it does '
            f"not match any of the required criteria for spatial, temporal or cross attention layers. Note, "
            f"however, that this layer may still be valid for applying PAB. Please specify the correct "
            f"block identifiers in the configuration."
        )
        return False

    logger.debug(f"Enabling Pyramid Attention Broadcast ({block_type}) in layer: {name}")
    _apply_pyramid_attention_broadcast_hook(
        module, timestep_skip_range, block_skip_range, config.current_timestep_callback
    )
    return True


def _apply_pyramid_attention_broadcast_hook(
    module: Attention | MochiAttention,
    timestep_skip_range: tuple[int, int],
    block_skip_range: int,
    current_timestep_callback: Callable[[], int],
):
    r"""
    Apply [Pyramid Attention Broadcast](https://huggingface.co/papers/2408.12588) to a given torch.nn.Module.

    Args:
        module (`torch.nn.Module`):
            The module to apply Pyramid Attention Broadcast to.
        timestep_skip_range (`tuple[int, int]`):
            The range of timesteps to skip in the attention layer. The attention computations will be conditionally
            skipped if the current timestep is within the specified range.
        block_skip_range (`int`):
            The number of times a specific attention broadcast is skipped before computing the attention states to
            re-use. If this is set to the value `N`, the attention computation will be skipped `N - 1` times (i.e.,
            old attention states will be reused) before computing the new attention states again.
        current_timestep_callback (`Callable[[], int]`):
            A callback function that returns the current inference timestep.
    """
    registry = HookRegistry.check_if_exists_or_initialize(module)
    hook = PyramidAttentionBroadcastHook(timestep_skip_range, block_skip_range, current_timestep_callback)
    registry.register_hook(hook, _PYRAMID_ATTENTION_BROADCAST_HOOK)
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/hooks/pyramid_attention_broadcast.py", "license": "Apache License 2.0", "lines": 264, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:src/diffusers/models/cache_utils.py
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from contextlib import contextmanager

from ..utils.logging import get_logger


logger = get_logger(__name__)  # pylint: disable=invalid-name


class CacheMixin:
    r"""
    A class for enable/disabling caching techniques on diffusion models.

    Supported caching techniques:
        - [Pyramid Attention Broadcast](https://huggingface.co/papers/2408.12588)
        - [FasterCache](https://huggingface.co/papers/2410.19355)
        - [FirstBlockCache](https://github.com/chengzeyi/ParaAttention/blob/7a266123671b55e7e5a2fe9af3121f07a36afc78/README.md#first-block-cache-our-dynamic-caching)
        - MagCache (see [`~hooks.MagCacheConfig`])
        - TaylorSeer cache (see [`~hooks.TaylorSeerCacheConfig`])
    """

    # Holds the config of the currently enabled technique; `None` means caching is off.
    _cache_config = None

    @property
    def is_cache_enabled(self) -> bool:
        return self._cache_config is not None

    def enable_cache(self, config) -> None:
        r"""
        Enable caching techniques on the model.

        Args:
            config (`PyramidAttentionBroadcastConfig` | `FasterCacheConfig` | `FirstBlockCacheConfig` | `MagCacheConfig` | `TaylorSeerCacheConfig`):
                The configuration for applying the caching technique. Currently supported caching techniques are:
                    - [`~hooks.PyramidAttentionBroadcastConfig`]
                    - [`~hooks.FasterCacheConfig`]
                    - [`~hooks.FirstBlockCacheConfig`]
                    - [`~hooks.MagCacheConfig`]
                    - [`~hooks.TaylorSeerCacheConfig`]

        Raises:
            ValueError: If caching is already enabled (call `disable_cache` first), or if `config` is of an
                unsupported type.

        Example:

        ```python
        >>> import torch
        >>> from diffusers import CogVideoXPipeline, PyramidAttentionBroadcastConfig

        >>> pipe = CogVideoXPipeline.from_pretrained("THUDM/CogVideoX-5b", torch_dtype=torch.bfloat16)
        >>> pipe.to("cuda")

        >>> config = PyramidAttentionBroadcastConfig(
        ...     spatial_attention_block_skip_range=2,
        ...     spatial_attention_timestep_skip_range=(100, 800),
        ...     current_timestep_callback=lambda: pipe.current_timestep,
        ... )
        >>> pipe.transformer.enable_cache(config)
        ```
        """
        # Imported lazily to avoid a circular import between models and hooks.
        from ..hooks import (
            FasterCacheConfig,
            FirstBlockCacheConfig,
            MagCacheConfig,
            PyramidAttentionBroadcastConfig,
            TaylorSeerCacheConfig,
            apply_faster_cache,
            apply_first_block_cache,
            apply_mag_cache,
            apply_pyramid_attention_broadcast,
            apply_taylorseer_cache,
        )

        if self.is_cache_enabled:
            raise ValueError(
                f"Caching has already been enabled with {type(self._cache_config)}. To apply a new caching technique, please disable the existing one first."
            )

        # Dispatch on config type; each `apply_*` installs the technique's hooks on this model.
        if isinstance(config, FasterCacheConfig):
            apply_faster_cache(self, config)
        elif isinstance(config, FirstBlockCacheConfig):
            apply_first_block_cache(self, config)
        elif isinstance(config, MagCacheConfig):
            apply_mag_cache(self, config)
        elif isinstance(config, PyramidAttentionBroadcastConfig):
            apply_pyramid_attention_broadcast(self, config)
        elif isinstance(config, TaylorSeerCacheConfig):
            apply_taylorseer_cache(self, config)
        else:
            raise ValueError(f"Cache config {type(config)} is not supported.")

        self._cache_config = config

    def disable_cache(self) -> None:
        r"""
        Disable the currently enabled caching technique by removing its hooks from the model.

        A no-op (with a warning) when no caching technique is enabled.
        """
        from ..hooks import (
            FasterCacheConfig,
            FirstBlockCacheConfig,
            HookRegistry,
            MagCacheConfig,
            PyramidAttentionBroadcastConfig,
            TaylorSeerCacheConfig,
        )
        from ..hooks.faster_cache import _FASTER_CACHE_BLOCK_HOOK, _FASTER_CACHE_DENOISER_HOOK
        from ..hooks.first_block_cache import _FBC_BLOCK_HOOK, _FBC_LEADER_BLOCK_HOOK
        from ..hooks.mag_cache import _MAG_CACHE_BLOCK_HOOK, _MAG_CACHE_LEADER_BLOCK_HOOK
        from ..hooks.pyramid_attention_broadcast import _PYRAMID_ATTENTION_BROADCAST_HOOK
        from ..hooks.taylorseer_cache import _TAYLORSEER_CACHE_HOOK

        if self._cache_config is None:
            logger.warning("Caching techniques have not been enabled, so there's nothing to disable.")
            return

        registry = HookRegistry.check_if_exists_or_initialize(self)
        # Remove the hook(s) installed by the matching `apply_*` call; `recurse=True`
        # because the hooks may live on submodules, not just on `self`.
        if isinstance(self._cache_config, FasterCacheConfig):
            registry.remove_hook(_FASTER_CACHE_DENOISER_HOOK, recurse=True)
            registry.remove_hook(_FASTER_CACHE_BLOCK_HOOK, recurse=True)
        elif isinstance(self._cache_config, FirstBlockCacheConfig):
            registry.remove_hook(_FBC_LEADER_BLOCK_HOOK, recurse=True)
            registry.remove_hook(_FBC_BLOCK_HOOK, recurse=True)
        elif isinstance(self._cache_config, MagCacheConfig):
            registry.remove_hook(_MAG_CACHE_LEADER_BLOCK_HOOK, recurse=True)
            registry.remove_hook(_MAG_CACHE_BLOCK_HOOK, recurse=True)
        elif isinstance(self._cache_config, PyramidAttentionBroadcastConfig):
            registry.remove_hook(_PYRAMID_ATTENTION_BROADCAST_HOOK, recurse=True)
        elif isinstance(self._cache_config, TaylorSeerCacheConfig):
            registry.remove_hook(_TAYLORSEER_CACHE_HOOK, recurse=True)
        else:
            raise ValueError(f"Cache config {type(self._cache_config)} is not supported.")

        self._cache_config = None

    def _reset_stateful_cache(self, recurse: bool = True) -> None:
        # Reset any per-inference state kept by stateful hooks (e.g. step counters).
        from ..hooks import HookRegistry

        HookRegistry.check_if_exists_or_initialize(self).reset_stateful_hooks(recurse=recurse)

    @contextmanager
    def cache_context(self, name: str):
        r"""Context manager that provides additional methods for cache management."""
        from ..hooks import HookRegistry

        registry = HookRegistry.check_if_exists_or_initialize(self)
        registry._set_context(name)
        try:
            yield
        finally:
            # Always clear the context, even if the caller's block raises, so a
            # failed step cannot leave a stale cache context on the registry.
            registry._set_context(None)
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/models/cache_utils.py", "license": "Apache License 2.0", "lines": 125, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:tests/hooks/test_hooks.py
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Unit tests for the `HookRegistry` / `ModelHook` infrastructure in `diffusers.hooks`."""

import gc
import unittest

import torch

from diffusers.hooks import HookRegistry, ModelHook
from diffusers.training_utils import free_memory
from diffusers.utils.logging import get_logger

from ..testing_utils import CaptureLogger, torch_device


logger = get_logger(__name__)  # pylint: disable=invalid-name


class DummyBlock(torch.nn.Module):
    # Minimal Linear -> ReLU -> Linear block used to build the dummy model below.
    def __init__(self, in_features: int, hidden_features: int, out_features: int) -> None:
        super().__init__()

        self.proj_in = torch.nn.Linear(in_features, hidden_features)
        self.activation = torch.nn.ReLU()
        self.proj_out = torch.nn.Linear(hidden_features, out_features)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = self.proj_in(x)
        x = self.activation(x)
        x = self.proj_out(x)
        return x


class DummyModel(torch.nn.Module):
    # Small MLP with an internal ModuleList of DummyBlocks, so hooks can be
    # attached both at the model level and on individual submodules.
    def __init__(self, in_features: int, hidden_features: int, out_features: int, num_layers: int) -> None:
        super().__init__()

        self.linear_1 = torch.nn.Linear(in_features, hidden_features)
        self.activation = torch.nn.ReLU()
        self.blocks = torch.nn.ModuleList(
            [DummyBlock(hidden_features, hidden_features, hidden_features) for _ in range(num_layers)]
        )
        self.linear_2 = torch.nn.Linear(hidden_features, out_features)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = self.linear_1(x)
        x = self.activation(x)
        for block in self.blocks:
            x = block(x)
        x = self.linear_2(x)
        return x


class AddHook(ModelHook):
    # Adds `value` to every tensor positional argument before the forward pass.
    def __init__(self, value: int):
        super().__init__()
        self.value = value

    def pre_forward(self, module: torch.nn.Module, *args, **kwargs):
        logger.debug("AddHook pre_forward")
        args = ((x + self.value) if torch.is_tensor(x) else x for x in args)
        return args, kwargs

    def post_forward(self, module, output):
        logger.debug("AddHook post_forward")
        return output


class MultiplyHook(ModelHook):
    # Multiplies every tensor positional argument by `value` before the forward pass.
    def __init__(self, value: int):
        super().__init__()
        self.value = value

    def pre_forward(self, module, *args, **kwargs):
        logger.debug("MultiplyHook pre_forward")
        args = ((x * self.value) if torch.is_tensor(x) else x for x in args)
        return args, kwargs

    def post_forward(self, module, output):
        logger.debug("MultiplyHook post_forward")
        return output

    def __repr__(self):
        return f"MultiplyHook(value={self.value})"


class StatefulAddHook(ModelHook):
    # Like AddHook, but the added amount grows by 1 on every call; the counter is
    # cleared by `reset_state`, which exercises the stateful-hook machinery.
    _is_stateful = True

    def __init__(self, value: int):
        super().__init__()
        self.value = value
        self.increment = 0

    def pre_forward(self, module, *args, **kwargs):
        logger.debug("StatefulAddHook pre_forward")
        add_value = self.value + self.increment
        self.increment += 1
        args = ((x + add_value) if torch.is_tensor(x) else x for x in args)
        return args, kwargs

    def reset_state(self, module):
        self.increment = 0


class SkipLayerHook(ModelHook):
    # Replaces the module's forward: when `skip_layer` is True the first positional
    # argument is returned unchanged (identity), otherwise the original forward runs.
    def __init__(self, skip_layer: bool):
        super().__init__()
        self.skip_layer = skip_layer

    def pre_forward(self, module, *args, **kwargs):
        logger.debug("SkipLayerHook pre_forward")
        return args, kwargs

    def new_forward(self, module, *args, **kwargs):
        logger.debug("SkipLayerHook new_forward")
        if self.skip_layer:
            return args[0]
        return self.fn_ref.original_forward(*args, **kwargs)

    def post_forward(self, module, output):
        logger.debug("SkipLayerHook post_forward")
        return output


class HookTests(unittest.TestCase):
    # Fixed dummy-model dimensions shared by all tests.
    in_features = 4
    hidden_features = 8
    out_features = 4
    num_layers = 2

    def setUp(self):
        params = self.get_module_parameters()
        self.model = DummyModel(**params)
        self.model.to(torch_device)

    def tearDown(self):
        super().tearDown()
        del self.model
        gc.collect()
        free_memory()

    def get_module_parameters(self):
        return {
            "in_features": self.in_features,
            "hidden_features": self.hidden_features,
            "out_features": self.out_features,
            "num_layers": self.num_layers,
        }

    def get_generator(self):
        # Fixed seed so every test sees identical random inputs.
        return torch.manual_seed(0)

    def test_hook_registry(self):
        registry = HookRegistry.check_if_exists_or_initialize(self.model)
        registry.register_hook(AddHook(1), "add_hook")
        registry.register_hook(MultiplyHook(2), "multiply_hook")

        registry_repr = repr(registry)
        expected_repr = "HookRegistry(\n (0) add_hook - AddHook\n (1) multiply_hook - MultiplyHook(value=2)\n)"

        self.assertEqual(len(registry.hooks), 2)
        self.assertEqual(registry._hook_order, ["add_hook", "multiply_hook"])
        self.assertEqual(registry_repr, expected_repr)

        registry.remove_hook("add_hook")

        self.assertEqual(len(registry.hooks), 1)
        self.assertEqual(registry._hook_order, ["multiply_hook"])

    def test_stateful_hook(self):
        registry = HookRegistry.check_if_exists_or_initialize(self.model)
        registry.register_hook(StatefulAddHook(1), "stateful_add_hook")

        self.assertEqual(registry.hooks["stateful_add_hook"].increment, 0)

        input = torch.randn(1, 4, device=torch_device, generator=self.get_generator())
        num_repeats = 3

        for i in range(num_repeats):
            result = self.model(input)
            if i == 0:
                output1 = result

        # The counter advances once per forward call ...
        self.assertEqual(registry.get_hook("stateful_add_hook").increment, num_repeats)

        # ... and after a reset the first call must reproduce the first output.
        registry.reset_stateful_hooks()
        output2 = self.model(input)

        self.assertEqual(registry.get_hook("stateful_add_hook").increment, 1)
        self.assertTrue(torch.allclose(output1, output2))

    def test_inference(self):
        registry = HookRegistry.check_if_exists_or_initialize(self.model)
        registry.register_hook(AddHook(1), "add_hook")
        registry.register_hook(MultiplyHook(2), "multiply_hook")
        input = torch.randn(1, 4, device=torch_device, generator=self.get_generator())

        output1 = self.model(input).mean().detach().cpu().item()

        # Removing a hook and pre-applying its transform by hand must give the
        # same result as running with the hook installed.
        registry.remove_hook("multiply_hook")
        new_input = input * 2
        output2 = self.model(new_input).mean().detach().cpu().item()

        registry.remove_hook("add_hook")
        new_input = input * 2 + 1
        output3 = self.model(new_input).mean().detach().cpu().item()

        self.assertAlmostEqual(output1, output2, places=5)
        self.assertAlmostEqual(output1, output3, places=5)
        self.assertAlmostEqual(output2, output3, places=5)

    def test_skip_layer_hook(self):
        registry = HookRegistry.check_if_exists_or_initialize(self.model)
        registry.register_hook(SkipLayerHook(skip_layer=True), "skip_layer_hook")

        input = torch.zeros(1, 4, device=torch_device)
        # Skipping the whole model turns it into the identity: zeros in, zeros out.
        output = self.model(input).mean().detach().cpu().item()
        self.assertEqual(output, 0.0)

        registry.remove_hook("skip_layer_hook")
        registry.register_hook(SkipLayerHook(skip_layer=False), "skip_layer_hook")
        output = self.model(input).mean().detach().cpu().item()
        self.assertNotEqual(output, 0.0)

    def test_skip_layer_internal_block(self):
        registry = HookRegistry.check_if_exists_or_initialize(self.model.linear_1)
        input = torch.zeros(1, 4, device=torch_device)

        # Skipping linear_1 feeds a 4-wide tensor into an 8-wide layer, so the
        # matmul must fail with a shape error.
        registry.register_hook(SkipLayerHook(skip_layer=True), "skip_layer_hook")
        with self.assertRaises(RuntimeError) as cm:
            self.model(input).mean().detach().cpu().item()
        self.assertIn("mat1 and mat2 shapes cannot be multiplied", str(cm.exception))

        registry.remove_hook("skip_layer_hook")
        output = self.model(input).mean().detach().cpu().item()
        self.assertNotEqual(output, 0.0)

        # Skipping an internal block (8-wide in, 8-wide out) keeps shapes valid.
        registry = HookRegistry.check_if_exists_or_initialize(self.model.blocks[1])
        registry.register_hook(SkipLayerHook(skip_layer=True), "skip_layer_hook")
        output = self.model(input).mean().detach().cpu().item()
        self.assertNotEqual(output, 0.0)

    def test_invocation_order_stateful_first(self):
        # Hooks run in reverse registration order for pre_forward; a stateful hook
        # that defines no post_forward contributes no post_forward log line.
        registry = HookRegistry.check_if_exists_or_initialize(self.model)
        registry.register_hook(StatefulAddHook(1), "add_hook")
        registry.register_hook(AddHook(2), "add_hook_2")
        registry.register_hook(MultiplyHook(3), "multiply_hook")

        input = torch.randn(1, 4, device=torch_device, generator=self.get_generator())
        logger = get_logger(__name__)
        logger.setLevel("DEBUG")

        with CaptureLogger(logger) as cap_logger:
            self.model(input)
        output = cap_logger.out.replace(" ", "").replace("\n", "")
        expected_invocation_order_log = (
            (
                "MultiplyHook pre_forward\n"
                "AddHook pre_forward\n"
                "StatefulAddHook pre_forward\n"
                "AddHook post_forward\n"
                "MultiplyHook post_forward\n"
            )
            .replace(" ", "")
            .replace("\n", "")
        )
        self.assertEqual(output, expected_invocation_order_log)

        registry.remove_hook("add_hook")
        with CaptureLogger(logger) as cap_logger:
            self.model(input)
        output = cap_logger.out.replace(" ", "").replace("\n", "")
        expected_invocation_order_log = (
            ("MultiplyHook pre_forward\nAddHook pre_forward\nAddHook post_forward\nMultiplyHook post_forward\n")
            .replace(" ", "")
            .replace("\n", "")
        )
        self.assertEqual(output, expected_invocation_order_log)

    def test_invocation_order_stateful_middle(self):
        # Same as above but with the stateful hook registered in the middle.
        registry = HookRegistry.check_if_exists_or_initialize(self.model)
        registry.register_hook(AddHook(2), "add_hook")
        registry.register_hook(StatefulAddHook(1), "add_hook_2")
        registry.register_hook(MultiplyHook(3), "multiply_hook")

        input = torch.randn(1, 4, device=torch_device, generator=self.get_generator())
        logger = get_logger(__name__)
        logger.setLevel("DEBUG")

        with CaptureLogger(logger) as cap_logger:
            self.model(input)
        output = cap_logger.out.replace(" ", "").replace("\n", "")
        expected_invocation_order_log = (
            (
                "MultiplyHook pre_forward\n"
                "StatefulAddHook pre_forward\n"
                "AddHook pre_forward\n"
                "AddHook post_forward\n"
                "MultiplyHook post_forward\n"
            )
            .replace(" ", "")
            .replace("\n", "")
        )
        self.assertEqual(output, expected_invocation_order_log)

        registry.remove_hook("add_hook")
        with CaptureLogger(logger) as cap_logger:
            self.model(input)
        output = cap_logger.out.replace(" ", "").replace("\n", "")
        expected_invocation_order_log = (
            ("MultiplyHook pre_forward\nStatefulAddHook pre_forward\nMultiplyHook post_forward\n")
            .replace(" ", "")
            .replace("\n", "")
        )
        self.assertEqual(output, expected_invocation_order_log)

        registry.remove_hook("add_hook_2")
        with CaptureLogger(logger) as cap_logger:
            self.model(input)
        output = cap_logger.out.replace(" ", "").replace("\n", "")
        expected_invocation_order_log = (
            ("MultiplyHook pre_forward\nMultiplyHook post_forward\n").replace(" ", "").replace("\n", "")
        )
        self.assertEqual(output, expected_invocation_order_log)

    def test_invocation_order_stateful_last(self):
        # Same as above but with the stateful hook registered last.
        registry = HookRegistry.check_if_exists_or_initialize(self.model)
        registry.register_hook(AddHook(1), "add_hook")
        registry.register_hook(MultiplyHook(2), "multiply_hook")
        registry.register_hook(StatefulAddHook(3), "add_hook_2")

        input = torch.randn(1, 4, device=torch_device, generator=self.get_generator())
        logger = get_logger(__name__)
        logger.setLevel("DEBUG")

        with CaptureLogger(logger) as cap_logger:
            self.model(input)
        output = cap_logger.out.replace(" ", "").replace("\n", "")
        expected_invocation_order_log = (
            (
                "StatefulAddHook pre_forward\n"
                "MultiplyHook pre_forward\n"
                "AddHook pre_forward\n"
                "AddHook post_forward\n"
                "MultiplyHook post_forward\n"
            )
            .replace(" ", "")
            .replace("\n", "")
        )
        self.assertEqual(output, expected_invocation_order_log)

        registry.remove_hook("add_hook")
        with CaptureLogger(logger) as cap_logger:
            self.model(input)
        output = cap_logger.out.replace(" ", "").replace("\n", "")
        expected_invocation_order_log = (
            ("StatefulAddHook pre_forward\nMultiplyHook pre_forward\nMultiplyHook post_forward\n")
            .replace(" ", "")
            .replace("\n", "")
        )
        self.assertEqual(output, expected_invocation_order_log)
{ "repo_id": "huggingface/diffusers", "file_path": "tests/hooks/test_hooks.py", "license": "Apache License 2.0", "lines": 297, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/diffusers:examples/dreambooth/convert_to_imagefolder.py
"""Create an `imagefolder`-style metadata.jsonl from a folder of image/.txt caption pairs.

Each `<stem>.txt` file is paired with the image file sharing its stem; one JSON line
per pair is written to `<path>/metadata.jsonl` with the image file name and the
caption under the configured caption column.
"""

import argparse
import json
import pathlib


parser = argparse.ArgumentParser()
parser.add_argument(
    "--path",
    type=str,
    required=True,
    help="Path to folder with image-text pairs.",
)
parser.add_argument("--caption_column", type=str, default="prompt", help="Name of caption column.")
args = parser.parse_args()

path = pathlib.Path(args.path)
if not path.exists():
    raise RuntimeError(f"`--path` '{args.path}' does not exist.")

metadata = path.joinpath("metadata.jsonl")

# Only consider regular files, and never the output file itself: the original
# set-difference over `path.glob("*")` treated directories and a metadata.jsonl
# left over from a previous run as "images", corrupting the generated metadata.
all_files = [p for p in path.glob("*") if p.is_file() and p != metadata]
captions = [p for p in all_files if p.suffix == ".txt"]
# NOTE(review): if two images share a stem with different extensions, the last one
# enumerated wins (same behavior as the original dict build).
images = {p.stem: p for p in all_files if p.suffix != ".txt"}

# Pair each caption with the image of the same stem; captions without a matching
# image are skipped, as before.
caption_image = {caption: images[caption.stem] for caption in captions if caption.stem in images}

with metadata.open("w", encoding="utf-8") as f:
    for caption, image in caption_image.items():
        caption_text = caption.read_text(encoding="utf-8")
        json.dump({"file_name": image.name, args.caption_column: caption_text}, f)
        f.write("\n")
{ "repo_id": "huggingface/diffusers", "file_path": "examples/dreambooth/convert_to_imagefolder.py", "license": "Apache License 2.0", "lines": 26, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
huggingface/diffusers:examples/community/pipeline_flux_semantic_guidance.py
# Copyright 2025 Black Forest Labs and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect from typing import Any, Callable, Dict, List, Optional, Union import numpy as np import torch from transformers import ( CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection, T5EncoderModel, T5TokenizerFast, ) from diffusers.image_processor import PipelineImageInput, VaeImageProcessor from diffusers.loaders import FluxIPAdapterMixin, FluxLoraLoaderMixin, FromSingleFileMixin, TextualInversionLoaderMixin from diffusers.models.autoencoders import AutoencoderKL from diffusers.models.transformers import FluxTransformer2DModel from diffusers.pipelines.flux.pipeline_output import FluxPipelineOutput from diffusers.pipelines.pipeline_utils import DiffusionPipeline from diffusers.schedulers import FlowMatchEulerDiscreteScheduler from diffusers.utils import ( USE_PEFT_BACKEND, deprecate, is_torch_xla_available, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers, ) from diffusers.utils.torch_utils import randn_tensor if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py >>> import torch >>> from diffusers import DiffusionPipeline >>> pipe = DiffusionPipeline.from_pretrained( >>> "black-forest-labs/FLUX.1-dev", >>> 
custom_pipeline="pipeline_flux_semantic_guidance", >>> torch_dtype=torch.bfloat16 >>> ) >>> pipe.to("cuda") >>> prompt = "A cat holding a sign that says hello world" >>> image = pipe( >>> prompt=prompt, >>> num_inference_steps=28, >>> guidance_scale=3.5, >>> editing_prompt=["cat", "dog"], # changes from cat to dog. >>> reverse_editing_direction=[True, False], >>> edit_warmup_steps=[6, 8], >>> edit_guidance_scale=[6, 6.5], >>> edit_threshold=[0.89, 0.89], >>> edit_cooldown_steps = [25, 27], >>> edit_momentum_scale=0.3, >>> edit_mom_beta=0.6, >>> generator=torch.Generator(device="cuda").manual_seed(6543), >>> ).images[0] >>> image.save("semantic_flux.png") ``` """ # Copied from diffusers.pipelines.flux.pipeline_flux.calculate_shift def calculate_shift( image_seq_len, base_seq_len: int = 256, max_seq_len: int = 4096, base_shift: float = 0.5, max_shift: float = 1.15, ): m = (max_shift - base_shift) / (max_seq_len - base_seq_len) b = base_shift - m * base_seq_len mu = image_seq_len * m + b return mu # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps def retrieve_timesteps( scheduler, num_inference_steps: Optional[int] = None, device: Optional[Union[str, torch.device]] = None, timesteps: Optional[List[int]] = None, sigmas: Optional[List[float]] = None, **kwargs, ): r""" Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. Args: scheduler (`SchedulerMixin`): The scheduler to get timesteps from. num_inference_steps (`int`): The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` must be `None`. device (`str` or `torch.device`, *optional*): The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. 
timesteps (`List[int]`, *optional*): Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, `num_inference_steps` and `sigmas` must be `None`. sigmas (`List[float]`, *optional*): Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, `num_inference_steps` and `timesteps` must be `None`. Returns: `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the second element is the number of inference steps. """ if timesteps is not None and sigmas is not None: raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") if timesteps is not None: accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" timestep schedules. Please check whether you are using the correct scheduler." ) scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif sigmas is not None: accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accept_sigmas: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" sigmas schedules. Please check whether you are using the correct scheduler." 
) scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return timesteps, num_inference_steps class FluxSemanticGuidancePipeline( DiffusionPipeline, FluxLoraLoaderMixin, FromSingleFileMixin, TextualInversionLoaderMixin, FluxIPAdapterMixin, ): r""" The Flux pipeline for text-to-image generation with semantic guidance. Reference: https://blackforestlabs.ai/announcing-black-forest-labs/ Args: transformer ([`FluxTransformer2DModel`]): Conditional Transformer (MMDiT) architecture to denoise the encoded image latents. scheduler ([`FlowMatchEulerDiscreteScheduler`]): A scheduler to be used in combination with `transformer` to denoise the encoded image latents. vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. text_encoder ([`CLIPTextModel`]): [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. text_encoder_2 ([`T5EncoderModel`]): [T5](https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5EncoderModel), specifically the [google/t5-v1_1-xxl](https://huggingface.co/google/t5-v1_1-xxl) variant. tokenizer (`CLIPTokenizer`): Tokenizer of class [CLIPTokenizer](https://huggingface.co/docs/transformers/en/model_doc/clip#transformers.CLIPTokenizer). tokenizer_2 (`T5TokenizerFast`): Second Tokenizer of class [T5TokenizerFast](https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5TokenizerFast). 
""" model_cpu_offload_seq = "text_encoder->text_encoder_2->image_encoder->transformer->vae" _optional_components = ["image_encoder", "feature_extractor"] _callback_tensor_inputs = ["latents", "prompt_embeds"] def __init__( self, scheduler: FlowMatchEulerDiscreteScheduler, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, text_encoder_2: T5EncoderModel, tokenizer_2: T5TokenizerFast, transformer: FluxTransformer2DModel, image_encoder: CLIPVisionModelWithProjection = None, feature_extractor: CLIPImageProcessor = None, ): super().__init__() self.register_modules( vae=vae, text_encoder=text_encoder, text_encoder_2=text_encoder_2, tokenizer=tokenizer, tokenizer_2=tokenizer_2, transformer=transformer, scheduler=scheduler, image_encoder=image_encoder, feature_extractor=feature_extractor, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8 # Flux latents are turned into 2x2 patches and packed. This means the latent width and height has to be divisible # by the patch size. 
So the vae scale factor is multiplied by the patch size to account for this self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor * 2) self.tokenizer_max_length = ( self.tokenizer.model_max_length if hasattr(self, "tokenizer") and self.tokenizer is not None else 77 ) self.default_sample_size = 128 # Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline._get_t5_prompt_embeds def _get_t5_prompt_embeds( self, prompt: Union[str, List[str]] = None, num_images_per_prompt: int = 1, max_sequence_length: int = 512, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None, ): device = device or self._execution_device dtype = dtype or self.text_encoder.dtype prompt = [prompt] if isinstance(prompt, str) else prompt batch_size = len(prompt) if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, self.tokenizer_2) text_inputs = self.tokenizer_2( prompt, padding="max_length", max_length=max_sequence_length, truncation=True, return_length=False, return_overflowing_tokens=False, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer_2(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): removed_text = self.tokenizer_2.batch_decode(untruncated_ids[:, self.tokenizer_max_length - 1 : -1]) logger.warning( "The following part of your input was truncated because `max_sequence_length` is set to " f" {max_sequence_length} tokens: {removed_text}" ) prompt_embeds = self.text_encoder_2(text_input_ids.to(device), output_hidden_states=False)[0] dtype = self.text_encoder_2.dtype prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) _, seq_len, _ = prompt_embeds.shape # duplicate text embeddings and attention mask for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) 
prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) return prompt_embeds # Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline._get_clip_prompt_embeds def _get_clip_prompt_embeds( self, prompt: Union[str, List[str]], num_images_per_prompt: int = 1, device: Optional[torch.device] = None, ): device = device or self._execution_device prompt = [prompt] if isinstance(prompt, str) else prompt batch_size = len(prompt) if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, self.tokenizer) text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer_max_length, truncation=True, return_overflowing_tokens=False, return_length=False, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer_max_length - 1 : -1]) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer_max_length} tokens: {removed_text}" ) prompt_embeds = self.text_encoder(text_input_ids.to(device), output_hidden_states=False) # Use pooled output of CLIPTextModel prompt_embeds = prompt_embeds.pooler_output prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device) # duplicate text embeddings for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt) prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, -1) return prompt_embeds # Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline.encode_prompt def encode_prompt( self, prompt: Union[str, List[str]], prompt_2: Union[str, List[str]], device: Optional[torch.device] = None, num_images_per_prompt: int = 
1, prompt_embeds: Optional[torch.FloatTensor] = None, pooled_prompt_embeds: Optional[torch.FloatTensor] = None, max_sequence_length: int = 512, lora_scale: Optional[float] = None, ): r""" Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded prompt_2 (`str` or `List[str]`, *optional*): The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is used in all text-encoders device: (`torch.device`): torch device num_images_per_prompt (`int`): number of images that should be generated per prompt prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. pooled_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, pooled text embeddings will be generated from `prompt` input argument. lora_scale (`float`, *optional*): A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. 
""" device = device or self._execution_device # set lora scale so that monkey patched LoRA # function of text encoder can correctly access it if lora_scale is not None and isinstance(self, FluxLoraLoaderMixin): self._lora_scale = lora_scale # dynamically adjust the LoRA scale if self.text_encoder is not None and USE_PEFT_BACKEND: scale_lora_layers(self.text_encoder, lora_scale) if self.text_encoder_2 is not None and USE_PEFT_BACKEND: scale_lora_layers(self.text_encoder_2, lora_scale) prompt = [prompt] if isinstance(prompt, str) else prompt if prompt_embeds is None: prompt_2 = prompt_2 or prompt prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 # We only use the pooled prompt output from the CLIPTextModel pooled_prompt_embeds = self._get_clip_prompt_embeds( prompt=prompt, device=device, num_images_per_prompt=num_images_per_prompt, ) prompt_embeds = self._get_t5_prompt_embeds( prompt=prompt_2, num_images_per_prompt=num_images_per_prompt, max_sequence_length=max_sequence_length, device=device, ) if self.text_encoder is not None: if isinstance(self, FluxLoraLoaderMixin) and USE_PEFT_BACKEND: # Retrieve the original scale by scaling back the LoRA layers unscale_lora_layers(self.text_encoder, lora_scale) if self.text_encoder_2 is not None: if isinstance(self, FluxLoraLoaderMixin) and USE_PEFT_BACKEND: # Retrieve the original scale by scaling back the LoRA layers unscale_lora_layers(self.text_encoder_2, lora_scale) dtype = self.text_encoder.dtype if self.text_encoder is not None else self.transformer.dtype text_ids = torch.zeros(prompt_embeds.shape[1], 3).to(device=device, dtype=dtype) return prompt_embeds, pooled_prompt_embeds, text_ids def encode_text_with_editing( self, prompt: Union[str, List[str]], prompt_2: Union[str, List[str]], pooled_prompt_embeds: Optional[torch.FloatTensor] = None, editing_prompt: Optional[List[str]] = None, editing_prompt_2: Optional[List[str]] = None, editing_prompt_embeds: Optional[torch.FloatTensor] = None, 
pooled_editing_prompt_embeds: Optional[torch.FloatTensor] = None, device: Optional[torch.device] = None, num_images_per_prompt: int = 1, max_sequence_length: int = 512, lora_scale: Optional[float] = None, ): """ Encode text prompts with editing prompts and negative prompts for semantic guidance. Args: prompt (`str` or `List[str]`): The prompt or prompts to guide image generation. prompt_2 (`str` or `List[str]`): The prompt or prompts to guide image generation for second tokenizer. pooled_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, pooled text embeddings will be generated from `prompt` input argument. editing_prompt (`str` or `List[str]`, *optional*): The editing prompts for semantic guidance. editing_prompt_2 (`str` or `List[str]`, *optional*): The editing prompts for semantic guidance for second tokenizer. editing_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-computed embeddings for editing prompts. pooled_editing_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-computed pooled embeddings for editing prompts. device (`torch.device`, *optional*): The device to use for computation. num_images_per_prompt (`int`, defaults to 1): Number of images to generate per prompt. max_sequence_length (`int`, defaults to 512): Maximum sequence length for text encoding. lora_scale (`float`, *optional*): Scale factor for LoRA layers if used. Returns: tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor, int]: A tuple containing the prompt embeddings, pooled prompt embeddings, text IDs, and number of enabled editing prompts. 
""" device = device or self._execution_device if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: raise ValueError("Prompt must be provided as string or list of strings") # Get base prompt embeddings prompt_embeds, pooled_prompt_embeds, text_ids = self.encode_prompt( prompt=prompt, prompt_2=prompt_2, pooled_prompt_embeds=pooled_prompt_embeds, device=device, num_images_per_prompt=num_images_per_prompt, max_sequence_length=max_sequence_length, lora_scale=lora_scale, ) # Handle editing prompts if editing_prompt_embeds is not None: enabled_editing_prompts = int(editing_prompt_embeds.shape[0]) edit_text_ids = [] elif editing_prompt is not None: editing_prompt_embeds = [] pooled_editing_prompt_embeds = [] edit_text_ids = [] editing_prompt_2 = editing_prompt if editing_prompt_2 is None else editing_prompt_2 for edit_1, edit_2 in zip(editing_prompt, editing_prompt_2): e_prompt_embeds, pooled_embeds, e_ids = self.encode_prompt( prompt=edit_1, prompt_2=edit_2, device=device, num_images_per_prompt=num_images_per_prompt, max_sequence_length=max_sequence_length, lora_scale=lora_scale, ) editing_prompt_embeds.append(e_prompt_embeds) pooled_editing_prompt_embeds.append(pooled_embeds) edit_text_ids.append(e_ids) enabled_editing_prompts = len(editing_prompt) else: edit_text_ids = [] enabled_editing_prompts = 0 if enabled_editing_prompts: for idx in range(enabled_editing_prompts): editing_prompt_embeds[idx] = torch.cat([editing_prompt_embeds[idx]] * batch_size, dim=0) pooled_editing_prompt_embeds[idx] = torch.cat([pooled_editing_prompt_embeds[idx]] * batch_size, dim=0) return ( prompt_embeds, pooled_prompt_embeds, editing_prompt_embeds, pooled_editing_prompt_embeds, text_ids, edit_text_ids, enabled_editing_prompts, ) # Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline.encode_image def encode_image(self, image, device, num_images_per_prompt): dtype = 
next(self.image_encoder.parameters()).dtype if not isinstance(image, torch.Tensor): image = self.feature_extractor(image, return_tensors="pt").pixel_values image = image.to(device=device, dtype=dtype) image_embeds = self.image_encoder(image).image_embeds image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) return image_embeds # Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline.prepare_ip_adapter_image_embeds def prepare_ip_adapter_image_embeds( self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt ): image_embeds = [] if ip_adapter_image_embeds is None: if not isinstance(ip_adapter_image, list): ip_adapter_image = [ip_adapter_image] if len(ip_adapter_image) != len(self.transformer.encoder_hid_proj.image_projection_layers): raise ValueError( f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.transformer.encoder_hid_proj.image_projection_layers)} IP Adapters." 
) for single_ip_adapter_image, image_proj_layer in zip( ip_adapter_image, self.transformer.encoder_hid_proj.image_projection_layers ): single_image_embeds = self.encode_image(single_ip_adapter_image, device, 1) image_embeds.append(single_image_embeds[None, :]) else: for single_image_embeds in ip_adapter_image_embeds: image_embeds.append(single_image_embeds) ip_adapter_image_embeds = [] for i, single_image_embeds in enumerate(image_embeds): single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0) single_image_embeds = single_image_embeds.to(device=device) ip_adapter_image_embeds.append(single_image_embeds) return ip_adapter_image_embeds # Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline.check_inputs def check_inputs( self, prompt, prompt_2, height, width, negative_prompt=None, negative_prompt_2=None, prompt_embeds=None, negative_prompt_embeds=None, pooled_prompt_embeds=None, negative_pooled_prompt_embeds=None, callback_on_step_end_tensor_inputs=None, max_sequence_length=None, ): if height % (self.vae_scale_factor * 2) != 0 or width % (self.vae_scale_factor * 2) != 0: logger.warning( f"`height` and `width` have to be divisible by {self.vae_scale_factor * 2} but are {height} and {width}. Dimensions will be resized accordingly" ) if callback_on_step_end_tensor_inputs is not None and not all( k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs ): raise ValueError( f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" ) if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." 
) elif prompt_2 is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." ) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)): raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}") if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) elif negative_prompt_2 is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." ) if prompt_embeds is not None and pooled_prompt_embeds is None: raise ValueError( "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`." 
) if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None: raise ValueError( "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`." ) if max_sequence_length is not None and max_sequence_length > 512: raise ValueError(f"`max_sequence_length` cannot be greater than 512 but is {max_sequence_length}") @staticmethod # Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline._prepare_latent_image_ids def _prepare_latent_image_ids(batch_size, height, width, device, dtype): latent_image_ids = torch.zeros(height, width, 3) latent_image_ids[..., 1] = latent_image_ids[..., 1] + torch.arange(height)[:, None] latent_image_ids[..., 2] = latent_image_ids[..., 2] + torch.arange(width)[None, :] latent_image_id_height, latent_image_id_width, latent_image_id_channels = latent_image_ids.shape latent_image_ids = latent_image_ids.reshape( latent_image_id_height * latent_image_id_width, latent_image_id_channels ) return latent_image_ids.to(device=device, dtype=dtype) @staticmethod # Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline._pack_latents def _pack_latents(latents, batch_size, num_channels_latents, height, width): latents = latents.view(batch_size, num_channels_latents, height // 2, 2, width // 2, 2) latents = latents.permute(0, 2, 4, 1, 3, 5) latents = latents.reshape(batch_size, (height // 2) * (width // 2), num_channels_latents * 4) return latents @staticmethod # Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline._unpack_latents def _unpack_latents(latents, height, width, vae_scale_factor): batch_size, num_patches, channels = latents.shape # VAE applies 8x compression on images but we must also account for packing which requires # latent height and width to be divisible by 2. 
        height = 2 * (int(height) // (vae_scale_factor * 2))
        width = 2 * (int(width) // (vae_scale_factor * 2))

        latents = latents.view(batch_size, height // 2, width // 2, channels // 4, 2, 2)
        latents = latents.permute(0, 3, 1, 4, 2, 5)

        latents = latents.reshape(batch_size, channels // (2 * 2), height, width)

        return latents

    # Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline.enable_vae_slicing
    def enable_vae_slicing(self):
        r"""
        Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
        compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
        """
        self.vae.enable_slicing()

    # Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline.disable_vae_slicing
    def disable_vae_slicing(self):
        r"""
        Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to
        computing decoding in one step.
        """
        self.vae.disable_slicing()

    # Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline.enable_vae_tiling
    def enable_vae_tiling(self):
        r"""
        Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
        compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
        processing larger images.
        """
        # Deprecated pipeline-level toggle; forwards to the VAE after emitting a deprecation warning.
        depr_message = f"Calling `enable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_tiling()`."
        deprecate(
            "enable_vae_tiling",
            "0.40.0",
            depr_message,
        )
        self.vae.enable_tiling()

    # Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline.disable_vae_tiling
    def disable_vae_tiling(self):
        r"""
        Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to
        computing decoding in one step.
        """
        # Deprecated pipeline-level toggle; forwards to the VAE after emitting a deprecation warning.
        depr_message = f"Calling `disable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_tiling()`."
        deprecate(
            "disable_vae_tiling",
            "0.40.0",
            depr_message,
        )
        self.vae.disable_tiling()

    # Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline.prepare_latents
    def prepare_latents(
        self,
        batch_size,
        num_channels_latents,
        height,
        width,
        dtype,
        device,
        generator,
        latents=None,
    ):
        # Sample (or reuse) packed latents and the matching positional ids.
        # VAE applies 8x compression on images but we must also account for packing which requires
        # latent height and width to be divisible by 2.
        height = 2 * (int(height) // (self.vae_scale_factor * 2))
        width = 2 * (int(width) // (self.vae_scale_factor * 2))

        shape = (batch_size, num_channels_latents, height, width)

        if latents is not None:
            # Caller-supplied latents: only build the ids and move to the right device/dtype
            latent_image_ids = self._prepare_latent_image_ids(batch_size, height // 2, width // 2, device, dtype)
            return latents.to(device=device, dtype=dtype), latent_image_ids

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        # Store latents in Flux's packed (B, num_patches, C*4) layout
        latents = self._pack_latents(latents, batch_size, num_channels_latents, height, width)

        latent_image_ids = self._prepare_latent_image_ids(batch_size, height // 2, width // 2, device, dtype)

        return latents, latent_image_ids

    @property
    def guidance_scale(self):
        # Guidance scale set at the start of the current __call__
        return self._guidance_scale

    @property
    def joint_attention_kwargs(self):
        # Extra kwargs forwarded to the transformer's attention processors
        return self._joint_attention_kwargs

    @property
    def num_timesteps(self):
        # Number of scheduler timesteps of the current/last run
        return self._num_timesteps

    @property
    def interrupt(self):
        # Cooperative cancellation flag checked every denoising step
        return self._interrupt

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        prompt: Union[str, List[str]] = None,
        prompt_2: Optional[Union[str, List[str]]] = None,
        negative_prompt: Union[str, List[str]] = None,
        negative_prompt_2: Optional[Union[str, List[str]]] = None,
        true_cfg_scale: float = 1.0,
        height: Optional[int] = None,
        width: Optional[int] = None,
        num_inference_steps: int = 28,
        sigmas: Optional[List[float]] = None,
        guidance_scale: float = 3.5,
        num_images_per_prompt: Optional[int] = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        prompt_embeds: Optional[torch.FloatTensor] = None,
        pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
        ip_adapter_image: Optional[PipelineImageInput] = None,
        ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None,
        negative_ip_adapter_image: Optional[PipelineImageInput] = None,
        negative_ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None,
        negative_prompt_embeds: Optional[torch.FloatTensor] = None,
        negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        joint_attention_kwargs: Optional[Dict[str, Any]] = None,
        callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
        callback_on_step_end_tensor_inputs: List[str] = ["latents"],
        max_sequence_length: int = 512,
        editing_prompt: Optional[Union[str, List[str]]] = None,
        editing_prompt_2: Optional[Union[str, List[str]]] = None,
        editing_prompt_embeds: Optional[torch.FloatTensor] = None,
        pooled_editing_prompt_embeds: Optional[torch.FloatTensor] = None,
        reverse_editing_direction: Optional[Union[bool, List[bool]]] = False,
        edit_guidance_scale: Optional[Union[float, List[float]]] = 5,
        edit_warmup_steps: Optional[Union[int, List[int]]] = 8,
        edit_cooldown_steps: Optional[Union[int, List[int]]] = None,
        edit_threshold: Optional[Union[float, List[float]]] = 0.9,
        edit_momentum_scale: Optional[float] = 0.1,
        edit_mom_beta: Optional[float] = 0.4,
        edit_weights: Optional[List[float]] = None,
        sem_guidance: Optional[List[torch.Tensor]] = None,
    ):
        r"""
        Function invoked when calling the pipeline for generation.

        Args:
            prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
                instead.
            prompt_2 (`str` or `List[str]`, *optional*):
                The prompt or prompts to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` will
                be used instead.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. If not defined, one has to pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `true_cfg_scale` is
                not greater than `1`).
            negative_prompt_2 (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
                `text_encoder_2`. If not defined, `negative_prompt` is used in all the text-encoders.
            true_cfg_scale (`float`, *optional*, defaults to 1.0):
                When > 1.0 and a provided `negative_prompt`, enables true classifier-free guidance.
            height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
                The height in pixels of the generated image. This is set to 1024 by default for the best results.
            width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
                The width in pixels of the generated image. This is set to 1024 by default for the best results.
            num_inference_steps (`int`, *optional*, defaults to 28):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            sigmas (`List[float]`, *optional*):
                Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in
                their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed
                will be used.
            guidance_scale (`float`, *optional*, defaults to 3.5):
                Guidance scale as defined in [Classifier-Free Diffusion
                Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2.
                of [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting
                `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to
                the text `prompt`, usually at the expense of lower image quality.
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
                to make generation deterministic.
            latents (`torch.FloatTensor`, *optional*):
                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
                tensor will be generated by sampling using the supplied random `generator`.
            prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                provided, text embeddings will be generated from `prompt` input argument.
            pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
                If not provided, pooled text embeddings will be generated from `prompt` input argument.
            ip_adapter_image: (`PipelineImageInput`, *optional*):
                Optional image input to work with IP Adapters.
            ip_adapter_image_embeds (`List[torch.Tensor]`, *optional*):
                Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of
                IP-adapters. Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. If not
                provided, embeddings are computed from the `ip_adapter_image` input argument.
            negative_ip_adapter_image: (`PipelineImageInput`, *optional*):
                Optional image input to work with IP Adapters.
            negative_ip_adapter_image_embeds (`List[torch.Tensor]`, *optional*):
                Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of
                IP-adapters. Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. If not
                provided, embeddings are computed from the `ip_adapter_image` input argument.
            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
                argument.
            negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
                input argument.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generate image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.flux.FluxPipelineOutput`] instead of a plain tuple.
            joint_attention_kwargs (`dict`, *optional*):
                A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
                `self.processor` in
                [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
            callback_on_step_end (`Callable`, *optional*):
                A function that calls at the end of each denoising steps during the inference. The function is called
                with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
                callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
                `callback_on_step_end_tensor_inputs`.
            callback_on_step_end_tensor_inputs (`List`, *optional*):
                The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
                will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
                `._callback_tensor_inputs` attribute of your pipeline class.
            max_sequence_length (`int` defaults to 512):
                Maximum sequence length to use with the `prompt`.
            editing_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts to guide the image editing. If not defined, no editing will be performed.
            editing_prompt_2 (`str` or `List[str]`, *optional*):
                The prompt or prompts to guide the image editing. If not defined, will use editing_prompt instead.
            editing_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated text embeddings for editing. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, text embeddings will be generated from `editing_prompt` input argument.
            reverse_editing_direction (`bool` or `List[bool]`, *optional*, defaults to `False`):
                Whether to reverse the editing direction for each editing prompt.
            edit_guidance_scale (`float` or `List[float]`, *optional*, defaults to 5):
                Guidance scale for the editing process. If provided as a list, each value corresponds to an editing
                prompt.
            edit_warmup_steps (`int` or `List[int]`, *optional*, defaults to 8):
                Number of warmup steps for editing guidance. If provided as a list, each value corresponds to an
                editing prompt.
            edit_cooldown_steps (`int` or `List[int]`, *optional*, defaults to None):
                Number of cooldown steps for editing guidance. If provided as a list, each value corresponds to an
                editing prompt.
            edit_threshold (`float` or `List[float]`, *optional*, defaults to 0.9):
                Threshold for editing guidance. If provided as a list, each value corresponds to an editing prompt.
            edit_momentum_scale (`float`, *optional*, defaults to 0.1):
                Scale of momentum to be added to the editing guidance at each diffusion step.
            edit_mom_beta (`float`, *optional*, defaults to 0.4):
                Beta value for momentum calculation in editing guidance.
            edit_weights (`List[float]`, *optional*):
                Weights for each editing prompt.
            sem_guidance (`List[torch.Tensor]`, *optional*):
                Pre-generated semantic guidance. If provided, it will be used instead of calculating guidance from
                editing prompts.

        Examples:

        Returns:
            [`~pipelines.flux.FluxPipelineOutput`] or `tuple`: [`~pipelines.flux.FluxPipelineOutput`] if `return_dict`
            is True, otherwise a `tuple`. When returning a tuple, the first element is a list with the generated
            images.
        """
        height = height or self.default_sample_size * self.vae_scale_factor
        width = width or self.default_sample_size * self.vae_scale_factor

        # 1. Check inputs. Raise error if not correct
        self.check_inputs(
            prompt,
            prompt_2,
            height,
            width,
            prompt_embeds=prompt_embeds,
            pooled_prompt_embeds=pooled_prompt_embeds,
            callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs,
            max_sequence_length=max_sequence_length,
        )

        self._guidance_scale = guidance_scale
        self._joint_attention_kwargs = joint_attention_kwargs
        self._interrupt = False

        # 2. Define call parameters
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        # Determine whether semantic (SEGA-style) edit guidance is active, from either
        # raw editing prompts or pre-computed editing embeddings.
        if editing_prompt:
            enable_edit_guidance = True
            if isinstance(editing_prompt, str):
                editing_prompt = [editing_prompt]
            enabled_editing_prompts = len(editing_prompt)
        elif editing_prompt_embeds is not None:
            enable_edit_guidance = True
            enabled_editing_prompts = editing_prompt_embeds.shape[0]
        else:
            enabled_editing_prompts = 0
            enable_edit_guidance = False

        # True CFG needs both a scale > 1 and some form of negative prompt
        has_neg_prompt = negative_prompt is not None or (
            negative_prompt_embeds is not None and negative_pooled_prompt_embeds is not None
        )
        do_true_cfg = true_cfg_scale > 1 and has_neg_prompt

        device = self._execution_device

        lora_scale = (
            self.joint_attention_kwargs.get("scale", None) if self.joint_attention_kwargs is not None else None
        )
        # Encode base prompt and all edit concepts in one call; `enabled_editing_prompts` is
        # re-assigned from the encoder's return value.
        (
            prompt_embeds,
            pooled_prompt_embeds,
            editing_prompts_embeds,
            pooled_editing_prompt_embeds,
            text_ids,
            edit_text_ids,
            enabled_editing_prompts,
        ) = self.encode_text_with_editing(
            prompt=prompt,
            prompt_2=prompt_2,
            pooled_prompt_embeds=pooled_prompt_embeds,
            editing_prompt=editing_prompt,
            editing_prompt_2=editing_prompt_2,
            pooled_editing_prompt_embeds=pooled_editing_prompt_embeds,
            lora_scale=lora_scale,
            device=device,
            num_images_per_prompt=num_images_per_prompt,
            max_sequence_length=max_sequence_length,
        )

        if do_true_cfg:
            (
                negative_prompt_embeds,
                negative_pooled_prompt_embeds,
                _,
            ) = self.encode_prompt(
                prompt=negative_prompt,
                prompt_2=negative_prompt_2,
                prompt_embeds=negative_prompt_embeds,
                pooled_prompt_embeds=negative_pooled_prompt_embeds,
                device=device,
                num_images_per_prompt=num_images_per_prompt,
                max_sequence_length=max_sequence_length,
                lora_scale=lora_scale,
            )
            # Tile negatives across the prompt batch to match the edit-concept batching above
            negative_prompt_embeds = torch.cat([negative_prompt_embeds] * batch_size, dim=0)
            negative_pooled_prompt_embeds = torch.cat([negative_pooled_prompt_embeds] * batch_size, dim=0)

        # 4. Prepare latent variables
        num_channels_latents = self.transformer.config.in_channels // 4
        latents, latent_image_ids = self.prepare_latents(
            batch_size * num_images_per_prompt,
            num_channels_latents,
            height,
            width,
            prompt_embeds.dtype,
            device,
            generator,
            latents,
        )

        # 5. Prepare timesteps
        sigmas = np.linspace(1.0, 1 / num_inference_steps, num_inference_steps) if sigmas is None else sigmas
        image_seq_len = latents.shape[1]
        # Resolution-dependent timestep shift (Flux flow-matching scheduler)
        mu = calculate_shift(
            image_seq_len,
            self.scheduler.config.get("base_image_seq_len", 256),
            self.scheduler.config.get("max_image_seq_len", 4096),
            self.scheduler.config.get("base_shift", 0.5),
            self.scheduler.config.get("max_shift", 1.15),
        )
        timesteps, num_inference_steps = retrieve_timesteps(
            self.scheduler,
            num_inference_steps,
            device,
            sigmas=sigmas,
            mu=mu,
        )
        num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
        self._num_timesteps = len(timesteps)

        edit_momentum = None

        # Edit guidance is only applied between the earliest warmup step and the latest cooldown step
        if edit_warmup_steps:
            tmp_e_warmup_steps = edit_warmup_steps if isinstance(edit_warmup_steps, list) else [edit_warmup_steps]
            min_edit_warmup_steps = min(tmp_e_warmup_steps)
        else:
            min_edit_warmup_steps = 0

        if edit_cooldown_steps:
            tmp_e_cooldown_steps = (
                edit_cooldown_steps if isinstance(edit_cooldown_steps, list) else [edit_cooldown_steps]
            )
            max_edit_cooldown_steps = min(max(tmp_e_cooldown_steps), num_inference_steps)
        else:
            max_edit_cooldown_steps = num_inference_steps

        # handle guidance
        if self.transformer.config.guidance_embeds:
            guidance = torch.full([1], guidance_scale, device=device, dtype=torch.float32)
            guidance = guidance.expand(latents.shape[0])
        else:
            guidance = None

        # When only one side of the IP-Adapter pair is given, substitute a black image for the other
        # so positive and negative branches stay symmetric (matches upstream Flux behavior).
        if (ip_adapter_image is not None or ip_adapter_image_embeds is not None) and (
            negative_ip_adapter_image is None and negative_ip_adapter_image_embeds is None
        ):
            negative_ip_adapter_image = np.zeros((width, height, 3), dtype=np.uint8)
        elif (ip_adapter_image is None and ip_adapter_image_embeds is None) and (
            negative_ip_adapter_image is not None or negative_ip_adapter_image_embeds is not None
        ):
            ip_adapter_image = np.zeros((width, height, 3), dtype=np.uint8)

        if self.joint_attention_kwargs is None:
            self._joint_attention_kwargs = {}

        image_embeds = None
        negative_image_embeds = None
        if ip_adapter_image is not None or ip_adapter_image_embeds is not None:
            image_embeds = self.prepare_ip_adapter_image_embeds(
                ip_adapter_image,
                ip_adapter_image_embeds,
                device,
                batch_size * num_images_per_prompt,
            )
        if negative_ip_adapter_image is not None or negative_ip_adapter_image_embeds is not None:
            negative_image_embeds = self.prepare_ip_adapter_image_embeds(
                negative_ip_adapter_image,
                negative_ip_adapter_image_embeds,
                device,
                batch_size * num_images_per_prompt,
            )

        # 6. Denoising loop
        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                if self.interrupt:
                    continue

                if image_embeds is not None:
                    self._joint_attention_kwargs["ip_adapter_image_embeds"] = image_embeds

                # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
                timestep = t.expand(latents.shape[0]).to(latents.dtype)

                # handle guidance
                if self.transformer.config.guidance_embeds:
                    guidance = torch.tensor([guidance_scale], device=device)
                    guidance = guidance.expand(latents.shape[0])
                else:
                    guidance = None

                # Conditional prediction on the base prompt
                noise_pred = self.transformer(
                    hidden_states=latents,
                    timestep=timestep / 1000,
                    guidance=guidance,
                    pooled_projections=pooled_prompt_embeds,
                    encoder_hidden_states=prompt_embeds,
                    txt_ids=text_ids,
                    img_ids=latent_image_ids,
                    joint_attention_kwargs=self.joint_attention_kwargs,
                    return_dict=False,
                )[0]

                # One extra forward pass per edit concept while inside the warmup/cooldown window
                if enable_edit_guidance and max_edit_cooldown_steps >= i >= min_edit_warmup_steps:
                    noise_pred_edit_concepts = []
                    for e_embed, pooled_e_embed, e_text_id in zip(
                        editing_prompts_embeds, pooled_editing_prompt_embeds, edit_text_ids
                    ):
                        noise_pred_edit = self.transformer(
                            hidden_states=latents,
                            timestep=timestep / 1000,
                            guidance=guidance,
                            pooled_projections=pooled_e_embed,
                            encoder_hidden_states=e_embed,
                            txt_ids=e_text_id,
                            img_ids=latent_image_ids,
                            joint_attention_kwargs=self.joint_attention_kwargs,
                            return_dict=False,
                        )[0]
                        noise_pred_edit_concepts.append(noise_pred_edit)

                if do_true_cfg:
                    if negative_image_embeds is not None:
                        self._joint_attention_kwargs["ip_adapter_image_embeds"] = negative_image_embeds

                    # Unconditional (negative) prediction for true classifier-free guidance
                    noise_pred_uncond = self.transformer(
                        hidden_states=latents,
                        timestep=timestep / 1000,
                        guidance=guidance,
                        pooled_projections=negative_pooled_prompt_embeds,
                        encoder_hidden_states=negative_prompt_embeds,
                        txt_ids=text_ids,
                        img_ids=latent_image_ids,
                        joint_attention_kwargs=self.joint_attention_kwargs,
                        return_dict=False,
                    )[0]
                    noise_guidance = true_cfg_scale * (noise_pred - noise_pred_uncond)
                else:
                    noise_pred_uncond = noise_pred
                    noise_guidance = noise_pred

                if edit_momentum is None:
                    edit_momentum = torch.zeros_like(noise_guidance)

                if enable_edit_guidance and max_edit_cooldown_steps >= i >= min_edit_warmup_steps:
                    # Per-concept weights and guidance directions (SEGA aggregation)
                    concept_weights = torch.zeros(
                        (enabled_editing_prompts, noise_guidance.shape[0]),
                        device=device,
                        dtype=noise_guidance.dtype,
                    )
                    noise_guidance_edit = torch.zeros(
                        (enabled_editing_prompts, *noise_guidance.shape),
                        device=device,
                        dtype=noise_guidance.dtype,
                    )

                    warmup_inds = []
                    for c, noise_pred_edit_concept in enumerate(noise_pred_edit_concepts):
                        # Resolve scalar-or-per-concept hyperparameters for concept c
                        if isinstance(edit_guidance_scale, list):
                            edit_guidance_scale_c = edit_guidance_scale[c]
                        else:
                            edit_guidance_scale_c = edit_guidance_scale
                        if isinstance(edit_threshold, list):
                            edit_threshold_c = edit_threshold[c]
                        else:
                            edit_threshold_c = edit_threshold
                        if isinstance(reverse_editing_direction, list):
                            reverse_editing_direction_c = reverse_editing_direction[c]
                        else:
                            reverse_editing_direction_c = reverse_editing_direction
                        if edit_weights:
                            edit_weight_c = edit_weights[c]
                        else:
                            edit_weight_c = 1.0
                        if isinstance(edit_warmup_steps, list):
                            edit_warmup_steps_c = edit_warmup_steps[c]
                        else:
                            edit_warmup_steps_c = edit_warmup_steps

                        if isinstance(edit_cooldown_steps, list):
                            edit_cooldown_steps_c = edit_cooldown_steps[c]
                        elif edit_cooldown_steps is None:
                            # No cooldown configured: concept stays active this step
                            edit_cooldown_steps_c = i + 1
                        else:
                            edit_cooldown_steps_c = edit_cooldown_steps

                        if i >= edit_warmup_steps_c:
                            warmup_inds.append(c)
                        if i >= edit_cooldown_steps_c:
                            # Concept past its cooldown: contributes zero guidance
                            noise_guidance_edit[c, :, :, :] = torch.zeros_like(noise_pred_edit_concept)
                            continue

                        if do_true_cfg:
                            noise_guidance_edit_tmp = noise_pred_edit_concept - noise_pred_uncond
                        else:  # simple sega
                            noise_guidance_edit_tmp = noise_pred_edit_concept - noise_pred

                        tmp_weights = (noise_guidance - noise_pred_edit_concept).sum(dim=(1, 2))

                        # NOTE(review): the computed tmp_weights are immediately overwritten by a
                        # constant per-concept weight — confirm this is intentional.
                        tmp_weights = torch.full_like(tmp_weights, edit_weight_c)  # * (1 / enabled_editing_prompts)
                        if reverse_editing_direction_c:
                            noise_guidance_edit_tmp = noise_guidance_edit_tmp * -1
                        concept_weights[c, :] = tmp_weights

                        noise_guidance_edit_tmp = noise_guidance_edit_tmp * edit_guidance_scale_c

                        # torch.quantile function expects float32
                        if noise_guidance_edit_tmp.dtype == torch.float32:
                            tmp = torch.quantile(
                                torch.abs(noise_guidance_edit_tmp).flatten(start_dim=2),
                                edit_threshold_c,
                                dim=2,
                                keepdim=False,
                            )
                        else:
                            tmp = torch.quantile(
                                torch.abs(noise_guidance_edit_tmp).flatten(start_dim=2).to(torch.float32),
                                edit_threshold_c,
                                dim=2,
                                keepdim=False,
                            ).to(noise_guidance_edit_tmp.dtype)

                        # Keep only the elements above the per-sample quantile threshold (sparse SEGA mask)
                        noise_guidance_edit_tmp = torch.where(
                            torch.abs(noise_guidance_edit_tmp) >= tmp[:, :, None],
                            noise_guidance_edit_tmp,
                            torch.zeros_like(noise_guidance_edit_tmp),
                        )

                        noise_guidance_edit[c, :, :, :] = noise_guidance_edit_tmp

                    warmup_inds = torch.tensor(warmup_inds).to(device)
                    if len(noise_pred_edit_concepts) > warmup_inds.shape[0] > 0:
                        concept_weights = concept_weights.to("cpu")  # Offload to cpu
                        noise_guidance_edit = noise_guidance_edit.to("cpu")

                        # Apply only the concepts that have completed warmup, with normalized weights
                        concept_weights_tmp = torch.index_select(concept_weights.to(device), 0, warmup_inds)
                        concept_weights_tmp = torch.where(
                            concept_weights_tmp < 0, torch.zeros_like(concept_weights_tmp), concept_weights_tmp
                        )
                        concept_weights_tmp = concept_weights_tmp / concept_weights_tmp.sum(dim=0)

                        noise_guidance_edit_tmp = torch.index_select(noise_guidance_edit.to(device), 0, warmup_inds)
                        noise_guidance_edit_tmp = torch.einsum(
                            "cb,cbij->bij", concept_weights_tmp, noise_guidance_edit_tmp
                        )
                        noise_guidance_edit_tmp = noise_guidance_edit_tmp
                        noise_guidance = noise_guidance + noise_guidance_edit_tmp

                        del noise_guidance_edit_tmp
                        del concept_weights_tmp
                        concept_weights = concept_weights.to(device)
                        noise_guidance_edit = noise_guidance_edit.to(device)

                    concept_weights = torch.where(
                        concept_weights < 0, torch.zeros_like(concept_weights), concept_weights
                    )

                    concept_weights = torch.nan_to_num(concept_weights)
                    # Weighted sum over concepts -> single edit-guidance tensor
                    noise_guidance_edit = torch.einsum("cb,cbij->bij", concept_weights, noise_guidance_edit)

                    noise_guidance_edit = noise_guidance_edit + edit_momentum_scale * edit_momentum

                    # Exponential-moving-average momentum across steps
                    edit_momentum = edit_mom_beta * edit_momentum + (1 - edit_mom_beta) * noise_guidance_edit

                    if warmup_inds.shape[0] == len(noise_pred_edit_concepts):
                        noise_guidance = noise_guidance + noise_guidance_edit

                if sem_guidance is not None:
                    # Caller-provided per-step guidance overriding the computed one
                    edit_guidance = sem_guidance[i].to(device)
                    noise_guidance = noise_guidance + edit_guidance

                if do_true_cfg:
                    noise_pred = noise_guidance + noise_pred_uncond
                else:
                    noise_pred = noise_guidance

                # compute the previous noisy sample x_t -> x_t-1
                latents_dtype = latents.dtype
                latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0]

                if latents.dtype != latents_dtype:
                    if torch.backends.mps.is_available():
                        # some platforms (eg. apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272
                        latents = latents.to(latents_dtype)

                if callback_on_step_end is not None:
                    callback_kwargs = {}
                    for k in callback_on_step_end_tensor_inputs:
                        callback_kwargs[k] = locals()[k]
                    callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)

                    # The callback may replace tensors in-flight
                    latents = callback_outputs.pop("latents", latents)
                    prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)

                # call the callback, if provided
                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                    progress_bar.update()

                if XLA_AVAILABLE:
                    xm.mark_step()

        if output_type == "latent":
            image = latents
        else:
            # Unpack to spatial latents, undo VAE scaling/shift, then decode and post-process
            latents = self._unpack_latents(latents, height, width, self.vae_scale_factor)
            latents = (latents / self.vae.config.scaling_factor) + self.vae.config.shift_factor
            image = self.vae.decode(latents, return_dict=False)[0]
            image = self.image_processor.postprocess(image, output_type=output_type)

        # Offload all models
        self.maybe_free_model_hooks()

        if not return_dict:
            return (image,)

        return FluxPipelineOutput(
            image,
        )
{ "repo_id": "huggingface/diffusers", "file_path": "examples/community/pipeline_flux_semantic_guidance.py", "license": "Apache License 2.0", "lines": 1199, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:examples/research_projects/autoencoderkl/train_autoencoderkl.py
#!/usr/bin/env python # coding=utf-8 # Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import contextlib import gc import logging import math import os import shutil from pathlib import Path import accelerate import lpips import numpy as np import torch import torch.nn.functional as F import torch.utils.checkpoint import torchvision import transformers from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import ProjectConfiguration, set_seed from datasets import load_dataset from huggingface_hub import create_repo, upload_folder from packaging import version from PIL import Image from taming.modules.losses.vqperceptual import NLayerDiscriminator, hinge_d_loss, vanilla_d_loss, weights_init from torchvision import transforms from tqdm.auto import tqdm import diffusers from diffusers import AutoencoderKL from diffusers.optimization import get_scheduler from diffusers.training_utils import EMAModel from diffusers.utils import check_min_version, is_wandb_available, make_image_grid from diffusers.utils.hub_utils import load_or_create_model_card, populate_model_card from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.torch_utils import is_compiled_module if is_wandb_available(): import wandb # Will error if the minimal version of diffusers is not installed. Remove at your own risks. 
# Will error if the minimal version of diffusers is not installed. Remove at your own risks.
check_min_version("0.33.0.dev0")

logger = get_logger(__name__)


@torch.no_grad()
def log_validation(vae, args, accelerator, weight_dtype, step, is_final_validation=False):
    """Reconstruct `args.validation_image` through the VAE and log original/reconstruction pairs.

    During training the wrapped (accelerate-prepared) model is used under autocast;
    for the final validation the saved model is reloaded from `args.output_dir` and
    run without autocast. Returns the list of concatenated (target, reconstruction)
    image tensors so the caller can reuse them (e.g. for the model card).
    """
    logger.info("Running validation... ")

    if not is_final_validation:
        vae = accelerator.unwrap_model(vae)
    else:
        # Final validation evaluates the weights that were actually written to disk.
        vae = AutoencoderKL.from_pretrained(args.output_dir, torch_dtype=weight_dtype)

    images = []
    # Autocast only during training-time validation; the final pass runs in plain precision.
    inference_ctx = contextlib.nullcontext() if is_final_validation else torch.autocast("cuda")

    # Same preprocessing as training so reconstructions are comparable: resize,
    # center-crop to `args.resolution`, then normalize to [-1, 1].
    image_transforms = transforms.Compose(
        [
            transforms.Resize(args.resolution, interpolation=transforms.InterpolationMode.BILINEAR),
            transforms.CenterCrop(args.resolution),
            transforms.ToTensor(),
            transforms.Normalize([0.5], [0.5]),
        ]
    )

    for i, validation_image in enumerate(args.validation_image):
        validation_image = Image.open(validation_image).convert("RGB")
        targets = image_transforms(validation_image).to(accelerator.device, weight_dtype)
        targets = targets.unsqueeze(0)

        with inference_ctx:
            reconstructions = vae(targets).sample

        # Stack target and reconstruction along the batch axis so trackers show them side by side.
        images.append(torch.cat([targets.cpu(), reconstructions.cpu()], axis=0))

    tracker_key = "test" if is_final_validation else "validation"
    for tracker in accelerator.trackers:
        if tracker.name == "tensorboard":
            np_images = np.stack([np.asarray(img) for img in images])
            tracker.writer.add_images(f"{tracker_key}: Original (left), Reconstruction (right)", np_images, step)
        elif tracker.name == "wandb":
            tracker.log(
                {
                    f"{tracker_key}: Original (left), Reconstruction (right)": [
                        wandb.Image(torchvision.utils.make_grid(image)) for _, image in enumerate(images)
                    ]
                }
            )
        else:
            # NOTE(review): `logger.warn` is a deprecated alias of `logger.warning`.
            logger.warn(f"image logging not implemented for {tracker.name}")

    gc.collect()
    torch.cuda.empty_cache()

    return images


# NOTE(review): `base_model=str` makes the *type* `str` the default value (a common
# copy-paste quirk in these scripts); the intended signature is likely
# `base_model: str = None` — confirm before relying on the default.
def save_model_card(repo_id: str, images=None, base_model=str, repo_folder=None):
    """Write a README.md model card (and optional sample-image grid) into `repo_folder`."""
    img_str = ""
    if images is not None:
        img_str = "You can find some example images below.\n\n"
        make_image_grid(images, 1, len(images)).save(os.path.join(repo_folder, "images.png"))
        img_str += "![images](./images.png)\n"

    model_description = f"""
# 
autoencoderkl-{repo_id}

These are autoencoderkl weights trained on {base_model} with new type of conditioning.
{img_str}
"""
    model_card = load_or_create_model_card(
        repo_id_or_path=repo_id,
        from_training=True,
        license="creativeml-openrail-m",
        base_model=base_model,
        model_description=model_description,
        inference=True,
    )

    tags = [
        "stable-diffusion",
        "stable-diffusion-diffusers",
        "image-to-image",
        "diffusers",
        "autoencoderkl",
        "diffusers-training",
    ]
    model_card = populate_model_card(model_card, tags=tags)

    model_card.save(os.path.join(repo_folder, "README.md"))


def parse_args(input_args=None):
    """Build and validate the CLI arguments for AutoencoderKL training.

    Args:
        input_args: Optional list of argument strings; when None, `sys.argv` is used.

    Returns:
        The parsed `argparse.Namespace`.

    Raises:
        ValueError: if both a pretrained model and a model config are given, if neither
            a dataset name nor a train data dir is given, or if the resolution is not
            divisible by 8.
    """
    parser = argparse.ArgumentParser(description="Simple example of a AutoencoderKL training script.")
    # --- Model selection: either a pretrained checkpoint, a config file, or neither
    # (in which case main() falls back to the sd-vae-ft-mse config). ---
    parser.add_argument(
        "--pretrained_model_name_or_path",
        type=str,
        default=None,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "--model_config_name_or_path",
        type=str,
        default=None,
        help="The config of the VAE model to train, leave as None to use standard VAE model configuration.",
    )
    parser.add_argument(
        "--revision",
        type=str,
        default=None,
        required=False,
        help="Revision of pretrained model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default="autoencoderkl-model",
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument(
        "--cache_dir",
        type=str,
        default=None,
        help="The directory where the downloaded models and datasets will be stored.",
    )
    parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
    parser.add_argument(
        "--resolution",
        type=int,
        default=512,
        help=(
            "The resolution for input images, all the images in the train/validation dataset will be resized to this"
            " resolution"
        ),
    )
    parser.add_argument(
        "--train_batch_size", type=int, default=4, help="Batch size (per device) for the training dataloader."
    )
    parser.add_argument("--num_train_epochs", type=int, default=1)
    parser.add_argument(
        "--max_train_steps",
        type=int,
        default=None,
        help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
    )
    parser.add_argument(
        "--checkpointing_steps",
        type=int,
        default=500,
        help=(
            "Save a checkpoint of the training state every X updates. Checkpoints can be used for resuming training via `--resume_from_checkpoint`. "
            "In the case that the checkpoint is better than the final trained model, the checkpoint can also be used for inference."
            "Using a checkpoint for inference requires separate loading of the original pipeline and the individual checkpointed model components."
            "See https://huggingface.co/docs/diffusers/main/en/training/dreambooth#performing-inference-using-a-saved-checkpoint for step by step"
            "instructions."
        ),
    )
    parser.add_argument(
        "--checkpoints_total_limit",
        type=int,
        default=None,
        help=("Max number of checkpoints to store."),
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help=(
            "Whether training should be resumed from a previous checkpoint. Use a path saved by"
            ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.'
        ),
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument(
        "--gradient_checkpointing",
        action="store_true",
        help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.",
    )
    # --- Optimizer / LR schedule; the VAE and the discriminator each get their own
    # learning rate and scheduler. ---
    parser.add_argument(
        "--learning_rate",
        type=float,
        default=4.5e-6,
        help="Initial learning rate (after the potential warmup period) to use.",
    )
    parser.add_argument(
        "--disc_learning_rate",
        type=float,
        default=4.5e-6,
        help="Initial learning rate (after the potential warmup period) to use.",
    )
    parser.add_argument(
        "--scale_lr",
        action="store_true",
        default=False,
        help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.",
    )
    parser.add_argument(
        "--lr_scheduler",
        type=str,
        default="constant",
        help=(
            'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
            ' "constant", "constant_with_warmup"]'
        ),
    )
    parser.add_argument(
        "--disc_lr_scheduler",
        type=str,
        default="constant",
        help=(
            'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
            ' "constant", "constant_with_warmup"]'
        ),
    )
    parser.add_argument(
        "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler."
    )
    parser.add_argument(
        "--lr_num_cycles",
        type=int,
        default=1,
        help="Number of hard resets of the lr in cosine_with_restarts scheduler.",
    )
    parser.add_argument("--lr_power", type=float, default=1.0, help="Power factor of the polynomial scheduler.")
    parser.add_argument(
        "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes."
    )
    parser.add_argument("--use_ema", action="store_true", help="Whether to use EMA model.")
    parser.add_argument(
        "--dataloader_num_workers",
        type=int,
        default=0,
        help=(
            "Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process."
        ),
    )
    parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.")
    parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.")
    parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.")
    parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer")
    parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
    parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
    parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.")
    parser.add_argument(
        "--hub_model_id",
        type=str,
        default=None,
        help="The name of the repository to keep in sync with the local `output_dir`.",
    )
    parser.add_argument(
        "--logging_dir",
        type=str,
        default="logs",
        help=(
            "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
            " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
        ),
    )
    parser.add_argument(
        "--allow_tf32",
        action="store_true",
        help=(
            "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see"
            " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices"
        ),
    )
    parser.add_argument(
        "--report_to",
        type=str,
        default="tensorboard",
        help=(
            'The integration to report the results and logs to. Supported platforms are `"tensorboard"`'
            ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.'
        ),
    )
    # NOTE(review): the help string below contains the typo "1.10.and"; it is a
    # runtime string so it is left byte-identical here.
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16"],
        help=(
            "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >="
            " 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the"
            " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config."
        ),
    )
    parser.add_argument(
        "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers."
    )
    parser.add_argument(
        "--set_grads_to_none",
        action="store_true",
        help=(
            "Save more memory by using setting grads to None instead of zero. Be aware, that this changes certain"
            " behaviors, so disable this argument if it causes any problems. More info:"
            " https://pytorch.org/docs/stable/generated/torch.optim.Optimizer.zero_grad.html"
        ),
    )
    # --- Data source: either a hub/local dataset name or an imagefolder directory. ---
    parser.add_argument(
        "--dataset_name",
        type=str,
        default=None,
        help=(
            "The name of the Dataset (from the HuggingFace hub) to train on (could be your own, possibly private,"
            " dataset). It can also be a path pointing to a local copy of a dataset in your filesystem,"
            " or to a folder containing files that 🤗 Datasets can understand."
        ),
    )
    parser.add_argument(
        "--dataset_config_name",
        type=str,
        default=None,
        help="The config of the Dataset, leave as None if there's only one config.",
    )
    parser.add_argument(
        "--train_data_dir",
        type=str,
        default=None,
        help=(
            "A folder containing the training data. Folder contents must follow the structure described in"
            " https://huggingface.co/docs/datasets/image_dataset#imagefolder. In particular, a `metadata.jsonl` file"
            " must exist to provide the captions for the images. Ignored if `dataset_name` is specified."
        ),
    )
    parser.add_argument(
        "--image_column", type=str, default="image", help="The column of the dataset containing the target image."
    )
    parser.add_argument(
        "--max_train_samples",
        type=int,
        default=None,
        help=(
            "For debugging purposes or quicker training, truncate the number of training examples to this "
            "value if set."
        ),
    )
    parser.add_argument(
        "--validation_image",
        type=str,
        default=None,
        nargs="+",
        help="A set of paths to the image be evaluated every `--validation_steps` and logged to `--report_to`.",
    )
    parser.add_argument(
        "--validation_steps",
        type=int,
        default=100,
        help=(
            "Run validation every X steps. Validation consists of running the prompt"
            " `args.validation_prompt` multiple times: `args.num_validation_images`"
            " and logging the images."
        ),
    )
    parser.add_argument(
        "--tracker_project_name",
        type=str,
        default="train_autoencoderkl",
        help=(
            "The `project_name` argument passed to Accelerator.init_trackers for"
            " more information see https://huggingface.co/docs/accelerate/v0.17.0/en/package_reference/accelerator#accelerate.Accelerator"
        ),
    )
    # --- Loss weighting: reconstruction + KL + LPIPS perceptual + adversarial terms. ---
    parser.add_argument(
        "--rec_loss",
        type=str,
        default="l2",
        help="The loss function for VAE reconstruction loss.",
    )
    parser.add_argument(
        "--kl_scale",
        type=float,
        default=1e-6,
        help="Scaling factor for the Kullback-Leibler divergence penalty term.",
    )
    parser.add_argument(
        "--perceptual_scale",
        type=float,
        default=0.5,
        help="Scaling factor for the LPIPS metric",
    )
    parser.add_argument(
        "--disc_start",
        type=int,
        default=50001,
        help="Start for the discriminator",
    )
    # NOTE(review): `--disc_factor` and `--disc_scale` share the same help text but are
    # distinct knobs (factor gates the adversarial term before `disc_start`; scale
    # multiplies the adaptive weight) — presumably intentional, verify against main().
    parser.add_argument(
        "--disc_factor",
        type=float,
        default=1.0,
        help="Scaling factor for the discriminator",
    )
    parser.add_argument(
        "--disc_scale",
        type=float,
        default=1.0,
        help="Scaling factor for the discriminator",
    )
    parser.add_argument(
        "--disc_loss",
        type=str,
        default="hinge",
        help="Loss function for the discriminator",
    )
    parser.add_argument(
        "--decoder_only",
        action="store_true",
        help="Only train the VAE decoder.",
    )

    if input_args is not None:
        args = parser.parse_args(input_args)
    else:
        args = parser.parse_args()

    # Sanity checks: the two model-source flags are mutually exclusive, a data source
    # is mandatory, and the resolution must match the VAE's 8x downsampling factor.
    if args.pretrained_model_name_or_path is not None and args.model_config_name_or_path is not None:
        raise ValueError("Cannot specify both `--pretrained_model_name_or_path` and `--model_config_name_or_path`")

    if args.dataset_name is None and args.train_data_dir is None:
        raise ValueError("Specify either `--dataset_name` or `--train_data_dir`")

    if args.resolution % 8 != 0:
        raise ValueError(
            "`--resolution` must be divisible by 8 for consistently sized encoded images between the VAE and the diffusion model."
        )

    return args


def make_train_dataset(args, accelerator):
    """Load the training dataset (hub dataset or local imagefolder) and attach transforms.

    Returns the transformed `train` split; raises ValueError when the configured
    image column is missing from the dataset.
    """
    # Get the datasets: you can either provide your own training and evaluation files (see below)
    # or specify a Dataset from the hub (the dataset will be downloaded automatically from the datasets Hub).

    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
    # download the dataset.
    if args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        dataset = load_dataset(
            args.dataset_name,
            args.dataset_config_name,
            cache_dir=args.cache_dir,
            data_dir=args.train_data_dir,
        )
    else:
        data_files = {}
        if args.train_data_dir is not None:
            data_files["train"] = os.path.join(args.train_data_dir, "**")
        dataset = load_dataset(
            "imagefolder",
            data_files=data_files,
            cache_dir=args.cache_dir,
        )
        # See more about loading custom images at
        # https://huggingface.co/docs/datasets/v2.0.0/en/dataset_script

    # Preprocessing the datasets.
    # We need to tokenize inputs and targets.
    column_names = dataset["train"].column_names

    # 6. Get the column names for input/target.
    if args.image_column is None:
        image_column = column_names[0]
        logger.info(f"image column defaulting to {image_column}")
    else:
        image_column = args.image_column
        if image_column not in column_names:
            raise ValueError(
                f"`--image_column` value '{args.image_column}' not found in dataset columns. 
Dataset columns are: {', '.join(column_names)}"
            )

    # Same preprocessing as validation: resize, center-crop, normalize to [-1, 1].
    image_transforms = transforms.Compose(
        [
            transforms.Resize(args.resolution, interpolation=transforms.InterpolationMode.BILINEAR),
            transforms.CenterCrop(args.resolution),
            transforms.ToTensor(),
            transforms.Normalize([0.5], [0.5]),
        ]
    )

    def preprocess_train(examples):
        # Convert each example's image to RGB, apply the transforms, and store the
        # resulting tensors under "pixel_values".
        images = [image.convert("RGB") for image in examples[image_column]]
        images = [image_transforms(image) for image in images]
        examples["pixel_values"] = images
        return examples

    with accelerator.main_process_first():
        if args.max_train_samples is not None:
            dataset["train"] = dataset["train"].shuffle(seed=args.seed).select(range(args.max_train_samples))
        # Set the training transforms
        train_dataset = dataset["train"].with_transform(preprocess_train)

    return train_dataset


def collate_fn(examples):
    """Stack per-example "pixel_values" tensors into one contiguous float batch dict."""
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float()
    return {"pixel_values": pixel_values}


def main(args):
    """Entry point: set up accelerate, models, data and run AutoencoderKL training."""
    # Refuse the wandb + explicit-token combination so the token can't leak into logs.
    if args.report_to == "wandb" and args.hub_token is not None:
        raise ValueError(
            "You cannot use both --report_to=wandb and --hub_token due to a security risk of exposing your token."
            " Please use `hf auth login` to authenticate with the Hub."
        )

    logging_dir = Path(args.output_dir, args.logging_dir)

    accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir)

    accelerator = Accelerator(
        gradient_accumulation_steps=args.gradient_accumulation_steps,
        mixed_precision=args.mixed_precision,
        log_with=args.report_to,
        project_config=accelerator_project_config,
    )

    # Disable AMP for MPS.
    if torch.backends.mps.is_available():
        accelerator.native_amp = False

    # Make one log on every process with the configuration for debugging.
logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger.info(accelerator.state, main_process_only=False) if accelerator.is_local_main_process: transformers.utils.logging.set_verbosity_warning() diffusers.utils.logging.set_verbosity_info() else: transformers.utils.logging.set_verbosity_error() diffusers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Handle the repository creation if accelerator.is_main_process: if args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) if args.push_to_hub: repo_id = create_repo( repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token ).repo_id # Load AutoencoderKL if args.pretrained_model_name_or_path is None and args.model_config_name_or_path is None: config = AutoencoderKL.load_config("stabilityai/sd-vae-ft-mse") vae = AutoencoderKL.from_config(config) elif args.pretrained_model_name_or_path is not None: vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, revision=args.revision) else: config = AutoencoderKL.load_config(args.model_config_name_or_path) vae = AutoencoderKL.from_config(config) if args.use_ema: ema_vae = EMAModel(vae.parameters(), model_cls=AutoencoderKL, model_config=vae.config) perceptual_loss = lpips.LPIPS(net="vgg").eval() discriminator = NLayerDiscriminator(input_nc=3, n_layers=3, use_actnorm=False).apply(weights_init) discriminator = torch.nn.SyncBatchNorm.convert_sync_batchnorm(discriminator) # Taken from [Sayak Paul's Diffusers PR #6511](https://github.com/huggingface/diffusers/pull/6511/files) def unwrap_model(model): model = accelerator.unwrap_model(model) model = model._orig_mod if is_compiled_module(model) else model return model # `accelerate` 0.16.0 will have better support for customized saving if version.parse(accelerate.__version__) >= version.parse("0.16.0"): # 
create custom saving & loading hooks so that `accelerator.save_state(...)` serializes in a nice format def save_model_hook(models, weights, output_dir): if accelerator.is_main_process: if args.use_ema: sub_dir = "autoencoderkl_ema" ema_vae.save_pretrained(os.path.join(output_dir, sub_dir)) i = len(weights) - 1 while len(weights) > 0: weights.pop() model = models[i] if isinstance(model, AutoencoderKL): sub_dir = "autoencoderkl" model.save_pretrained(os.path.join(output_dir, sub_dir)) else: sub_dir = "discriminator" os.makedirs(os.path.join(output_dir, sub_dir), exist_ok=True) torch.save(model.state_dict(), os.path.join(output_dir, sub_dir, "pytorch_model.bin")) i -= 1 def load_model_hook(models, input_dir): while len(models) > 0: if args.use_ema: sub_dir = "autoencoderkl_ema" load_model = EMAModel.from_pretrained(os.path.join(input_dir, sub_dir), AutoencoderKL) ema_vae.load_state_dict(load_model.state_dict()) ema_vae.to(accelerator.device) del load_model # pop models so that they are not loaded again model = models.pop() load_model = NLayerDiscriminator(input_nc=3, n_layers=3, use_actnorm=False).load_state_dict( os.path.join(input_dir, "discriminator", "pytorch_model.bin") ) model.load_state_dict(load_model.state_dict()) del load_model model = models.pop() load_model = AutoencoderKL.from_pretrained(input_dir, subfolder="autoencoderkl") model.register_to_config(**load_model.config) model.load_state_dict(load_model.state_dict()) del load_model accelerator.register_save_state_pre_hook(save_model_hook) accelerator.register_load_state_pre_hook(load_model_hook) vae.requires_grad_(True) if args.decoder_only: vae.encoder.requires_grad_(False) if getattr(vae, "quant_conv", None): vae.quant_conv.requires_grad_(False) vae.train() discriminator.requires_grad_(True) discriminator.train() if args.enable_xformers_memory_efficient_attention: if is_xformers_available(): import xformers xformers_version = version.parse(xformers.__version__) if xformers_version == 
version.parse("0.0.16"): logger.warning( "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details." ) vae.enable_xformers_memory_efficient_attention() else: raise ValueError("xformers is not available. Make sure it is installed correctly") if args.gradient_checkpointing: vae.enable_gradient_checkpointing() # Check that all trainable models are in full precision low_precision_error_string = ( " Please make sure to always have all model weights in full float32 precision when starting training - even if" " doing mixed precision training, copy of the weights should still be float32." ) if unwrap_model(vae).dtype != torch.float32: raise ValueError(f"VAE loaded as datatype {unwrap_model(vae).dtype}. {low_precision_error_string}") # Enable TF32 for faster training on Ampere GPUs, # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices if args.allow_tf32: torch.backends.cuda.matmul.allow_tf32 = True if args.scale_lr: args.learning_rate = ( args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes ) # Use 8-bit Adam for lower memory usage or to fine-tune the model in 16GB GPUs if args.use_8bit_adam: try: import bitsandbytes as bnb except ImportError: raise ImportError( "To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`." 
) optimizer_class = bnb.optim.AdamW8bit else: optimizer_class = torch.optim.AdamW params_to_optimize = filter(lambda p: p.requires_grad, vae.parameters()) disc_params_to_optimize = filter(lambda p: p.requires_grad, discriminator.parameters()) optimizer = optimizer_class( params_to_optimize, lr=args.learning_rate, betas=(args.adam_beta1, args.adam_beta2), weight_decay=args.adam_weight_decay, eps=args.adam_epsilon, ) disc_optimizer = optimizer_class( disc_params_to_optimize, lr=args.disc_learning_rate, betas=(args.adam_beta1, args.adam_beta2), weight_decay=args.adam_weight_decay, eps=args.adam_epsilon, ) train_dataset = make_train_dataset(args, accelerator) train_dataloader = torch.utils.data.DataLoader( train_dataset, shuffle=True, collate_fn=collate_fn, batch_size=args.train_batch_size, num_workers=args.dataloader_num_workers, ) # Scheduler and math around the number of training steps. overrode_max_train_steps = False num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if args.max_train_steps is None: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch overrode_max_train_steps = True lr_scheduler = get_scheduler( args.lr_scheduler, optimizer=optimizer, num_warmup_steps=args.lr_warmup_steps * accelerator.num_processes, num_training_steps=args.max_train_steps * accelerator.num_processes, num_cycles=args.lr_num_cycles, power=args.lr_power, ) disc_lr_scheduler = get_scheduler( args.disc_lr_scheduler, optimizer=disc_optimizer, num_warmup_steps=args.lr_warmup_steps * accelerator.num_processes, num_training_steps=args.max_train_steps * accelerator.num_processes, num_cycles=args.lr_num_cycles, power=args.lr_power, ) # Prepare everything with our `accelerator`. 
( vae, discriminator, optimizer, disc_optimizer, train_dataloader, lr_scheduler, disc_lr_scheduler, ) = accelerator.prepare( vae, discriminator, optimizer, disc_optimizer, train_dataloader, lr_scheduler, disc_lr_scheduler ) # For mixed precision training we cast the text_encoder and vae weights to half-precision # as these models are only used for inference, keeping weights in full precision is not required. weight_dtype = torch.float32 if accelerator.mixed_precision == "fp16": weight_dtype = torch.float16 elif accelerator.mixed_precision == "bf16": weight_dtype = torch.bfloat16 # Move VAE, perceptual loss and discriminator to device and cast to weight_dtype vae.to(accelerator.device, dtype=weight_dtype) perceptual_loss.to(accelerator.device, dtype=weight_dtype) discriminator.to(accelerator.device, dtype=weight_dtype) if args.use_ema: ema_vae.to(accelerator.device, dtype=weight_dtype) # We need to recalculate our total training steps as the size of the training dataloader may have changed. num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if overrode_max_train_steps: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch # Afterwards we recalculate our number of training epochs args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) # We need to initialize the trackers we use, and also store our configuration. # The trackers initializes automatically on the main process. if accelerator.is_main_process: tracker_config = dict(vars(args)) accelerator.init_trackers(args.tracker_project_name, config=tracker_config) # Train! 
total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps logger.info("***** Running training *****") logger.info(f" Num examples = {len(train_dataset)}") logger.info(f" Num batches each epoch = {len(train_dataloader)}") logger.info(f" Num Epochs = {args.num_train_epochs}") logger.info(f" Instantaneous batch size per device = {args.train_batch_size}") logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}") logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") logger.info(f" Total optimization steps = {args.max_train_steps}") global_step = 0 first_epoch = 0 # Potentially load in the weights and states from a previous save if args.resume_from_checkpoint: if args.resume_from_checkpoint != "latest": path = os.path.basename(args.resume_from_checkpoint) else: # Get the most recent checkpoint dirs = os.listdir(args.output_dir) dirs = [d for d in dirs if d.startswith("checkpoint")] dirs = sorted(dirs, key=lambda x: int(x.split("-")[1])) path = dirs[-1] if len(dirs) > 0 else None if path is None: accelerator.print( f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run." ) args.resume_from_checkpoint = None initial_global_step = 0 else: accelerator.print(f"Resuming from checkpoint {path}") accelerator.load_state(os.path.join(args.output_dir, path)) global_step = int(path.split("-")[1]) initial_global_step = global_step first_epoch = global_step // num_update_steps_per_epoch else: initial_global_step = 0 progress_bar = tqdm( range(0, args.max_train_steps), initial=initial_global_step, desc="Steps", # Only show the progress bar once on each machine. 
disable=not accelerator.is_local_main_process, ) image_logs = None for epoch in range(first_epoch, args.num_train_epochs): vae.train() discriminator.train() for step, batch in enumerate(train_dataloader): # Convert images to latent space and reconstruct from them targets = batch["pixel_values"].to(dtype=weight_dtype) posterior = accelerator.unwrap_model(vae).encode(targets).latent_dist latents = posterior.sample() reconstructions = accelerator.unwrap_model(vae).decode(latents).sample if (step // args.gradient_accumulation_steps) % 2 == 0 or global_step < args.disc_start: with accelerator.accumulate(vae): # reconstruction loss. Pixel level differences between input vs output if args.rec_loss == "l2": rec_loss = F.mse_loss(reconstructions.float(), targets.float(), reduction="none") elif args.rec_loss == "l1": rec_loss = F.l1_loss(reconstructions.float(), targets.float(), reduction="none") else: raise ValueError(f"Invalid reconstruction loss type: {args.rec_loss}") # perceptual loss. The high level feature mean squared error loss with torch.no_grad(): p_loss = perceptual_loss(reconstructions, targets) rec_loss = rec_loss + args.perceptual_scale * p_loss nll_loss = rec_loss nll_loss = torch.sum(nll_loss) / nll_loss.shape[0] kl_loss = posterior.kl() kl_loss = torch.sum(kl_loss) / kl_loss.shape[0] logits_fake = discriminator(reconstructions) g_loss = -torch.mean(logits_fake) last_layer = accelerator.unwrap_model(vae).decoder.conv_out.weight nll_grads = torch.autograd.grad(nll_loss, last_layer, retain_graph=True)[0] g_grads = torch.autograd.grad(g_loss, last_layer, retain_graph=True)[0] disc_weight = torch.norm(nll_grads) / (torch.norm(g_grads) + 1e-4) disc_weight = torch.clamp(disc_weight, 0.0, 1e4).detach() disc_weight = disc_weight * args.disc_scale disc_factor = args.disc_factor if global_step >= args.disc_start else 0.0 loss = nll_loss + args.kl_scale * kl_loss + disc_weight * disc_factor * g_loss logs = { "loss": loss.detach().mean().item(), "nll_loss": 
nll_loss.detach().mean().item(), "rec_loss": rec_loss.detach().mean().item(), "p_loss": p_loss.detach().mean().item(), "kl_loss": kl_loss.detach().mean().item(), "disc_weight": disc_weight.detach().mean().item(), "disc_factor": disc_factor, "g_loss": g_loss.detach().mean().item(), "lr": lr_scheduler.get_last_lr()[0], } accelerator.backward(loss) if accelerator.sync_gradients: params_to_clip = vae.parameters() accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm) optimizer.step() lr_scheduler.step() optimizer.zero_grad(set_to_none=args.set_grads_to_none) else: with accelerator.accumulate(discriminator): logits_real = discriminator(targets) logits_fake = discriminator(reconstructions) disc_loss = hinge_d_loss if args.disc_loss == "hinge" else vanilla_d_loss disc_factor = args.disc_factor if global_step >= args.disc_start else 0.0 d_loss = disc_factor * disc_loss(logits_real, logits_fake) logs = { "disc_loss": d_loss.detach().mean().item(), "logits_real": logits_real.detach().mean().item(), "logits_fake": logits_fake.detach().mean().item(), "disc_lr": disc_lr_scheduler.get_last_lr()[0], } accelerator.backward(d_loss) if accelerator.sync_gradients: params_to_clip = discriminator.parameters() accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm) disc_optimizer.step() disc_lr_scheduler.step() disc_optimizer.zero_grad(set_to_none=args.set_grads_to_none) # Checks if the accelerator has performed an optimization step behind the scenes if accelerator.sync_gradients: progress_bar.update(1) global_step += 1 if args.use_ema: ema_vae.step(vae.parameters()) if accelerator.is_main_process: if global_step % args.checkpointing_steps == 0: # _before_ saving state, check if this save would set us over the `checkpoints_total_limit` if args.checkpoints_total_limit is not None: checkpoints = os.listdir(args.output_dir) checkpoints = [d for d in checkpoints if d.startswith("checkpoint")] checkpoints = sorted(checkpoints, key=lambda x: int(x.split("-")[1])) # before we 
save the new checkpoint, we need to have at _most_ `checkpoints_total_limit - 1` checkpoints if len(checkpoints) >= args.checkpoints_total_limit: num_to_remove = len(checkpoints) - args.checkpoints_total_limit + 1 removing_checkpoints = checkpoints[0:num_to_remove] logger.info( f"{len(checkpoints)} checkpoints already exist, removing {len(removing_checkpoints)} checkpoints" ) logger.info(f"removing checkpoints: {', '.join(removing_checkpoints)}") for removing_checkpoint in removing_checkpoints: removing_checkpoint = os.path.join(args.output_dir, removing_checkpoint) shutil.rmtree(removing_checkpoint) save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}") accelerator.save_state(save_path) logger.info(f"Saved state to {save_path}") if global_step == 1 or global_step % args.validation_steps == 0: if args.use_ema: ema_vae.store(vae.parameters()) ema_vae.copy_to(vae.parameters()) image_logs = log_validation( vae, args, accelerator, weight_dtype, global_step, ) if args.use_ema: ema_vae.restore(vae.parameters()) progress_bar.set_postfix(**logs) accelerator.log(logs, step=global_step) if global_step >= args.max_train_steps: break # Create the pipeline using using the trained modules and save it. accelerator.wait_for_everyone() if accelerator.is_main_process: vae = accelerator.unwrap_model(vae) discriminator = accelerator.unwrap_model(discriminator) if args.use_ema: ema_vae.copy_to(vae.parameters()) vae.save_pretrained(args.output_dir) torch.save(discriminator.state_dict(), os.path.join(args.output_dir, "pytorch_model.bin")) # Run a final round of validation. 
image_logs = None image_logs = log_validation( vae=vae, args=args, accelerator=accelerator, weight_dtype=weight_dtype, step=global_step, is_final_validation=True, ) if args.push_to_hub: save_model_card( repo_id, image_logs=image_logs, base_model=args.pretrained_model_name_or_path, repo_folder=args.output_dir, ) upload_folder( repo_id=repo_id, folder_path=args.output_dir, commit_message="End of training", ignore_patterns=["step_*", "epoch_*"], ) accelerator.end_training() if __name__ == "__main__": args = parse_args() main(args)
{ "repo_id": "huggingface/diffusers", "file_path": "examples/research_projects/autoencoderkl/train_autoencoderkl.py", "license": "Apache License 2.0", "lines": 949, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:examples/community/pipeline_stable_diffusion_xl_attentive_eraser.py
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect from typing import Any, Callable, Dict, List, Optional, Tuple, Union import numpy as np import PIL.Image import torch from PIL import Image from transformers import ( CLIPImageProcessor, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionModelWithProjection, ) from diffusers.image_processor import PipelineImageInput, VaeImageProcessor from diffusers.loaders import ( FromSingleFileMixin, IPAdapterMixin, StableDiffusionXLLoraLoaderMixin, TextualInversionLoaderMixin, ) from diffusers.models import AutoencoderKL, ImageProjection, UNet2DConditionModel from diffusers.models.lora import adjust_lora_scale_text_encoder from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin from diffusers.pipelines.stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput from diffusers.schedulers import KarrasDiffusionSchedulers from diffusers.utils import ( USE_PEFT_BACKEND, deprecate, is_invisible_watermark_available, is_torch_xla_available, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers, ) from diffusers.utils.torch_utils import randn_tensor if is_invisible_watermark_available(): from diffusers.pipelines.stable_diffusion_xl.watermark import StableDiffusionXLWatermarker if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False import 
torch.nn as nn import torch.nn.functional as F from einops import rearrange, repeat logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py >>> import torch >>> from diffusers import DDIMScheduler, DiffusionPipeline >>> from diffusers.utils import load_image >>> import torch.nn.functional as F >>> from torchvision.transforms.functional import to_tensor, gaussian_blur >>> dtype = torch.float16 >>> device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu") >>> scheduler = DDIMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False) >>> pipeline = DiffusionPipeline.from_pretrained( ... "stabilityai/stable-diffusion-xl-base-1.0", ... custom_pipeline="pipeline_stable_diffusion_xl_attentive_eraser", ... scheduler=scheduler, ... variant="fp16", ... use_safetensors=True, ... torch_dtype=dtype, ... ).to(device) >>> def preprocess_image(image_path, device): ... image = to_tensor((load_image(image_path))) ... image = image.unsqueeze_(0).float() * 2 - 1 # [0,1] --> [-1,1] ... if image.shape[1] != 3: ... image = image.expand(-1, 3, -1, -1) ... image = F.interpolate(image, (1024, 1024)) ... image = image.to(dtype).to(device) ... return image >>> def preprocess_mask(mask_path, device): ... mask = to_tensor((load_image(mask_path, convert_method=lambda img: img.convert('L')))) ... mask = mask.unsqueeze_(0).float() # 0 or 1 ... mask = F.interpolate(mask, (1024, 1024)) ... mask = gaussian_blur(mask, kernel_size=(77, 77)) ... mask[mask < 0.1] = 0 ... mask[mask >= 0.1] = 1 ... mask = mask.to(dtype).to(device) ... 
return mask >>> prompt = "" # Set prompt to null >>> seed=123 >>> generator = torch.Generator(device=device).manual_seed(seed) >>> source_image_path = "https://raw.githubusercontent.com/Anonym0u3/Images/refs/heads/main/an1024.png" >>> mask_path = "https://raw.githubusercontent.com/Anonym0u3/Images/refs/heads/main/an1024_mask.png" >>> source_image = preprocess_image(source_image_path, device) >>> mask = preprocess_mask(mask_path, device) >>> image = pipeline( ... prompt=prompt, ... image=source_image, ... mask_image=mask, ... height=1024, ... width=1024, ... AAS=True, # enable AAS ... strength=0.8, # inpainting strength ... rm_guidance_scale=9, # removal guidance scale ... ss_steps = 9, # similarity suppression steps ... ss_scale = 0.3, # similarity suppression scale ... AAS_start_step=0, # AAS start step ... AAS_start_layer=34, # AAS start layer ... AAS_end_layer=70, # AAS end layer ... num_inference_steps=50, # number of inference steps # AAS_end_step = int(strength*num_inference_steps) ... generator=generator, ... guidance_scale=1, ... 
).images[0] >>> image.save('./removed_img.png') >>> print("Object removal completed") ``` """ class AttentionBase: def __init__(self): self.cur_step = 0 self.num_att_layers = -1 self.cur_att_layer = 0 def after_step(self): pass def __call__(self, q, k, v, sim, attn, is_cross, place_in_unet, num_heads, **kwargs): out = self.forward(q, k, v, sim, attn, is_cross, place_in_unet, num_heads, **kwargs) self.cur_att_layer += 1 if self.cur_att_layer == self.num_att_layers: self.cur_att_layer = 0 self.cur_step += 1 # after step self.after_step() return out def forward(self, q, k, v, sim, attn, is_cross, place_in_unet, num_heads, **kwargs): out = torch.einsum("b i j, b j d -> b i d", attn, v) out = rearrange(out, "(b h) n d -> b n (h d)", h=num_heads) return out def reset(self): self.cur_step = 0 self.cur_att_layer = 0 class AAS_XL(AttentionBase): MODEL_TYPE = {"SD": 16, "SDXL": 70} def __init__( self, start_step=4, end_step=50, start_layer=10, end_layer=16, layer_idx=None, step_idx=None, total_steps=50, mask=None, model_type="SD", ss_steps=9, ss_scale=1.0, ): """ Args: start_step: the step to start AAS start_layer: the layer to start AAS layer_idx: list of the layers to apply AAS step_idx: list the steps to apply AAS total_steps: the total number of steps mask: source mask with shape (h, w) model_type: the model type, SD or SDXL """ super().__init__() self.total_steps = total_steps self.total_layers = self.MODEL_TYPE.get(model_type, 16) self.start_step = start_step self.end_step = end_step self.start_layer = start_layer self.end_layer = end_layer self.layer_idx = layer_idx if layer_idx is not None else list(range(start_layer, end_layer)) self.step_idx = step_idx if step_idx is not None else list(range(start_step, end_step)) self.mask = mask # mask with shape (1, 1 ,h, w) self.ss_steps = ss_steps self.ss_scale = ss_scale self.mask_16 = F.max_pool2d(mask, (1024 // 16, 1024 // 16)).round().squeeze().squeeze() self.mask_32 = F.max_pool2d(mask, (1024 // 32, 1024 // 
32)).round().squeeze().squeeze() self.mask_64 = F.max_pool2d(mask, (1024 // 64, 1024 // 64)).round().squeeze().squeeze() self.mask_128 = F.max_pool2d(mask, (1024 // 128, 1024 // 128)).round().squeeze().squeeze() def attn_batch(self, q, k, v, sim, attn, is_cross, place_in_unet, num_heads, is_mask_attn, mask, **kwargs): B = q.shape[0] // num_heads if is_mask_attn: mask_flatten = mask.flatten(0) if self.cur_step <= self.ss_steps: # background sim_bg = sim + mask_flatten.masked_fill(mask_flatten == 1, torch.finfo(sim.dtype).min) # object sim_fg = self.ss_scale * sim sim_fg += mask_flatten.masked_fill(mask_flatten == 1, torch.finfo(sim.dtype).min) sim = torch.cat([sim_fg, sim_bg], dim=0) else: sim += mask_flatten.masked_fill(mask_flatten == 1, torch.finfo(sim.dtype).min) attn = sim.softmax(-1) if len(attn) == 2 * len(v): v = torch.cat([v] * 2) out = torch.einsum("h i j, h j d -> h i d", attn, v) out = rearrange(out, "(h1 h) (b n) d -> (h1 b) n (h d)", b=B, h=num_heads) return out def forward(self, q, k, v, sim, attn, is_cross, place_in_unet, num_heads, **kwargs): """ Attention forward function """ if is_cross or self.cur_step not in self.step_idx or self.cur_att_layer // 2 not in self.layer_idx: return super().forward(q, k, v, sim, attn, is_cross, place_in_unet, num_heads, **kwargs) H = int(np.sqrt(q.shape[1])) if H == 16: mask = self.mask_16.to(sim.device) elif H == 32: mask = self.mask_32.to(sim.device) elif H == 64: mask = self.mask_64.to(sim.device) else: mask = self.mask_128.to(sim.device) q_wo, q_w = q.chunk(2) k_wo, k_w = k.chunk(2) v_wo, v_w = v.chunk(2) sim_wo, sim_w = sim.chunk(2) attn_wo, attn_w = attn.chunk(2) out_source = self.attn_batch( q_wo, k_wo, v_wo, sim_wo, attn_wo, is_cross, place_in_unet, num_heads, is_mask_attn=False, mask=None, **kwargs, ) out_target = self.attn_batch( q_w, k_w, v_w, sim_w, attn_w, is_cross, place_in_unet, num_heads, is_mask_attn=True, mask=mask, **kwargs ) if self.mask is not None: if out_target.shape[0] == 2: out_target_fg, 
out_target_bg = out_target.chunk(2, 0) mask = mask.reshape(-1, 1) # (hw, 1) out_target = out_target_fg * mask + out_target_bg * (1 - mask) else: out_target = out_target out = torch.cat([out_source, out_target], dim=0) return out # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): """ Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://huggingface.co/papers/2305.08891). See Section 3.4 """ std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) # rescale the results from guidance (fixes overexposure) noise_pred_rescaled = noise_cfg * (std_text / std_cfg) # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg return noise_cfg def mask_pil_to_torch(mask, height, width): # preprocess mask if isinstance(mask, (PIL.Image.Image, np.ndarray)): mask = [mask] if isinstance(mask, list) and isinstance(mask[0], PIL.Image.Image): mask = [i.resize((width, height), resample=PIL.Image.LANCZOS) for i in mask] mask = np.concatenate([np.array(m.convert("L"))[None, None, :] for m in mask], axis=0) mask = mask.astype(np.float32) / 255.0 elif isinstance(mask, list) and isinstance(mask[0], np.ndarray): mask = np.concatenate([m[None, None, :] for m in mask], axis=0) mask = torch.from_numpy(mask) return mask def prepare_mask_and_masked_image(image, mask, height, width, return_image: bool = False): """ Prepares a pair (image, mask) to be consumed by the Stable Diffusion pipeline. 
This means that those inputs will be converted to ``torch.Tensor`` with shapes ``batch x channels x height x width`` where ``channels`` is ``3`` for the ``image`` and ``1`` for the ``mask``. The ``image`` will be converted to ``torch.float32`` and normalized to be in ``[-1, 1]``. The ``mask`` will be binarized (``mask > 0.5``) and cast to ``torch.float32`` too. Args: image (Union[np.array, PIL.Image, torch.Tensor]): The image to inpaint. It can be a ``PIL.Image``, or a ``height x width x 3`` ``np.array`` or a ``channels x height x width`` ``torch.Tensor`` or a ``batch x channels x height x width`` ``torch.Tensor``. mask (_type_): The mask to apply to the image, i.e. regions to inpaint. It can be a ``PIL.Image``, or a ``height x width`` ``np.array`` or a ``1 x height x width`` ``torch.Tensor`` or a ``batch x 1 x height x width`` ``torch.Tensor``. Raises: ValueError: ``torch.Tensor`` images should be in the ``[-1, 1]`` range. ValueError: ``torch.Tensor`` mask should be in the ``[0, 1]`` range. ValueError: ``mask`` and ``image`` should have the same spatial dimensions. TypeError: ``mask`` is a ``torch.Tensor`` but ``image`` is not (ot the other way around). Returns: tuple[torch.Tensor]: The pair (mask, masked_image) as ``torch.Tensor`` with 4 dimensions: ``batch x channels x height x width``. 
""" if image is None: raise ValueError("`image` input cannot be undefined.") if mask is None: raise ValueError("`mask_image` input cannot be undefined.") if isinstance(image, torch.Tensor): if not isinstance(mask, torch.Tensor): mask = mask_pil_to_torch(mask, height, width) if image.ndim == 3: image = image.unsqueeze(0) # Batch and add channel dim for single mask if mask.ndim == 2: mask = mask.unsqueeze(0).unsqueeze(0) # Batch single mask or add channel dim if mask.ndim == 3: # Single batched mask, no channel dim or single mask not batched but channel dim if mask.shape[0] == 1: mask = mask.unsqueeze(0) # Batched masks no channel dim else: mask = mask.unsqueeze(1) assert image.ndim == 4 and mask.ndim == 4, "Image and Mask must have 4 dimensions" # assert image.shape[-2:] == mask.shape[-2:], "Image and Mask must have the same spatial dimensions" assert image.shape[0] == mask.shape[0], "Image and Mask must have the same batch size" # Check image is in [-1, 1] # if image.min() < -1 or image.max() > 1: # raise ValueError("Image should be in [-1, 1] range") # Check mask is in [0, 1] if mask.min() < 0 or mask.max() > 1: raise ValueError("Mask should be in [0, 1] range") # Binarize mask mask[mask < 0.5] = 0 mask[mask >= 0.5] = 1 # Image as float32 image = image.to(dtype=torch.float32) elif isinstance(mask, torch.Tensor): raise TypeError(f"`mask` is a torch.Tensor but `image` (type: {type(image)} is not") else: # preprocess image if isinstance(image, (PIL.Image.Image, np.ndarray)): image = [image] if isinstance(image, list) and isinstance(image[0], PIL.Image.Image): # resize all images w.r.t passed height an width image = [i.resize((width, height), resample=PIL.Image.LANCZOS) for i in image] image = [np.array(i.convert("RGB"))[None, :] for i in image] image = np.concatenate(image, axis=0) elif isinstance(image, list) and isinstance(image[0], np.ndarray): image = np.concatenate([i[None, :] for i in image], axis=0) image = image.transpose(0, 3, 1, 2) image = 
torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0 mask = mask_pil_to_torch(mask, height, width) mask[mask < 0.5] = 0 mask[mask >= 0.5] = 1 if image.shape[1] == 4: # images are in latent space and thus can't # be masked set masked_image to None # we assume that the checkpoint is not an inpainting # checkpoint. TOD(Yiyi) - need to clean this up later masked_image = None else: masked_image = image * (mask < 0.5) # n.b. ensure backwards compatibility as old function does not return image if return_image: return mask, masked_image, image return mask, masked_image # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents def retrieve_latents( encoder_output: torch.Tensor, generator: torch.Generator | None = None, sample_mode: str = "sample" ): if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": return encoder_output.latent_dist.sample(generator) elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": return encoder_output.latent_dist.mode() elif hasattr(encoder_output, "latents"): return encoder_output.latents else: raise AttributeError("Could not access latents of provided encoder_output") # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps def retrieve_timesteps( scheduler, num_inference_steps: Optional[int] = None, device: Optional[Union[str, torch.device]] = None, timesteps: Optional[List[int]] = None, **kwargs, ): """ Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. Args: scheduler (`SchedulerMixin`): The scheduler to get timesteps from. num_inference_steps (`int`): The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` must be `None`. device (`str` or `torch.device`, *optional*): The device to which the timesteps should be moved to. 
If `None`, the timesteps are not moved. timesteps (`List[int]`, *optional*): Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default timestep spacing strategy of the scheduler is used. If `timesteps` is passed, `num_inference_steps` must be `None`. Returns: `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the second element is the number of inference steps. """ if timesteps is not None: accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" timestep schedules. Please check whether you are using the correct scheduler." ) scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return timesteps, num_inference_steps class StableDiffusionXL_AE_Pipeline( DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, StableDiffusionXLLoraLoaderMixin, FromSingleFileMixin, IPAdapterMixin, ): r""" Pipeline for object removal using Stable Diffusion XL. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) 
The pipeline also inherits the following loading methods: - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files - [`~loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`] for loading LoRA weights - [`~loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights`] for saving LoRA weights - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. text_encoder ([`CLIPTextModel`]): Frozen text-encoder. Stable Diffusion XL uses the text portion of [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. text_encoder_2 ([` CLIPTextModelWithProjection`]): Second frozen text-encoder. Stable Diffusion XL uses the text and pool portion of [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection), specifically the [laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k) variant. tokenizer (`CLIPTokenizer`): Tokenizer of class [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). tokenizer_2 (`CLIPTokenizer`): Second Tokenizer of class [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. 
requires_aesthetics_score (`bool`, *optional*, defaults to `"False"`): Whether the `unet` requires a aesthetic_score condition to be passed during inference. Also see the config of `stabilityai/stable-diffusion-xl-refiner-1-0`. force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `"True"`): Whether the negative prompt embeddings shall be forced to always be set to 0. Also see the config of `stabilityai/stable-diffusion-xl-base-1-0`. add_watermarker (`bool`, *optional*): Whether to use the [invisible_watermark library](https://github.com/ShieldMnt/invisible-watermark/) to watermark output images. If not defined, it will default to True if the package is installed, otherwise no watermarker will be used. """ model_cpu_offload_seq = "text_encoder->text_encoder_2->image_encoder->unet->vae" _optional_components = [ "tokenizer", "tokenizer_2", "text_encoder", "text_encoder_2", "image_encoder", "feature_extractor", ] _callback_tensor_inputs = [ "latents", "prompt_embeds", "negative_prompt_embeds", "add_text_embeds", "add_time_ids", "negative_pooled_prompt_embeds", "add_neg_time_ids", "mask", "masked_image_latents", ] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, text_encoder_2: CLIPTextModelWithProjection, tokenizer: CLIPTokenizer, tokenizer_2: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: KarrasDiffusionSchedulers, image_encoder: CLIPVisionModelWithProjection = None, feature_extractor: CLIPImageProcessor = None, requires_aesthetics_score: bool = False, force_zeros_for_empty_prompt: bool = True, add_watermarker: Optional[bool] = None, ): super().__init__() self.register_modules( vae=vae, text_encoder=text_encoder, text_encoder_2=text_encoder_2, tokenizer=tokenizer, tokenizer_2=tokenizer_2, unet=unet, image_encoder=image_encoder, feature_extractor=feature_extractor, scheduler=scheduler, ) self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt) 
self.register_to_config(requires_aesthetics_score=requires_aesthetics_score) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.mask_processor = VaeImageProcessor( vae_scale_factor=self.vae_scale_factor, do_normalize=False, do_binarize=True, do_convert_grayscale=True ) add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available() if add_watermarker: self.watermark = StableDiffusionXLWatermarker() else: self.watermark = None # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): dtype = next(self.image_encoder.parameters()).dtype if not isinstance(image, torch.Tensor): image = self.feature_extractor(image, return_tensors="pt").pixel_values image = image.to(device=device, dtype=dtype) if output_hidden_states: image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_enc_hidden_states = self.image_encoder( torch.zeros_like(image), output_hidden_states=True ).hidden_states[-2] uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( num_images_per_prompt, dim=0 ) return image_enc_hidden_states, uncond_image_enc_hidden_states else: image_embeds = self.image_encoder(image).image_embeds image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_embeds = torch.zeros_like(image_embeds) return image_embeds, uncond_image_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds def prepare_ip_adapter_image_embeds( self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, 
do_classifier_free_guidance ): if ip_adapter_image_embeds is None: if not isinstance(ip_adapter_image, list): ip_adapter_image = [ip_adapter_image] if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): raise ValueError( f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters." ) image_embeds = [] for single_ip_adapter_image, image_proj_layer in zip( ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers ): output_hidden_state = not isinstance(image_proj_layer, ImageProjection) single_image_embeds, single_negative_image_embeds = self.encode_image( single_ip_adapter_image, device, 1, output_hidden_state ) single_image_embeds = torch.stack([single_image_embeds] * num_images_per_prompt, dim=0) single_negative_image_embeds = torch.stack( [single_negative_image_embeds] * num_images_per_prompt, dim=0 ) if do_classifier_free_guidance: single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds]) single_image_embeds = single_image_embeds.to(device) image_embeds.append(single_image_embeds) else: repeat_dims = [1] image_embeds = [] for single_image_embeds in ip_adapter_image_embeds: if do_classifier_free_guidance: single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2) single_image_embeds = single_image_embeds.repeat( num_images_per_prompt, *(repeat_dims * len(single_image_embeds.shape[1:])) ) single_negative_image_embeds = single_negative_image_embeds.repeat( num_images_per_prompt, *(repeat_dims * len(single_negative_image_embeds.shape[1:])) ) single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds]) else: single_image_embeds = single_image_embeds.repeat( num_images_per_prompt, *(repeat_dims * len(single_image_embeds.shape[1:])) ) image_embeds.append(single_image_embeds) return image_embeds # Copied from 
diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.encode_prompt def encode_prompt( self, prompt: str, prompt_2: str | None = None, device: Optional[torch.device] = None, num_images_per_prompt: int = 1, do_classifier_free_guidance: bool = True, negative_prompt: str | None = None, negative_prompt_2: str | None = None, prompt_embeds: Optional[torch.FloatTensor] = None, negative_prompt_embeds: Optional[torch.FloatTensor] = None, pooled_prompt_embeds: Optional[torch.FloatTensor] = None, negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None, lora_scale: Optional[float] = None, clip_skip: Optional[int] = None, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded prompt_2 (`str` or `List[str]`, *optional*): The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is used in both text-encoders device: (`torch.device`): torch device num_images_per_prompt (`int`): number of images that should be generated per prompt do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). negative_prompt_2 (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative text embeddings. 
Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. pooled_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, pooled text embeddings will be generated from `prompt` input argument. negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` input argument. lora_scale (`float`, *optional*): A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. 
""" device = device or self._execution_device # set lora scale so that monkey patched LoRA # function of text encoder can correctly access it if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin): self._lora_scale = lora_scale # dynamically adjust the LoRA scale if self.text_encoder is not None: if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if self.text_encoder_2 is not None: if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale) else: scale_lora_layers(self.text_encoder_2, lora_scale) prompt = [prompt] if isinstance(prompt, str) else prompt if prompt is not None: batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] # Define tokenizers and text encoders tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2] text_encoders = ( [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2] ) if prompt_embeds is None: prompt_2 = prompt_2 or prompt prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 # textual inversion: process multi-vector tokens if necessary prompt_embeds_list = [] prompts = [prompt, prompt_2] for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders): if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, tokenizer) text_inputs = tokenizer( prompt, padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( text_input_ids, untruncated_ids ): removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1]) logger.warning( "The following part of your input was 
truncated because CLIP can only handle sequences up to" f" {tokenizer.model_max_length} tokens: {removed_text}" ) prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True) # We are only ALWAYS interested in the pooled output of the final text encoder pooled_prompt_embeds = prompt_embeds[0] if clip_skip is None: prompt_embeds = prompt_embeds.hidden_states[-2] else: # "2" because SDXL always indexes from the penultimate layer. prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)] prompt_embeds_list.append(prompt_embeds) prompt_embeds = torch.concat(prompt_embeds_list, dim=-1) # get unconditional embeddings for classifier free guidance zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt: negative_prompt_embeds = torch.zeros_like(prompt_embeds) negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds) elif do_classifier_free_guidance and negative_prompt_embeds is None: negative_prompt = negative_prompt or "" negative_prompt_2 = negative_prompt_2 or negative_prompt # normalize str to list negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt negative_prompt_2 = ( batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2 ) uncond_tokens: List[str] if prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." 
) else: uncond_tokens = [negative_prompt, negative_prompt_2] negative_prompt_embeds_list = [] for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders): if isinstance(self, TextualInversionLoaderMixin): negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer) max_length = prompt_embeds.shape[1] uncond_input = tokenizer( negative_prompt, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) negative_prompt_embeds = text_encoder( uncond_input.input_ids.to(device), output_hidden_states=True, ) # We are only ALWAYS interested in the pooled output of the final text encoder negative_pooled_prompt_embeds = negative_prompt_embeds[0] negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2] negative_prompt_embeds_list.append(negative_prompt_embeds) negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1) if self.text_encoder_2 is not None: prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) else: prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=device) bs_embed, seq_len, _ = prompt_embeds.shape # duplicate text embeddings for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance: # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] if self.text_encoder_2 is not None: negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) else: negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.unet.dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) pooled_prompt_embeds = 
pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( bs_embed * num_images_per_prompt, -1 ) if do_classifier_free_guidance: negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( bs_embed * num_images_per_prompt, -1 ) if self.text_encoder is not None: if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: # Retrieve the original scale by scaling back the LoRA layers unscale_lora_layers(self.text_encoder, lora_scale) if self.text_encoder_2 is not None: if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: # Retrieve the original scale by scaling back the LoRA layers unscale_lora_layers(self.text_encoder_2, lora_scale) return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://huggingface.co/papers/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs def check_inputs( self, prompt, prompt_2, image, mask_image, height, width, strength, callback_steps, output_type, negative_prompt=None, negative_prompt_2=None, prompt_embeds=None, negative_prompt_embeds=None, ip_adapter_image=None, ip_adapter_image_embeds=None, callback_on_step_end_tensor_inputs=None, padding_mask_crop=None, ): if strength < 0 or strength > 1: raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}") if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) if callback_on_step_end_tensor_inputs is not None and not all( k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs ): raise ValueError( f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" ) if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." 
) elif prompt_2 is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." ) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)): raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}") if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) elif negative_prompt_2 is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." ) if padding_mask_crop is not None: if not isinstance(image, PIL.Image.Image): raise ValueError( f"The image should be a PIL image when inpainting mask crop, but is of type {type(image)}." ) if not isinstance(mask_image, PIL.Image.Image): raise ValueError( f"The mask image should be a PIL image when inpainting mask crop, but is of type" f" {type(mask_image)}." 
) if output_type != "pil": raise ValueError(f"The output type should be PIL when inpainting mask crop, but is {output_type}.") if ip_adapter_image is not None and ip_adapter_image_embeds is not None: raise ValueError( "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined." ) if ip_adapter_image_embeds is not None: if not isinstance(ip_adapter_image_embeds, list): raise ValueError( f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}" ) elif ip_adapter_image_embeds[0].ndim not in [3, 4]: raise ValueError( f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D" ) def prepare_latents( self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None, image=None, timestep=None, is_strength_max=True, add_noise=True, return_noise=False, return_image_latents=False, ): shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) if (image is None or timestep is None) and not is_strength_max: raise ValueError( "Since strength < 1. initial latents are to be initialised as a combination of Image + Noise." "However, either the image or the noise timestep has not been provided." 
) if image.shape[1] == 4: image_latents = image.to(device=device, dtype=dtype) image_latents = image_latents.repeat(batch_size // image_latents.shape[0], 1, 1, 1) elif return_image_latents or (latents is None and not is_strength_max): image = image.to(device=device, dtype=dtype) image_latents = self._encode_vae_image(image=image, generator=generator) image_latents = image_latents.repeat(batch_size // image_latents.shape[0], 1, 1, 1) if latents is None and add_noise: noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) # if strength is 1. then initialise the latents to noise, else initial to image + noise latents = noise if is_strength_max else self.scheduler.add_noise(image_latents, noise, timestep) # if pure noise then scale the initial latents by the Scheduler's init sigma latents = latents * self.scheduler.init_noise_sigma if is_strength_max else latents elif add_noise: noise = latents.to(device) latents = noise * self.scheduler.init_noise_sigma else: noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) latents = image_latents.to(device) outputs = (latents,) if return_noise: outputs += (noise,) if return_image_latents: outputs += (image_latents,) return outputs def _encode_vae_image(self, image: torch.Tensor, generator: torch.Generator): dtype = image.dtype if self.vae.config.force_upcast: image = image.float() self.vae.to(dtype=torch.float32) if isinstance(generator, list): image_latents = [ retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i]) for i in range(image.shape[0]) ] image_latents = torch.cat(image_latents, dim=0) else: image_latents = retrieve_latents(self.vae.encode(image), generator=generator) if self.vae.config.force_upcast: self.vae.to(dtype) image_latents = image_latents.to(dtype) image_latents = self.vae.config.scaling_factor * image_latents return image_latents def prepare_mask_latents( self, mask, masked_image, batch_size, height, width, dtype, device, generator, 
do_classifier_free_guidance ): # resize the mask to latents shape as we concatenate the mask to the latents # we do that before converting to dtype to avoid breaking in case we're using cpu_offload # and half precision # mask = torch.nn.functional.interpolate( # mask, size=(height // self.vae_scale_factor, width // self.vae_scale_factor) # ) mask = torch.nn.functional.max_pool2d(mask, (8, 8)).round() mask = mask.to(device=device, dtype=dtype) # duplicate mask and masked_image_latents for each generation per prompt, using mps friendly method if mask.shape[0] < batch_size: if not batch_size % mask.shape[0] == 0: raise ValueError( "The passed mask and the required batch size don't match. Masks are supposed to be duplicated to" f" a total batch size of {batch_size}, but {mask.shape[0]} masks were passed. Make sure the number" " of masks that you pass is divisible by the total requested batch size." ) mask = mask.repeat(batch_size // mask.shape[0], 1, 1, 1) mask = torch.cat([mask] * 2) if do_classifier_free_guidance else mask if masked_image is not None and masked_image.shape[1] == 4: masked_image_latents = masked_image else: masked_image_latents = None if masked_image is not None: if masked_image_latents is None: masked_image = masked_image.to(device=device, dtype=dtype) masked_image_latents = self._encode_vae_image(masked_image, generator=generator) if masked_image_latents.shape[0] < batch_size: if not batch_size % masked_image_latents.shape[0] == 0: raise ValueError( "The passed images and the required batch size don't match. Images are supposed to be duplicated" f" to a total batch size of {batch_size}, but {masked_image_latents.shape[0]} images were passed." " Make sure the number of images that you pass is divisible by the total requested batch size." 
) masked_image_latents = masked_image_latents.repeat( batch_size // masked_image_latents.shape[0], 1, 1, 1 ) masked_image_latents = ( torch.cat([masked_image_latents] * 2) if do_classifier_free_guidance else masked_image_latents ) # aligning device to prevent device errors when concating it with the latent model input masked_image_latents = masked_image_latents.to(device=device, dtype=dtype) return mask, masked_image_latents # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_img2img.StableDiffusionXLImg2ImgPipeline.get_timesteps def get_timesteps(self, num_inference_steps, strength, device, denoising_start=None): # get the original timestep using init_timestep if denoising_start is None: init_timestep = min(int(num_inference_steps * strength), num_inference_steps) t_start = max(num_inference_steps - init_timestep, 0) else: t_start = 0 timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :] # Strength is irrelevant if we directly request a timestep to start at; # that is, strength is determined by the denoising_start instead. if denoising_start is not None: discrete_timestep_cutoff = int( round( self.scheduler.config.num_train_timesteps - (denoising_start * self.scheduler.config.num_train_timesteps) ) ) num_inference_steps = (timesteps < discrete_timestep_cutoff).sum().item() if self.scheduler.order == 2 and num_inference_steps % 2 == 0: # if the scheduler is a 2nd order scheduler we might have to do +1 # because `num_inference_steps` might be even given that every timestep # (except the highest one) is duplicated. If `num_inference_steps` is even it would # mean that we cut the timesteps in the middle of the denoising step # (between 1st and 2nd devirative) which leads to incorrect results. 
By adding 1 # we ensure that the denoising process always ends after the 2nd derivate step of the scheduler num_inference_steps = num_inference_steps + 1 # because t_n+1 >= t_n, we slice the timesteps starting from the end timesteps = timesteps[-num_inference_steps:] return timesteps, num_inference_steps return timesteps, num_inference_steps - t_start # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_img2img.StableDiffusionXLImg2ImgPipeline._get_add_time_ids def _get_add_time_ids( self, original_size, crops_coords_top_left, target_size, aesthetic_score, negative_aesthetic_score, negative_original_size, negative_crops_coords_top_left, negative_target_size, dtype, text_encoder_projection_dim=None, ): if self.config.requires_aesthetics_score: add_time_ids = list(original_size + crops_coords_top_left + (aesthetic_score,)) add_neg_time_ids = list( negative_original_size + negative_crops_coords_top_left + (negative_aesthetic_score,) ) else: add_time_ids = list(original_size + crops_coords_top_left + target_size) add_neg_time_ids = list(negative_original_size + crops_coords_top_left + negative_target_size) passed_add_embed_dim = ( self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim ) expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features if ( expected_add_embed_dim > passed_add_embed_dim and (expected_add_embed_dim - passed_add_embed_dim) == self.unet.config.addition_time_embed_dim ): raise ValueError( f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. Please make sure to enable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=True)` to make sure `aesthetic_score` {aesthetic_score} and `negative_aesthetic_score` {negative_aesthetic_score} is correctly used by the model." 
) elif ( expected_add_embed_dim < passed_add_embed_dim and (passed_add_embed_dim - expected_add_embed_dim) == self.unet.config.addition_time_embed_dim ): raise ValueError( f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. Please make sure to disable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=False)` to make sure `target_size` {target_size} is correctly used by the model." ) elif expected_add_embed_dim != passed_add_embed_dim: raise ValueError( f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`." ) add_time_ids = torch.tensor([add_time_ids], dtype=dtype) add_neg_time_ids = torch.tensor([add_neg_time_ids], dtype=dtype) return add_time_ids, add_neg_time_ids # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae def upcast_vae(self): deprecate("upcast_vae", "1.0.0", "`upcast_vae` is deprecated. 
Please use `pipe.vae.to(torch.float32)`") self.vae.to(dtype=torch.float32) # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=torch.float32): """ See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298 Args: timesteps (`torch.Tensor`): generate embedding vectors at these timesteps embedding_dim (`int`, *optional*, defaults to 512): dimension of the embeddings to generate dtype: data type of the generated embeddings Returns: `torch.FloatTensor`: Embedding vectors with shape `(len(timesteps), embedding_dim)` """ assert len(w.shape) == 1 w = w * 1000.0 half_dim = embedding_dim // 2 emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) emb = w.to(dtype)[:, None] * emb[None, :] emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) if embedding_dim % 2 == 1: # zero pad emb = torch.nn.functional.pad(emb, (0, 1)) assert emb.shape == (w.shape[0], embedding_dim) return emb @property def guidance_scale(self): return self._guidance_scale @property def guidance_rescale(self): return self._guidance_rescale @property def clip_skip(self): return self._clip_skip @property def do_self_attention_redirection_guidance(self): # SARG return self._rm_guidance_scale > 1 and self._AAS # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1` # corresponds to doing no classifier free guidance. 
@property def do_classifier_free_guidance(self): return ( self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None and not self.do_self_attention_redirection_guidance ) # CFG was disabled when SARG was used, and experiments proved that there was little difference in the effect of whether CFG was used or not @property def cross_attention_kwargs(self): return self._cross_attention_kwargs @property def denoising_end(self): return self._denoising_end @property def denoising_start(self): return self._denoising_start @property def num_timesteps(self): return self._num_timesteps @property def interrupt(self): return self._interrupt @torch.no_grad() def image2latent(self, image: torch.Tensor, generator: torch.Generator): DEVICE = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu") if type(image) is Image: image = np.array(image) image = torch.from_numpy(image).float() / 127.5 - 1 image = image.permute(2, 0, 1).unsqueeze(0).to(DEVICE) # input image density range [-1, 1] # latents = self.vae.encode(image)['latent_dist'].mean latents = self._encode_vae_image(image, generator) # latents = retrieve_latents(self.vae.encode(image)) # latents = latents * self.vae.config.scaling_factor return latents def next_step(self, model_output: torch.FloatTensor, timestep: int, x: torch.FloatTensor, eta=0.0, verbose=False): """ Inverse sampling for DDIM Inversion """ if verbose: print("timestep: ", timestep) next_step = timestep timestep = min(timestep - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps, 999) alpha_prod_t = self.scheduler.alphas_cumprod[timestep] if timestep >= 0 else self.scheduler.final_alpha_cumprod alpha_prod_t_next = self.scheduler.alphas_cumprod[next_step] beta_prod_t = 1 - alpha_prod_t pred_x0 = (x - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5 pred_dir = (1 - alpha_prod_t_next) ** 0.5 * model_output x_next = alpha_prod_t_next**0.5 * pred_x0 + pred_dir return x_next, pred_x0 @torch.no_grad() 
    def invert(
        self,
        image: torch.Tensor,
        prompt,
        num_inference_steps=50,
        eta=0.0,
        original_size: Tuple[int, int] = None,
        target_size: Tuple[int, int] = None,
        crops_coords_top_left: Tuple[int, int] = (0, 0),
        negative_crops_coords_top_left: Tuple[int, int] = (0, 0),
        aesthetic_score: float = 6.0,
        negative_aesthetic_score: float = 2.5,
        return_intermediates=False,
        **kwds,
    ):
        """
        invert a real image into noise map with deterministic DDIM inversion

        Encodes `image` to latents, then runs the scheduler's timesteps in *reverse*,
        applying `next_step` at each iteration so the result is the noise latent from
        which the image can be (approximately) reconstructed by regular sampling.
        Returns `(latents, start_latents)`, or `(latents, latents_list, pred_x0_list)`
        when `return_intermediates` is True.
        """
        DEVICE = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
        batch_size = image.shape[0]
        # Broadcast image/prompt so both share the same batch dimension.
        if isinstance(prompt, list):
            if batch_size == 1:
                image = image.expand(len(prompt), -1, -1, -1)
        elif isinstance(prompt, str):
            if batch_size > 1:
                prompt = [prompt] * batch_size

        # Define tokenizers and text encoders
        tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2]
        text_encoders = (
            [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2]
        )
        # SDXL convention: the same prompt feeds both encoders unless a second one is given.
        prompt_2 = prompt
        prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2

        # textual inversion: process multi-vector tokens if necessary
        prompt_embeds_list = []
        prompts = [prompt, prompt_2]
        for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders):
            if isinstance(self, TextualInversionLoaderMixin):
                prompt = self.maybe_convert_prompt(prompt, tokenizer)

            text_inputs = tokenizer(
                prompt,
                padding="max_length",
                max_length=tokenizer.model_max_length,
                truncation=True,
                return_tensors="pt",
            )

            text_input_ids = text_inputs.input_ids
            untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids

            # Warn if the prompt was truncated by the tokenizer's max length.
            if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
                text_input_ids, untruncated_ids
            ):
                removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1])
                logger.warning(
                    "The following part of your input was truncated because CLIP can only handle sequences up to"
                    f" {tokenizer.model_max_length} tokens: {removed_text}"
                )

            prompt_embeds = text_encoder(text_input_ids.to(DEVICE), output_hidden_states=True)

            # We are only ALWAYS interested in the pooled output of the final text encoder
            pooled_prompt_embeds = prompt_embeds[0]
            # Penultimate hidden state is the standard SDXL conditioning representation.
            prompt_embeds = prompt_embeds.hidden_states[-2]
            prompt_embeds_list.append(prompt_embeds)

        # Concatenate both encoders' embeddings along the feature dimension.
        prompt_embeds = torch.concat(prompt_embeds_list, dim=-1)
        prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=DEVICE)

        # define initial latents
        latents = self.image2latent(image, generator=None)
        start_latents = latents

        # Derive pixel-space size from latent size for the SDXL micro-conditioning ids.
        height, width = latents.shape[-2:]
        height = height * self.vae_scale_factor
        width = width * self.vae_scale_factor
        original_size = (height, width)
        target_size = (height, width)
        negative_original_size = original_size
        negative_target_size = target_size
        add_text_embeds = pooled_prompt_embeds
        text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1])
        add_time_ids, add_neg_time_ids = self._get_add_time_ids(
            original_size,
            crops_coords_top_left,
            target_size,
            aesthetic_score,
            negative_aesthetic_score,
            negative_original_size,
            negative_crops_coords_top_left,
            negative_target_size,
            dtype=prompt_embeds.dtype,
            text_encoder_projection_dim=text_encoder_projection_dim,
        )
        add_time_ids = add_time_ids.repeat(batch_size, 1).to(DEVICE)

        # interactive sampling
        self.scheduler.set_timesteps(num_inference_steps)
        latents_list = [latents]
        pred_x0_list = []
        # Timesteps are iterated in reverse so noise is progressively *added*.
        # for i, t in enumerate(tqdm(reversed(self.scheduler.timesteps), desc="DDIM Inversion")):
        for i, t in enumerate(reversed(self.scheduler.timesteps)):
            model_inputs = latents

            # predict the noise
            added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids}
            noise_pred = self.unet(
                model_inputs, t, encoder_hidden_states=prompt_embeds, added_cond_kwargs=added_cond_kwargs
            ).sample

            # compute the previous noise sample x_t-1 -> x_t
            latents, pred_x0 = self.next_step(noise_pred, t, latents)
            """
            if t >= 1 and t < 41:
                latents, pred_x0 = self.next_step_degrade(noise_pred, t, latents, mask)
            else:
                latents, pred_x0 = self.next_step(noise_pred, t, latents)
            """
            latents_list.append(latents)
            pred_x0_list.append(pred_x0)

        if return_intermediates:
            # return the intermediate laters during inversion
            # pred_x0_list = [self.latent2image(img, return_type="np") for img in pred_x0_list]
            # latents_list = [self.latent2image(img, return_type="np") for img in latents_list]
            return latents, latents_list, pred_x0_list
        return latents, start_latents

    def opt(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        x: torch.FloatTensor,
    ):
        """
        predict the sample the next step in the denoise process.

        Re-noises the x0 estimate of every batch element using the *first* element's
        predicted noise as a shared reference direction.
        """
        # Broadcast the first sample's noise prediction across the whole batch.
        ref_noise = model_output[:1, :, :, :].expand(model_output.shape)
        alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
        beta_prod_t = 1 - alpha_prod_t
        pred_x0 = (x - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
        x_opt = alpha_prod_t**0.5 * pred_x0 + (1 - alpha_prod_t) ** 0.5 * ref_noise
        return x_opt, pred_x0

    # NOTE(review): "regiter" is a typo for "register", but `__call__` invokes the method
    # under this exact name, so renaming requires updating that call site in lockstep.
    def regiter_attention_editor_diffusers(self, unet, editor: AttentionBase):
        """
        Register a attention editor to Diffuser Pipeline, refer from [Prompt-to-Prompt]

        Monkey-patches the `forward` of every `Attention` module found under the UNet's
        down/mid/up branches so that `editor` receives (q, k, v, sim, attn, ...) at each
        attention call, and records the number of patched layers on the editor.
        """

        def ca_forward(self, place_in_unet):
            # Returns a replacement forward bound to this attention module (`self` here
            # is the Attention module, not the pipeline).
            def forward(x, encoder_hidden_states=None, attention_mask=None, context=None, mask=None):
                """
                The attention is similar to the original implementation of LDM CrossAttention class
                except adding some modifications on the attention
                """
                if encoder_hidden_states is not None:
                    context = encoder_hidden_states
                if attention_mask is not None:
                    mask = attention_mask

                to_out = self.to_out
                # diffusers wraps the output projection in a ModuleList [Linear, Dropout];
                # use only the projection here.
                if isinstance(to_out, nn.modules.container.ModuleList):
                    to_out = self.to_out[0]
                else:
                    to_out = self.to_out

                h = self.heads
                q = self.to_q(x)
                is_cross = context is not None
                context = context if is_cross else x
                k = self.to_k(context)
                v = self.to_v(context)
                # q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v))
                q, k, v = (rearrange(t, "b n (h d) -> (b h) n d", h=h) for t in (q, k, v))

                sim = torch.einsum("b i d, b j d -> b i j", q, k) * self.scale

                if mask is not None:
                    mask = rearrange(mask, "b ... -> b (...)")
                    max_neg_value = -torch.finfo(sim.dtype).max
                    mask = repeat(mask, "b j -> (b h) () j", h=h)
                    mask = mask[:, None, :].repeat(h, 1, 1)
                    sim.masked_fill_(~mask, max_neg_value)

                attn = sim.softmax(dim=-1)
                # the only difference: the editor computes the attention output so it can
                # redirect/modify attention maps (AAS/SARG).
                out = editor(q, k, v, sim, attn, is_cross, place_in_unet, self.heads, scale=self.scale)

                return to_out(out)

            return forward

        def register_editor(net, count, place_in_unet):
            # NOTE(review): the check below tests `net.__class__` inside the loop over
            # `net`'s children; it likely should test `subnet` — as written, a module that
            # *is* an Attention is only patched if visited as someone's child. Confirm
            # against the upstream Prompt-to-Prompt/MasaCtrl implementation before changing.
            for name, subnet in net.named_children():
                if net.__class__.__name__ == "Attention":  # spatial Transformer layer
                    net.forward = ca_forward(net, place_in_unet)
                    return count + 1
                elif hasattr(net, "children"):
                    count = register_editor(subnet, count, place_in_unet)
            return count

        cross_att_count = 0
        for net_name, net in unet.named_children():
            if "down" in net_name:
                cross_att_count += register_editor(net, 0, "down")
            elif "mid" in net_name:
                cross_att_count += register_editor(net, 0, "mid")
            elif "up" in net_name:
                cross_att_count += register_editor(net, 0, "up")
        editor.num_att_layers = cross_att_count

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        prompt: Union[str, List[str]] = None,
        prompt_2: Optional[Union[str, List[str]]] = None,
        image: PipelineImageInput = None,
        mask_image: PipelineImageInput = None,
        masked_image_latents: torch.FloatTensor = None,
        height: Optional[int] = None,
        width: Optional[int] = None,
        padding_mask_crop: Optional[int] = None,
        strength: float = 0.9999,
        AAS: bool = True,  # AE parameter
        rm_guidance_scale: float = 7.0,  # AE parameter
        ss_steps: int = 9,  # AE parameter
        ss_scale: float = 0.3,  # AE parameter
        AAS_start_step: int = 0,  # AE parameter
        AAS_start_layer: int = 34,  # AE parameter
        AAS_end_layer: int = 70,  # AE parameter
        num_inference_steps: int = 50,
        timesteps: List[int] = None,
        denoising_start: Optional[float] = None,
        denoising_end: Optional[float] = None,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        negative_prompt_2: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt:
Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, prompt_embeds: Optional[torch.FloatTensor] = None, negative_prompt_embeds: Optional[torch.FloatTensor] = None, pooled_prompt_embeds: Optional[torch.FloatTensor] = None, negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None, ip_adapter_image: Optional[PipelineImageInput] = None, ip_adapter_image_embeds: Optional[List[torch.FloatTensor]] = None, output_type: str | None = "pil", return_dict: bool = True, cross_attention_kwargs: Optional[Dict[str, Any]] = None, guidance_rescale: float = 0.0, original_size: Tuple[int, int] = None, crops_coords_top_left: Tuple[int, int] = (0, 0), target_size: Tuple[int, int] = None, negative_original_size: Optional[Tuple[int, int]] = None, negative_crops_coords_top_left: Tuple[int, int] = (0, 0), negative_target_size: Optional[Tuple[int, int]] = None, aesthetic_score: float = 6.0, negative_aesthetic_score: float = 2.5, clip_skip: Optional[int] = None, callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, callback_on_step_end_tensor_inputs: List[str] = ["latents"], **kwargs, ): r""" Function invoked when calling the pipeline for generation. Args: prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. instead. prompt_2 (`str` or `List[str]`, *optional*): The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is used in both text-encoders image (`PIL.Image.Image`): `Image`, or tensor representing an image batch which will be inpainted, *i.e.* parts of the image will be masked out with `mask_image` and repainted according to `prompt`. mask_image (`PIL.Image.Image`): `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be repainted, while black pixels will be preserved. 
If `mask_image` is a PIL image, it will be converted to a single channel (luminance) before use. If it's a tensor, it should contain one color channel (L) instead of 3, so the expected shape would be `(B, H, W, 1)`. height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): The height in pixels of the generated image. This is set to 1024 by default for the best results. Anything below 512 pixels won't work well for [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) and checkpoints that are not specifically fine-tuned on low resolutions. width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): The width in pixels of the generated image. This is set to 1024 by default for the best results. Anything below 512 pixels won't work well for [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) and checkpoints that are not specifically fine-tuned on low resolutions. padding_mask_crop (`int`, *optional*, defaults to `None`): The size of margin in the crop to be applied to the image and masking. If `None`, no crop is applied to image and mask_image. If `padding_mask_crop` is not `None`, it will first find a rectangular region with the same aspect ration of the image and contains all masked area, and then expand that area based on `padding_mask_crop`. The image and mask_image will then be cropped based on the expanded area before resizing to the original image size for inpainting. This is useful when the masked area is small while the image is large and contain information inreleant for inpainging, such as background. strength (`float`, *optional*, defaults to 0.9999): Conceptually, indicates how much to transform the masked portion of the reference `image`. Must be between 0 and 1. `image` will be used as a starting point, adding more noise to it the larger the `strength`. 
The number of denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will be maximum and the denoising process will run for the full number of iterations specified in `num_inference_steps`. A value of 1, therefore, essentially ignores the masked portion of the reference `image`. Note that in the case of `denoising_start` being declared as an integer, the value of `strength` will be ignored. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. timesteps (`List[int]`, *optional*): Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed will be used. Must be in descending order. denoising_start (`float`, *optional*): When specified, indicates the fraction (between 0.0 and 1.0) of the total denoising process to be bypassed before it is initiated. Consequently, the initial part of the denoising process is skipped and it is assumed that the passed `image` is a partly denoised image. Note that when this is specified, strength will be ignored. The `denoising_start` parameter is particularly beneficial when this pipeline is integrated into a "Mixture of Denoisers" multi-pipeline setup, as detailed in [**Refining the Image Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output). denoising_end (`float`, *optional*): When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be completed before it is intentionally prematurely terminated. As a result, the returned sample will still retain a substantial amount of noise (ca. 
final 20% of timesteps still needed) and should be denoised by a successor pipeline that has `denoising_start` set to 0.8 so that it only denoises the final 20% of the scheduler. The denoising_end parameter should ideally be utilized when this pipeline forms a part of a "Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output). guidance_scale (`float`, *optional*, defaults to 7.5): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). negative_prompt_2 (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. 
pooled_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, pooled text embeddings will be generated from `prompt` input argument. negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` input argument. ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters. ip_adapter_image_embeds (`List[torch.FloatTensor]`, *optional*): Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of IP-adapters. Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should contain the negative image embedding if `do_classifier_free_guidance` is set to `True`. If not provided, embeddings are computed from the `ip_adapter_image` input argument. num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) in the DDIM paper: https://huggingface.co/papers/2010.02502. Only applies to [`schedulers.DDIMScheduler`], will be ignored for others. generator (`torch.Generator`, *optional*): One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. 
Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a plain tuple. cross_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled. `original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): For most cases, `target_size` should be set to the desired height and width of the generated image. If not specified it will default to `(height, width)`. Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): To negatively condition the generation process based on a specific image resolution. 
Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): To negatively condition the generation process based on a specific crop coordinates. Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): To negatively condition the generation process based on a target image resolution. It should be as same as the `target_size` for most cases. Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. aesthetic_score (`float`, *optional*, defaults to 6.0): Used to simulate an aesthetic score of the generated image by influencing the positive text condition. Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). negative_aesthetic_score (`float`, *optional*, defaults to 2.5): Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). Can be used to simulate an aesthetic score of the generated image by influencing the negative text condition. clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. 
callback_on_step_end (`Callable`, *optional*): A function that calls at the end of each denoising steps during the inference. The function is called with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by `callback_on_step_end_tensor_inputs`. callback_on_step_end_tensor_inputs (`List`, *optional*): The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the `._callback_tensor_inputs` attribute of your pipeline class. Examples: Returns: [`~pipelines.stable_diffusion.StableDiffusionXLPipelineOutput`] or `tuple`: [`~pipelines.stable_diffusion.StableDiffusionXLPipelineOutput`] if `return_dict` is True, otherwise a `tuple. `tuple. When returning a tuple, the first element is a list with the generated images. """ callback = kwargs.pop("callback", None) callback_steps = kwargs.pop("callback_steps", None) if callback is not None: deprecate( "callback", "1.0.0", "Passing `callback` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", ) if callback_steps is not None: deprecate( "callback_steps", "1.0.0", "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", ) # 0. Default height and width to unet height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor # 1. 
Check inputs self.check_inputs( prompt, prompt_2, image, mask_image, height, width, strength, callback_steps, output_type, negative_prompt, negative_prompt_2, prompt_embeds, negative_prompt_embeds, ip_adapter_image, ip_adapter_image_embeds, callback_on_step_end_tensor_inputs, padding_mask_crop, ) self._guidance_scale = guidance_scale self._guidance_rescale = guidance_rescale self._clip_skip = clip_skip self._cross_attention_kwargs = cross_attention_kwargs self._denoising_end = denoising_end self._denoising_start = denoising_start self._interrupt = False ########### AE parameters self._num_timesteps = num_inference_steps self._rm_guidance_scale = rm_guidance_scale self._AAS = AAS self._ss_steps = ss_steps self._ss_scale = ss_scale self._AAS_start_step = AAS_start_step self._AAS_start_layer = AAS_start_layer self._AAS_end_layer = AAS_end_layer ########### # 2. Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device # 3. Encode input prompt text_encoder_lora_scale = ( self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None ) ( prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds, ) = self.encode_prompt( prompt=prompt, prompt_2=prompt_2, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=self.do_classifier_free_guidance, negative_prompt=negative_prompt, negative_prompt_2=negative_prompt_2, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, lora_scale=text_encoder_lora_scale, clip_skip=self.clip_skip, ) # 4. 
set timesteps def denoising_value_valid(dnv): return isinstance(dnv, float) and 0 < dnv < 1 timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps) timesteps, num_inference_steps = self.get_timesteps( num_inference_steps, strength, device, denoising_start=self.denoising_start if denoising_value_valid(self.denoising_start) else None, ) # check that number of inference steps is not < 1 - as this doesn't make sense if num_inference_steps < 1: raise ValueError( f"After adjusting the num_inference_steps by strength parameter: {strength}, the number of pipeline" f"steps is {num_inference_steps} which is < 1 and not appropriate for this pipeline." ) # at which timestep to set the initial noise (n.b. 50% if strength is 0.5) latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) # create a boolean to check if the strength is set to 1. if so then initialise the latents with pure noise is_strength_max = strength == 1.0 # 5. Preprocess mask and image if padding_mask_crop is not None: crops_coords = self.mask_processor.get_crop_region(mask_image, width, height, pad=padding_mask_crop) resize_mode = "fill" else: crops_coords = None resize_mode = "default" original_image = image init_image = self.image_processor.preprocess( image, height=height, width=width, crops_coords=crops_coords, resize_mode=resize_mode ) init_image = init_image.to(dtype=torch.float32) mask = self.mask_processor.preprocess( mask_image, height=height, width=width, resize_mode=resize_mode, crops_coords=crops_coords ) if masked_image_latents is not None: masked_image = masked_image_latents elif init_image.shape[1] == 4: # if images are in latent space, we can't mask it masked_image = None else: masked_image = init_image * (mask < 0.5) # 6. 
Prepare latent variables num_channels_latents = self.vae.config.latent_channels num_channels_unet = self.unet.config.in_channels return_image_latents = num_channels_unet == 4 add_noise = True if self.denoising_start is None else False latents_outputs = self.prepare_latents( batch_size * num_images_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents, image=init_image, timestep=latent_timestep, is_strength_max=is_strength_max, add_noise=add_noise, return_noise=True, return_image_latents=return_image_latents, ) if return_image_latents: latents, noise, image_latents = latents_outputs else: latents, noise = latents_outputs # 7. Prepare mask latent variables mask, masked_image_latents = self.prepare_mask_latents( mask, masked_image, batch_size * num_images_per_prompt, height, width, prompt_embeds.dtype, device, generator, self.do_classifier_free_guidance, ) # 8. Check that sizes of mask, masked image and latents match if num_channels_unet == 9: # default case for stable-diffusion-v1-5/stable-diffusion-inpainting num_channels_mask = mask.shape[1] num_channels_masked_image = masked_image_latents.shape[1] if num_channels_latents + num_channels_mask + num_channels_masked_image != self.unet.config.in_channels: raise ValueError( f"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects" f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +" f" `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image}" f" = {num_channels_latents + num_channels_masked_image + num_channels_mask}. Please verify the config of" " `pipeline.unet` or your `mask_image` or `image` input." ) elif num_channels_unet != 4: raise ValueError( f"The unet {self.unet.__class__} should have either 4 or 9 input channels, not {self.unet.config.in_channels}." ) # 8.1 Prepare extra step kwargs. 
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # 9. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline height, width = latents.shape[-2:] height = height * self.vae_scale_factor width = width * self.vae_scale_factor original_size = original_size or (height, width) target_size = target_size or (height, width) # 10. Prepare added time ids & embeddings if negative_original_size is None: negative_original_size = original_size if negative_target_size is None: negative_target_size = target_size add_text_embeds = pooled_prompt_embeds if self.text_encoder_2 is None: text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1]) else: text_encoder_projection_dim = self.text_encoder_2.config.projection_dim add_time_ids, add_neg_time_ids = self._get_add_time_ids( original_size, crops_coords_top_left, target_size, aesthetic_score, negative_aesthetic_score, negative_original_size, negative_crops_coords_top_left, negative_target_size, dtype=prompt_embeds.dtype, text_encoder_projection_dim=text_encoder_projection_dim, ) add_time_ids = add_time_ids.repeat(batch_size * num_images_per_prompt, 1) if self.do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0) add_neg_time_ids = add_neg_time_ids.repeat(batch_size * num_images_per_prompt, 1) add_time_ids = torch.cat([add_neg_time_ids, add_time_ids], dim=0) ########### if self.do_self_attention_redirection_guidance: prompt_embeds = torch.cat([prompt_embeds, prompt_embeds], dim=0) add_text_embeds = torch.cat([add_text_embeds, add_text_embeds], dim=0) add_neg_time_ids = add_neg_time_ids.repeat(2, 1) add_time_ids = torch.cat([add_time_ids, add_time_ids], dim=0) ############ prompt_embeds = prompt_embeds.to(device) add_text_embeds = add_text_embeds.to(device) add_time_ids = add_time_ids.to(device) if ip_adapter_image is not None or 
ip_adapter_image_embeds is not None: image_embeds = self.prepare_ip_adapter_image_embeds( ip_adapter_image, ip_adapter_image_embeds, device, batch_size * num_images_per_prompt, self.do_classifier_free_guidance, ) # apply AAS to modify the attention module if self.do_self_attention_redirection_guidance: self._AAS_end_step = int(strength * self._num_timesteps) layer_idx = list(range(self._AAS_start_layer, self._AAS_end_layer)) editor = AAS_XL( self._AAS_start_step, self._AAS_end_step, self._AAS_start_layer, self._AAS_end_layer, layer_idx=layer_idx, mask=mask_image, model_type="SDXL", ss_steps=self._ss_steps, ss_scale=self._ss_scale, ) self.regiter_attention_editor_diffusers(self.unet, editor) # 11. Denoising loop num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) if ( self.denoising_end is not None and self.denoising_start is not None and denoising_value_valid(self.denoising_end) and denoising_value_valid(self.denoising_start) and self.denoising_start >= self.denoising_end ): raise ValueError( f"`denoising_start`: {self.denoising_start} cannot be larger than or equal to `denoising_end`: " + f" {self.denoising_end} when using type float." 
) elif self.denoising_end is not None and denoising_value_valid(self.denoising_end): discrete_timestep_cutoff = int( round( self.scheduler.config.num_train_timesteps - (self.denoising_end * self.scheduler.config.num_train_timesteps) ) ) num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps))) timesteps = timesteps[:num_inference_steps] # 11.1 Optionally get Guidance Scale Embedding timestep_cond = None if self.unet.config.time_cond_proj_dim is not None: guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) timestep_cond = self.get_guidance_scale_embedding( guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim ).to(device=device, dtype=latents.dtype) self._num_timesteps = len(timesteps) with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): if self.interrupt: continue # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents # removal guidance latent_model_input = ( torch.cat([latents] * 2) if self.do_self_attention_redirection_guidance else latents ) # CFG was disabled when SARG was used, and experiments proved that there was little difference in the effect of whether CFG was used or not # latent_model_input_rm = torch.cat([latents]*2) if self.do_self_attention_redirection_guidance else latents # concat latents, mask, masked_image_latents in the channel dimension latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) # latent_model_input = self.scheduler.scale_model_input(latent_model_input_rm, t) if num_channels_unet == 9: latent_model_input = torch.cat([latent_model_input, mask, masked_image_latents], dim=1) # predict the noise residual added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids} if ip_adapter_image is not None or ip_adapter_image_embeds is not None: 
added_cond_kwargs["image_embeds"] = image_embeds noise_pred = self.unet( latent_model_input, t, encoder_hidden_states=prompt_embeds, timestep_cond=timestep_cond, cross_attention_kwargs=self.cross_attention_kwargs, added_cond_kwargs=added_cond_kwargs, return_dict=False, )[0] # perform SARG if self.do_self_attention_redirection_guidance: noise_pred_wo, noise_pred_w = noise_pred.chunk(2) delta = noise_pred_w - noise_pred_wo noise_pred = noise_pred_wo + self._rm_guidance_scale * delta # perform guidance if self.do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) if self.do_classifier_free_guidance and self.guidance_rescale > 0.0: # Based on 3.4. in https://huggingface.co/papers/2305.08891 noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=self.guidance_rescale) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] if num_channels_unet == 4: init_latents_proper = image_latents if self.do_classifier_free_guidance: init_mask, _ = mask.chunk(2) else: init_mask = mask if i < len(timesteps) - 1: noise_timestep = timesteps[i + 1] init_latents_proper = self.scheduler.add_noise( init_latents_proper, noise, torch.tensor([noise_timestep]) ) latents = (1 - init_mask) * init_latents_proper + init_mask * latents if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop("latents", latents) prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) add_text_embeds = callback_outputs.pop("add_text_embeds", add_text_embeds) negative_pooled_prompt_embeds = 
callback_outputs.pop( "negative_pooled_prompt_embeds", negative_pooled_prompt_embeds ) add_time_ids = callback_outputs.pop("add_time_ids", add_time_ids) add_neg_time_ids = callback_outputs.pop("add_neg_time_ids", add_neg_time_ids) mask = callback_outputs.pop("mask", mask) masked_image_latents = callback_outputs.pop("masked_image_latents", masked_image_latents) # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, "order", 1) callback(step_idx, t, latents) if XLA_AVAILABLE: xm.mark_step() if not output_type == "latent": # make sure the VAE is in float32 mode, as it overflows in float16 needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast latents = latents[-1:] if needs_upcasting: self.upcast_vae() latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) # unscale/denormalize the latents # denormalize with the mean and std if available and not None has_latents_mean = hasattr(self.vae.config, "latents_mean") and self.vae.config.latents_mean is not None has_latents_std = hasattr(self.vae.config, "latents_std") and self.vae.config.latents_std is not None if has_latents_mean and has_latents_std: latents_mean = ( torch.tensor(self.vae.config.latents_mean).view(1, 4, 1, 1).to(latents.device, latents.dtype) ) latents_std = ( torch.tensor(self.vae.config.latents_std).view(1, 4, 1, 1).to(latents.device, latents.dtype) ) latents = latents * latents_std / self.vae.config.scaling_factor + latents_mean else: latents = latents / self.vae.config.scaling_factor image = self.vae.decode(latents, return_dict=False)[0] # cast back to fp16 if needed if needs_upcasting: self.vae.to(dtype=torch.float16) else: return StableDiffusionXLPipelineOutput(images=latents) # apply watermark if available if self.watermark is not None: image = 
self.watermark.apply_watermark(image) image = self.image_processor.postprocess(image, output_type=output_type) if padding_mask_crop is not None: image = [self.image_processor.apply_overlay(mask_image, original_image, i, crops_coords) for i in image] # Offload all models self.maybe_free_model_hooks() if not return_dict: return (image,) return StableDiffusionXLPipelineOutput(images=image)
{ "repo_id": "huggingface/diffusers", "file_path": "examples/community/pipeline_stable_diffusion_xl_attentive_eraser.py", "license": "Apache License 2.0", "lines": 2009, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:scripts/extract_lora_from_model.py
""" This script demonstrates how to extract a LoRA checkpoint from a fully finetuned model with the CogVideoX model. To make it work for other models: * Change the model class. Here we use `CogVideoXTransformer3DModel`. For Flux, it would be `FluxTransformer2DModel`, for example. (TODO: more reason to add `AutoModel`). * Spply path to the base checkpoint via `base_ckpt_path`. * Supply path to the fully fine-tuned checkpoint via `--finetune_ckpt_path`. * Change the `--rank` as needed. Example usage: ```bash python extract_lora_from_model.py \ --base_ckpt_path=THUDM/CogVideoX-5b \ --finetune_ckpt_path=finetrainers/cakeify-v0 \ --lora_out_path=cakeify_lora.safetensors ``` Script is adapted from https://github.com/Stability-AI/stability-ComfyUI-nodes/blob/001154622564b17223ce0191803c5fff7b87146c/control_lora_create.py """ import argparse import torch from safetensors.torch import save_file from tqdm.auto import tqdm from diffusers import CogVideoXTransformer3DModel RANK = 64 CLAMP_QUANTILE = 0.99 # Comes from # https://github.com/Stability-AI/stability-ComfyUI-nodes/blob/001154622564b17223ce0191803c5fff7b87146c/control_lora_create.py#L9 def extract_lora(diff, rank): # Important to use CUDA otherwise, very slow! 
if torch.cuda.is_available(): diff = diff.to("cuda") is_conv2d = len(diff.shape) == 4 kernel_size = None if not is_conv2d else diff.size()[2:4] is_conv2d_3x3 = is_conv2d and kernel_size != (1, 1) out_dim, in_dim = diff.size()[0:2] rank = min(rank, in_dim, out_dim) if is_conv2d: if is_conv2d_3x3: diff = diff.flatten(start_dim=1) else: diff = diff.squeeze() U, S, Vh = torch.linalg.svd(diff.float()) U = U[:, :rank] S = S[:rank] U = U @ torch.diag(S) Vh = Vh[:rank, :] dist = torch.cat([U.flatten(), Vh.flatten()]) hi_val = torch.quantile(dist, CLAMP_QUANTILE) low_val = -hi_val U = U.clamp(low_val, hi_val) Vh = Vh.clamp(low_val, hi_val) if is_conv2d: U = U.reshape(out_dim, rank, 1, 1) Vh = Vh.reshape(rank, in_dim, kernel_size[0], kernel_size[1]) return (U.cpu(), Vh.cpu()) def parse_args(): parser = argparse.ArgumentParser() parser.add_argument( "--base_ckpt_path", default=None, type=str, required=True, help="Base checkpoint path from which the model was finetuned. Can be a model ID on the Hub.", ) parser.add_argument( "--base_subfolder", default="transformer", type=str, help="subfolder to load the base checkpoint from if any.", ) parser.add_argument( "--finetune_ckpt_path", default=None, type=str, required=True, help="Fully fine-tuned checkpoint path. Can be a model ID on the Hub.", ) parser.add_argument( "--finetune_subfolder", default=None, type=str, help="subfolder to load the fulle finetuned checkpoint from if any.", ) parser.add_argument("--rank", default=64, type=int) parser.add_argument("--lora_out_path", default=None, type=str, required=True) args = parser.parse_args() if not args.lora_out_path.endswith(".safetensors"): raise ValueError("`lora_out_path` must end with `.safetensors`.") return args @torch.no_grad() def main(args): model_finetuned = CogVideoXTransformer3DModel.from_pretrained( args.finetune_ckpt_path, subfolder=args.finetune_subfolder, torch_dtype=torch.bfloat16 ) state_dict_ft = model_finetuned.state_dict() # Change the `subfolder` as needed. 
base_model = CogVideoXTransformer3DModel.from_pretrained( args.base_ckpt_path, subfolder=args.base_subfolder, torch_dtype=torch.bfloat16 ) state_dict = base_model.state_dict() output_dict = {} for k in tqdm(state_dict, desc="Extracting LoRA..."): original_param = state_dict[k] finetuned_param = state_dict_ft[k] if len(original_param.shape) >= 2: diff = finetuned_param.float() - original_param.float() out = extract_lora(diff, RANK) name = k if name.endswith(".weight"): name = name[: -len(".weight")] down_key = "{}.lora_A.weight".format(name) up_key = "{}.lora_B.weight".format(name) output_dict[up_key] = out[0].contiguous().to(finetuned_param.dtype) output_dict[down_key] = out[1].contiguous().to(finetuned_param.dtype) prefix = "transformer" if "transformer" in base_model.__class__.__name__.lower() else "unet" output_dict = {f"{prefix}.{k}": v for k, v in output_dict.items()} save_file(output_dict, args.lora_out_path) print(f"LoRA saved and it contains {len(output_dict)} keys.") if __name__ == "__main__": args = parse_args() main(args)
{ "repo_id": "huggingface/diffusers", "file_path": "scripts/extract_lora_from_model.py", "license": "Apache License 2.0", "lines": 121, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
huggingface/diffusers:src/diffusers/hooks/hooks.py
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import functools
from typing import Any

import torch

from ..utils.logging import get_logger
from ..utils.torch_utils import unwrap_module


logger = get_logger(__name__)  # pylint: disable=invalid-name


class BaseState:
    # Abstract base for per-context hook state objects managed by `StateManager`.
    def reset(self, *args, **kwargs) -> None:
        raise NotImplementedError(
            "BaseState::reset is not implemented. Please implement this method in the derived class."
        )


class StateManager:
    # Lazily creates and caches one state object (an instance of `state_cls`)
    # per named context, so a stateful hook can keep independent state for
    # e.g. different denoising branches.
    def __init__(self, state_cls: BaseState, init_args=None, init_kwargs=None):
        self._state_cls = state_cls
        self._init_args = init_args if init_args is not None else ()
        self._init_kwargs = init_kwargs if init_kwargs is not None else {}
        self._state_cache = {}
        self._current_context = None

    def get_state(self):
        """Return the state object for the current context, creating it on first access.

        Raises:
            ValueError: If no context has been set via `set_context`.
        """
        if self._current_context is None:
            raise ValueError("No context is set. Please set a context before retrieving the state.")
        if self._current_context not in self._state_cache.keys():
            # Lazily instantiate the state for this context on first use.
            self._state_cache[self._current_context] = self._state_cls(*self._init_args, **self._init_kwargs)
        return self._state_cache[self._current_context]

    def set_context(self, name: str) -> None:
        # Select which cached state `get_state` will return.
        self._current_context = name

    def reset(self, *args, **kwargs) -> None:
        # Reset and drop all cached per-context states, and clear the context.
        # Iterate over a list copy since entries are popped during iteration.
        for name, state in list(self._state_cache.items()):
            state.reset(*args, **kwargs)
            self._state_cache.pop(name)
        self._current_context = None


class ModelHook:
    r"""
    A hook that contains callbacks to be executed just before and after the forward method of a model.
    """

    # Set to True in subclasses that carry mutable state; such subclasses must
    # also override `reset_state`.
    _is_stateful = False

    def __init__(self):
        # Set by `HookRegistry.register_hook` after the hook is installed.
        self.fn_ref: "HookFunctionReference" = None

    def initialize_hook(self, module: torch.nn.Module) -> torch.nn.Module:
        r"""
        Hook that is executed when a model is initialized.

        Args:
            module (`torch.nn.Module`):
                The module attached to this hook.
        """
        return module

    def deinitalize_hook(self, module: torch.nn.Module) -> torch.nn.Module:
        r"""
        Hook that is executed when a model is deinitialized.

        NOTE(review): the name keeps the historical "deinitalize" spelling;
        `HookRegistry.remove_hook` calls it by this exact name.

        Args:
            module (`torch.nn.Module`):
                The module attached to this hook.
        """
        return module

    def pre_forward(self, module: torch.nn.Module, *args, **kwargs) -> tuple[tuple[Any], dict[str, Any]]:
        r"""
        Hook that is executed just before the forward method of the model.

        Args:
            module (`torch.nn.Module`):
                The module whose forward pass will be executed just after this event.
            args (`tuple[Any]`):
                The positional arguments passed to the module.
            kwargs (`dict[Str, Any]`):
                The keyword arguments passed to the module.

        Returns:
            `tuple[tuple[Any], dict[Str, Any]]`: A tuple with the treated `args` and `kwargs`.
        """
        return args, kwargs

    def post_forward(self, module: torch.nn.Module, output: Any) -> Any:
        r"""
        Hook that is executed just after the forward method of the model.

        Args:
            module (`torch.nn.Module`):
                The module whose forward pass been executed just before this event.
            output (`Any`):
                The output of the module.

        Returns:
            `Any`: The processed `output`.
        """
        return output

    def detach_hook(self, module: torch.nn.Module) -> torch.nn.Module:
        r"""
        Hook that is executed when the hook is detached from a module.

        Args:
            module (`torch.nn.Module`):
                The module detached from this hook.
        """
        return module

    def reset_state(self, module: torch.nn.Module):
        # Stateful subclasses must override this; the default implementation
        # only errors when `_is_stateful` is True and is a no-op otherwise.
        if self._is_stateful:
            raise NotImplementedError("This hook is stateful and needs to implement the `reset_state` method.")
        return module

    def _set_context(self, module: torch.nn.Module, name: str) -> None:
        # Iterate over all attributes of the hook to see if any of them have the type `StateManager`. If so, call `set_context` on them.
        for attr_name in dir(self):
            attr = getattr(self, attr_name)
            if isinstance(attr, StateManager):
                attr.set_context(name)
        return module


class HookFunctionReference:
    def __init__(self) -> None:
        """A container class that maintains mutable references to forward pass functions in a hook chain.

        Its mutable nature allows the hook system to modify the execution chain dynamically without rebuilding the
        entire forward pass structure.

        Attributes:
            pre_forward: A callable that processes inputs before the main forward pass.
            post_forward: A callable that processes outputs after the main forward pass.
            forward: The current forward function in the hook chain.
            original_forward: The original forward function, stored when a hook provides a custom new_forward.

        The class enables hook removal by allowing updates to the forward chain through reference modification rather
        than requiring reconstruction of the entire chain. When a hook is removed, only the relevant references need to
        be updated, preserving the execution order of the remaining hooks.
        """
        self.pre_forward = None
        self.post_forward = None
        self.forward = None
        self.original_forward = None


class HookRegistry:
    # Per-module registry of `ModelHook`s. Installing a hook wraps the module's
    # `forward`; hooks therefore execute in reverse registration order on the
    # way in (last registered runs first) and in registration order on the way out.
    def __init__(self, module_ref: torch.nn.Module) -> None:
        super().__init__()

        self.hooks: dict[str, ModelHook] = {}

        self._module_ref = module_ref
        # `_hook_order` and `_fn_refs` are kept in lockstep: index i in both
        # refers to the same registered hook.
        self._hook_order = []
        self._fn_refs = []

    def register_hook(self, hook: ModelHook, name: str) -> None:
        # Install `hook` under `name`, wrapping the module's current forward.
        if name in self.hooks.keys():
            raise ValueError(
                f"Hook with name {name} already exists in the registry. Please use a different name or "
                f"first remove the existing hook and then add a new one."
            )

        self._module_ref = hook.initialize_hook(self._module_ref)

        def create_new_forward(function_reference: HookFunctionReference):
            # Closure reads through `function_reference` at call time, so
            # removing a hook later only needs to mutate the reference.
            def new_forward(module, *args, **kwargs):
                args, kwargs = function_reference.pre_forward(module, *args, **kwargs)
                output = function_reference.forward(*args, **kwargs)
                return function_reference.post_forward(module, output)

            return new_forward

        forward = self._module_ref.forward

        fn_ref = HookFunctionReference()
        fn_ref.pre_forward = hook.pre_forward
        fn_ref.post_forward = hook.post_forward
        fn_ref.forward = forward

        if hasattr(hook, "new_forward"):
            # Hooks may replace the forward entirely; keep the previous forward
            # around so `remove_hook` can splice it back in.
            fn_ref.original_forward = forward
            fn_ref.forward = functools.update_wrapper(
                functools.partial(hook.new_forward, self._module_ref), hook.new_forward
            )

        rewritten_forward = create_new_forward(fn_ref)
        self._module_ref.forward = functools.update_wrapper(
            functools.partial(rewritten_forward, self._module_ref), rewritten_forward
        )

        hook.fn_ref = fn_ref
        self.hooks[name] = hook
        self._hook_order.append(name)
        self._fn_refs.append(fn_ref)

    def get_hook(self, name: str) -> ModelHook | None:
        # Return the hook registered under `name`, or None if absent.
        return self.hooks.get(name, None)

    def remove_hook(self, name: str, recurse: bool = True) -> None:
        # Remove the hook and splice its predecessor's forward back into the
        # chain, preserving the order of the remaining hooks.
        if name in self.hooks.keys():
            num_hooks = len(self._hook_order)
            hook = self.hooks[name]
            index = self._hook_order.index(name)
            fn_ref = self._fn_refs[index]

            old_forward = fn_ref.forward
            if fn_ref.original_forward is not None:
                # The hook had replaced forward via `new_forward`; restore the
                # forward it had captured at registration time.
                old_forward = fn_ref.original_forward

            if index == num_hooks - 1:
                # Outermost hook: the module's forward becomes the inner chain.
                self._module_ref.forward = old_forward
            else:
                # Interior hook: point the next hook's reference past this one.
                self._fn_refs[index + 1].forward = old_forward

            self._module_ref = hook.deinitalize_hook(self._module_ref)
            del self.hooks[name]
            self._hook_order.pop(index)
            self._fn_refs.pop(index)

        if recurse:
            for module_name, module in self._module_ref.named_modules():
                if module_name == "":
                    continue
                if hasattr(module, "_diffusers_hook"):
                    module._diffusers_hook.remove_hook(name, recurse=False)

    def reset_stateful_hooks(self, recurse: bool = True) -> None:
        # Reset all stateful hooks, outermost first (reverse registration order).
        for hook_name in reversed(self._hook_order):
            hook = self.hooks[hook_name]
            if hook._is_stateful:
                hook.reset_state(self._module_ref)

        if recurse:
            for module_name, module in unwrap_module(self._module_ref).named_modules():
                if module_name == "":
                    continue
                module = unwrap_module(module)
                if hasattr(module, "_diffusers_hook"):
                    module._diffusers_hook.reset_stateful_hooks(recurse=False)

    @classmethod
    def check_if_exists_or_initialize(cls, module: torch.nn.Module) -> "HookRegistry":
        # Idempotently attach a registry to `module` under `_diffusers_hook`.
        if not hasattr(module, "_diffusers_hook"):
            module._diffusers_hook = cls(module)
        return module._diffusers_hook

    def _set_context(self, name: str | None = None) -> None:
        # Propagate the context name to every stateful hook here and on all
        # submodules that carry their own registry.
        for hook_name in reversed(self._hook_order):
            hook = self.hooks[hook_name]
            if hook._is_stateful:
                hook._set_context(self._module_ref, name)

        for module_name, module in unwrap_module(self._module_ref).named_modules():
            if module_name == "":
                continue
            module = unwrap_module(module)
            if hasattr(module, "_diffusers_hook"):
                module._diffusers_hook._set_context(name)

    def __repr__(self) -> str:
        registry_repr = ""
        for i, hook_name in enumerate(self._hook_order):
            # Prefer a hook's custom __repr__ when it defines one.
            if self.hooks[hook_name].__class__.__repr__ is not object.__repr__:
                hook_repr = self.hooks[hook_name].__repr__()
            else:
                hook_repr = self.hooks[hook_name].__class__.__name__
            registry_repr += f" ({i}) {hook_name} - {hook_repr}"
            if i < len(self._hook_order) - 1:
                registry_repr += "\n"
        return f"HookRegistry(\n{registry_repr}\n)"
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/hooks/hooks.py", "license": "Apache License 2.0", "lines": 233, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:src/diffusers/hooks/layerwise_casting.py
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import re
from typing import Type

import torch

from ..utils import get_logger, is_peft_available, is_peft_version
from ._common import _GO_LC_SUPPORTED_PYTORCH_LAYERS
from .hooks import HookRegistry, ModelHook


logger = get_logger(__name__)  # pylint: disable=invalid-name


# fmt: off
# Registry names under which the hooks below are installed.
_LAYERWISE_CASTING_HOOK = "layerwise_casting"
_PEFT_AUTOCAST_DISABLE_HOOK = "peft_autocast_disable"
# Layers matched by these patterns keep their original dtype by default
# (embeddings/norms/projections are precision-sensitive).
DEFAULT_SKIP_MODULES_PATTERN = ("pos_embed", "patch_embed", "norm", "^proj_in$", "^proj_out$")
# fmt: on


# `disable_input_dtype_casting` only exists in peft > 0.14.0.
_SHOULD_DISABLE_PEFT_INPUT_AUTOCAST = is_peft_available() and is_peft_version(">", "0.14.0")
if _SHOULD_DISABLE_PEFT_INPUT_AUTOCAST:
    from peft.helpers import disable_input_dtype_casting
    from peft.tuners.tuners_utils import BaseTunerLayer


class LayerwiseCastingHook(ModelHook):
    r"""
    A hook that casts the weights of a module to a high precision dtype for computation, and to a low precision dtype
    for storage. This process may lead to quality loss in the output, but can significantly reduce the memory
    footprint.
    """

    _is_stateful = False

    def __init__(self, storage_dtype: torch.dtype, compute_dtype: torch.dtype, non_blocking: bool) -> None:
        # storage_dtype: dtype the weights live in between forward passes.
        # compute_dtype: dtype the weights are upcast to for the forward pass.
        # non_blocking: whether the `.to()` casts are issued non-blocking.
        self.storage_dtype = storage_dtype
        self.compute_dtype = compute_dtype
        self.non_blocking = non_blocking

    def initialize_hook(self, module: torch.nn.Module):
        # Immediately downcast the module's weights for storage on attach.
        module.to(dtype=self.storage_dtype, non_blocking=self.non_blocking)
        return module

    def deinitalize_hook(self, module: torch.nn.Module):
        raise NotImplementedError(
            "LayerwiseCastingHook does not support deinitialization. A model once enabled with layerwise casting will "
            "have casted its weights to a lower precision dtype for storage. Casting this back to the original dtype "
            "will lead to precision loss, which might have an impact on the model's generation quality. The model should "
            "be re-initialized and loaded in the original dtype."
        )

    def pre_forward(self, module: torch.nn.Module, *args, **kwargs):
        # Upcast to the compute dtype just before the forward pass runs.
        module.to(dtype=self.compute_dtype, non_blocking=self.non_blocking)
        return args, kwargs

    def post_forward(self, module: torch.nn.Module, output):
        # Downcast back to the storage dtype right after the forward pass.
        module.to(dtype=self.storage_dtype, non_blocking=self.non_blocking)
        return output


class PeftInputAutocastDisableHook(ModelHook):
    r"""
    A hook that disables the casting of inputs to the module weight dtype during the forward pass. By default, PEFT
    casts the inputs to the weight dtype of the module, which can lead to precision loss.

    The reasons for needing this are:
        - If we don't add PEFT layers' weight names to `skip_modules_pattern` when applying layerwise casting, the
          inputs will be casted to the, possibly lower precision, storage dtype. Reference:
          https://github.com/huggingface/peft/blob/0facdebf6208139cbd8f3586875acb378813dd97/src/peft/tuners/lora/layer.py#L706
        - We can, on our end, use something like accelerate's `send_to_device` but for dtypes. This way, we can ensure
          that the inputs are casted to the computation dtype correctly always. However, there are two goals we are
          hoping to achieve:
              1. Making forward implementations independent of device/dtype casting operations as much as possible.
              2. Performing inference without losing information from casting to different precisions. With the current
                 PEFT implementation (as linked in the reference above), and assuming running layerwise casting
                 inference with storage_dtype=torch.float8_e4m3fn and compute_dtype=torch.bfloat16, inputs are cast to
                 torch.float8_e4m3fn in the lora layer. We will then upcast back to torch.bfloat16 when we continue the
                 forward pass in PEFT linear forward or Diffusers layer forward, with a `send_to_dtype` operation from
                 LayerwiseCastingHook. This will be a lossy operation and result in poorer generation quality.
    """

    def new_forward(self, module: torch.nn.Module, *args, **kwargs):
        # Run the original forward with PEFT's input dtype casting suppressed.
        with disable_input_dtype_casting(module):
            return self.fn_ref.original_forward(*args, **kwargs)


def apply_layerwise_casting(
    module: torch.nn.Module,
    storage_dtype: torch.dtype,
    compute_dtype: torch.dtype,
    skip_modules_pattern: str | tuple[str, ...] = "auto",
    skip_modules_classes: tuple[Type[torch.nn.Module], ...] | None = None,
    non_blocking: bool = False,
) -> None:
    r"""
    Applies layerwise casting to a given module. The module expected here is a Diffusers ModelMixin but it can be any
    nn.Module using diffusers layers or pytorch primitives.

    Example:

    ```python
    >>> import torch
    >>> from diffusers import CogVideoXTransformer3DModel

    >>> transformer = CogVideoXTransformer3DModel.from_pretrained(
    ...     model_id, subfolder="transformer", torch_dtype=torch.bfloat16
    ... )

    >>> apply_layerwise_casting(
    ...     transformer,
    ...     storage_dtype=torch.float8_e4m3fn,
    ...     compute_dtype=torch.bfloat16,
    ...     skip_modules_pattern=["patch_embed", "norm", "proj_out"],
    ...     non_blocking=True,
    ... )
    ```

    Args:
        module (`torch.nn.Module`):
            The module whose leaf modules will be cast to a high precision dtype for computation, and to a low
            precision dtype for storage.
        storage_dtype (`torch.dtype`):
            The dtype to cast the module to before/after the forward pass for storage.
        compute_dtype (`torch.dtype`):
            The dtype to cast the module to during the forward pass for computation.
        skip_modules_pattern (`tuple[str, ...]`, defaults to `"auto"`):
            A list of patterns to match the names of the modules to skip during the layerwise casting process. If set
            to `"auto"`, the default patterns are used. If set to `None`, no modules are skipped. If set to `None`
            alongside `skip_modules_classes` being `None`, the layerwise casting is applied directly to the module
            instead of its internal submodules.
        skip_modules_classes (`tuple[Type[torch.nn.Module], ...]`, defaults to `None`):
            A list of module classes to skip during the layerwise casting process.
        non_blocking (`bool`, defaults to `False`):
            If `True`, the weight casting operations are non-blocking.
    """
    if skip_modules_pattern == "auto":
        skip_modules_pattern = DEFAULT_SKIP_MODULES_PATTERN

    if skip_modules_classes is None and skip_modules_pattern is None:
        # Nothing to skip: hook the given module directly instead of recursing.
        apply_layerwise_casting_hook(module, storage_dtype, compute_dtype, non_blocking)
        return

    _apply_layerwise_casting(
        module,
        storage_dtype,
        compute_dtype,
        skip_modules_pattern,
        skip_modules_classes,
        non_blocking,
    )
    _disable_peft_input_autocast(module)


def _apply_layerwise_casting(
    module: torch.nn.Module,
    storage_dtype: torch.dtype,
    compute_dtype: torch.dtype,
    skip_modules_pattern: tuple[str, ...] | None = None,
    skip_modules_classes: tuple[Type[torch.nn.Module], ...] | None = None,
    non_blocking: bool = False,
    _prefix: str = "",
) -> None:
    # Recursive worker: walks the module tree, attaching a casting hook to each
    # supported leaf layer unless its class or dotted name (`_prefix`) matches
    # a skip rule. Skipping a module skips its entire subtree.
    should_skip = (skip_modules_classes is not None and isinstance(module, skip_modules_classes)) or (
        skip_modules_pattern is not None and any(re.search(pattern, _prefix) for pattern in skip_modules_pattern)
    )
    if should_skip:
        logger.debug(f'Skipping layerwise casting for layer "{_prefix}"')
        return

    if isinstance(module, _GO_LC_SUPPORTED_PYTORCH_LAYERS):
        logger.debug(f'Applying layerwise casting to layer "{_prefix}"')
        apply_layerwise_casting_hook(module, storage_dtype, compute_dtype, non_blocking)
        return

    for name, submodule in module.named_children():
        layer_name = f"{_prefix}.{name}" if _prefix else name
        _apply_layerwise_casting(
            submodule,
            storage_dtype,
            compute_dtype,
            skip_modules_pattern,
            skip_modules_classes,
            non_blocking,
            _prefix=layer_name,
        )


def apply_layerwise_casting_hook(
    module: torch.nn.Module, storage_dtype: torch.dtype, compute_dtype: torch.dtype, non_blocking: bool
) -> None:
    r"""
    Applies a `LayerwiseCastingHook` to a given module.

    Args:
        module (`torch.nn.Module`):
            The module to attach the hook to.
        storage_dtype (`torch.dtype`):
            The dtype to cast the module to before the forward pass.
        compute_dtype (`torch.dtype`):
            The dtype to cast the module to during the forward pass.
        non_blocking (`bool`):
            If `True`, the weight casting operations are non-blocking.
    """
    registry = HookRegistry.check_if_exists_or_initialize(module)
    hook = LayerwiseCastingHook(storage_dtype, compute_dtype, non_blocking)
    registry.register_hook(hook, _LAYERWISE_CASTING_HOOK)


def _is_layerwise_casting_active(module: torch.nn.Module) -> bool:
    # True if any submodule (including `module` itself) carries a registered
    # layerwise-casting hook.
    for submodule in module.modules():
        if (
            hasattr(submodule, "_diffusers_hook")
            and submodule._diffusers_hook.get_hook(_LAYERWISE_CASTING_HOOK) is not None
        ):
            return True
    return False


def _disable_peft_input_autocast(module: torch.nn.Module) -> None:
    # For every PEFT tuner layer that has layerwise casting active somewhere in
    # its subtree, install a hook that suppresses PEFT's input dtype casting
    # (see PeftInputAutocastDisableHook docstring). No-op on peft <= 0.14.0.
    if not _SHOULD_DISABLE_PEFT_INPUT_AUTOCAST:
        return
    for submodule in module.modules():
        if isinstance(submodule, BaseTunerLayer) and _is_layerwise_casting_active(submodule):
            registry = HookRegistry.check_if_exists_or_initialize(submodule)
            hook = PeftInputAutocastDisableHook()
            registry.register_hook(hook, _PEFT_AUTOCAST_DISABLE_HOOK)
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/hooks/layerwise_casting.py", "license": "Apache License 2.0", "lines": 200, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license