sample_id
stringlengths
21
196
text
stringlengths
105
936k
metadata
dict
category
stringclasses
6 values
huggingface/diffusers:src/diffusers/pipelines/ltx2/connectors.py
import torch import torch.nn as nn import torch.nn.functional as F from ...configuration_utils import ConfigMixin, register_to_config from ...loaders import PeftAdapterMixin from ...models.attention import FeedForward from ...models.modeling_utils import ModelMixin from ...models.transformers.transformer_ltx2 import LTX2Attention, LTX2AudioVideoAttnProcessor class LTX2RotaryPosEmbed1d(nn.Module): """ 1D rotary positional embeddings (RoPE) for the LTX 2.0 text encoder connectors. """ def __init__( self, dim: int, base_seq_len: int = 4096, theta: float = 10000.0, double_precision: bool = True, rope_type: str = "interleaved", num_attention_heads: int = 32, ): super().__init__() if rope_type not in ["interleaved", "split"]: raise ValueError(f"{rope_type=} not supported. Choose between 'interleaved' and 'split'.") self.dim = dim self.base_seq_len = base_seq_len self.theta = theta self.double_precision = double_precision self.rope_type = rope_type self.num_attention_heads = num_attention_heads def forward( self, batch_size: int, pos: int, device: str | torch.device, ) -> tuple[torch.Tensor, torch.Tensor]: # 1. Get 1D position ids grid_1d = torch.arange(pos, dtype=torch.float32, device=device) # Get fractional indices relative to self.base_seq_len grid_1d = grid_1d / self.base_seq_len grid = grid_1d.unsqueeze(0).repeat(batch_size, 1) # [batch_size, seq_len] # 2. Calculate 1D RoPE frequencies num_rope_elems = 2 # 1 (because 1D) * 2 (for cos, sin) = 2 freqs_dtype = torch.float64 if self.double_precision else torch.float32 pow_indices = torch.pow( self.theta, torch.linspace(start=0.0, end=1.0, steps=self.dim // num_rope_elems, dtype=freqs_dtype, device=device), ) freqs = (pow_indices * torch.pi / 2.0).to(dtype=torch.float32) # 3. Matrix-vector outer product between pos ids of shape (batch_size, seq_len) and freqs vector of shape # (self.dim // 2,). freqs = (grid.unsqueeze(-1) * 2 - 1) * freqs # [B, seq_len, self.dim // 2] # 4. 
Get real, interleaved (cos, sin) frequencies, padded to self.dim if self.rope_type == "interleaved": cos_freqs = freqs.cos().repeat_interleave(2, dim=-1) sin_freqs = freqs.sin().repeat_interleave(2, dim=-1) if self.dim % num_rope_elems != 0: cos_padding = torch.ones_like(cos_freqs[:, :, : self.dim % num_rope_elems]) sin_padding = torch.zeros_like(sin_freqs[:, :, : self.dim % num_rope_elems]) cos_freqs = torch.cat([cos_padding, cos_freqs], dim=-1) sin_freqs = torch.cat([sin_padding, sin_freqs], dim=-1) elif self.rope_type == "split": expected_freqs = self.dim // 2 current_freqs = freqs.shape[-1] pad_size = expected_freqs - current_freqs cos_freq = freqs.cos() sin_freq = freqs.sin() if pad_size != 0: cos_padding = torch.ones_like(cos_freq[:, :, :pad_size]) sin_padding = torch.zeros_like(sin_freq[:, :, :pad_size]) cos_freq = torch.concatenate([cos_padding, cos_freq], axis=-1) sin_freq = torch.concatenate([sin_padding, sin_freq], axis=-1) # Reshape freqs to be compatible with multi-head attention b = cos_freq.shape[0] t = cos_freq.shape[1] cos_freq = cos_freq.reshape(b, t, self.num_attention_heads, -1) sin_freq = sin_freq.reshape(b, t, self.num_attention_heads, -1) cos_freqs = torch.swapaxes(cos_freq, 1, 2) # (B,H,T,D//2) sin_freqs = torch.swapaxes(sin_freq, 1, 2) # (B,H,T,D//2) return cos_freqs, sin_freqs class LTX2TransformerBlock1d(nn.Module): def __init__( self, dim: int, num_attention_heads: int, attention_head_dim: int, activation_fn: str = "gelu-approximate", eps: float = 1e-6, rope_type: str = "interleaved", ): super().__init__() self.norm1 = torch.nn.RMSNorm(dim, eps=eps, elementwise_affine=False) self.attn1 = LTX2Attention( query_dim=dim, heads=num_attention_heads, kv_heads=num_attention_heads, dim_head=attention_head_dim, processor=LTX2AudioVideoAttnProcessor(), rope_type=rope_type, ) self.norm2 = torch.nn.RMSNorm(dim, eps=eps, elementwise_affine=False) self.ff = FeedForward(dim, activation_fn=activation_fn) def forward( self, hidden_states: torch.Tensor, 
attention_mask: torch.Tensor | None = None, rotary_emb: torch.Tensor | None = None, ) -> torch.Tensor: norm_hidden_states = self.norm1(hidden_states) attn_hidden_states = self.attn1(norm_hidden_states, attention_mask=attention_mask, query_rotary_emb=rotary_emb) hidden_states = hidden_states + attn_hidden_states norm_hidden_states = self.norm2(hidden_states) ff_hidden_states = self.ff(norm_hidden_states) hidden_states = hidden_states + ff_hidden_states return hidden_states class LTX2ConnectorTransformer1d(nn.Module): """ A 1D sequence transformer for modalities such as text. In LTX 2.0, this is used to process the text encoder hidden states for each of the video and audio streams. """ _supports_gradient_checkpointing = True def __init__( self, num_attention_heads: int = 30, attention_head_dim: int = 128, num_layers: int = 2, num_learnable_registers: int | None = 128, rope_base_seq_len: int = 4096, rope_theta: float = 10000.0, rope_double_precision: bool = True, eps: float = 1e-6, causal_temporal_positioning: bool = False, rope_type: str = "interleaved", ): super().__init__() self.num_attention_heads = num_attention_heads self.inner_dim = num_attention_heads * attention_head_dim self.causal_temporal_positioning = causal_temporal_positioning self.num_learnable_registers = num_learnable_registers self.learnable_registers = None if num_learnable_registers is not None: init_registers = torch.rand(num_learnable_registers, self.inner_dim) * 2.0 - 1.0 self.learnable_registers = torch.nn.Parameter(init_registers) self.rope = LTX2RotaryPosEmbed1d( self.inner_dim, base_seq_len=rope_base_seq_len, theta=rope_theta, double_precision=rope_double_precision, rope_type=rope_type, num_attention_heads=num_attention_heads, ) self.transformer_blocks = torch.nn.ModuleList( [ LTX2TransformerBlock1d( dim=self.inner_dim, num_attention_heads=num_attention_heads, attention_head_dim=attention_head_dim, rope_type=rope_type, ) for _ in range(num_layers) ] ) self.norm_out = 
torch.nn.RMSNorm(self.inner_dim, eps=eps, elementwise_affine=False) self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, attention_mask: torch.Tensor | None = None, attn_mask_binarize_threshold: float = -9000.0, ) -> tuple[torch.Tensor, torch.Tensor]: # hidden_states shape: [batch_size, seq_len, hidden_dim] # attention_mask shape: [batch_size, seq_len] or [batch_size, 1, 1, seq_len] batch_size, seq_len, _ = hidden_states.shape # 1. Replace padding with learned registers, if using if self.learnable_registers is not None: if seq_len % self.num_learnable_registers != 0: raise ValueError( f"The `hidden_states` sequence length {hidden_states.shape[1]} should be divisible by the number" f" of learnable registers {self.num_learnable_registers}" ) num_register_repeats = seq_len // self.num_learnable_registers registers = torch.tile(self.learnable_registers, (num_register_repeats, 1)) # [seq_len, inner_dim] binary_attn_mask = (attention_mask >= attn_mask_binarize_threshold).int() if binary_attn_mask.ndim == 4: binary_attn_mask = binary_attn_mask.squeeze(1).squeeze(1) # [B, 1, 1, L] --> [B, L] hidden_states_non_padded = [hidden_states[i, binary_attn_mask[i].bool(), :] for i in range(batch_size)] valid_seq_lens = [x.shape[0] for x in hidden_states_non_padded] pad_lengths = [seq_len - valid_seq_len for valid_seq_len in valid_seq_lens] padded_hidden_states = [ F.pad(x, pad=(0, 0, 0, p), value=0) for x, p in zip(hidden_states_non_padded, pad_lengths) ] padded_hidden_states = torch.cat([x.unsqueeze(0) for x in padded_hidden_states], dim=0) # [B, L, D] flipped_mask = torch.flip(binary_attn_mask, dims=[1]).unsqueeze(-1) # [B, L, 1] hidden_states = flipped_mask * padded_hidden_states + (1 - flipped_mask) * registers # Overwrite attention_mask with an all-zeros mask if using registers. attention_mask = torch.zeros_like(attention_mask) # 2. 
Calculate 1D RoPE positional embeddings rotary_emb = self.rope(batch_size, seq_len, device=hidden_states.device) # 3. Run 1D transformer blocks for block in self.transformer_blocks: if torch.is_grad_enabled() and self.gradient_checkpointing: hidden_states = self._gradient_checkpointing_func(block, hidden_states, attention_mask, rotary_emb) else: hidden_states = block(hidden_states, attention_mask=attention_mask, rotary_emb=rotary_emb) hidden_states = self.norm_out(hidden_states) return hidden_states, attention_mask class LTX2TextConnectors(ModelMixin, PeftAdapterMixin, ConfigMixin): """ Text connector stack used by LTX 2.0 to process the packed text encoder hidden states for both the video and audio streams. """ @register_to_config def __init__( self, caption_channels: int, text_proj_in_factor: int, video_connector_num_attention_heads: int, video_connector_attention_head_dim: int, video_connector_num_layers: int, video_connector_num_learnable_registers: int | None, audio_connector_num_attention_heads: int, audio_connector_attention_head_dim: int, audio_connector_num_layers: int, audio_connector_num_learnable_registers: int | None, connector_rope_base_seq_len: int, rope_theta: float, rope_double_precision: bool, causal_temporal_positioning: bool, rope_type: str = "interleaved", ): super().__init__() self.text_proj_in = nn.Linear(caption_channels * text_proj_in_factor, caption_channels, bias=False) self.video_connector = LTX2ConnectorTransformer1d( num_attention_heads=video_connector_num_attention_heads, attention_head_dim=video_connector_attention_head_dim, num_layers=video_connector_num_layers, num_learnable_registers=video_connector_num_learnable_registers, rope_base_seq_len=connector_rope_base_seq_len, rope_theta=rope_theta, rope_double_precision=rope_double_precision, causal_temporal_positioning=causal_temporal_positioning, rope_type=rope_type, ) self.audio_connector = LTX2ConnectorTransformer1d( num_attention_heads=audio_connector_num_attention_heads, 
attention_head_dim=audio_connector_attention_head_dim, num_layers=audio_connector_num_layers, num_learnable_registers=audio_connector_num_learnable_registers, rope_base_seq_len=connector_rope_base_seq_len, rope_theta=rope_theta, rope_double_precision=rope_double_precision, causal_temporal_positioning=causal_temporal_positioning, rope_type=rope_type, ) def forward( self, text_encoder_hidden_states: torch.Tensor, attention_mask: torch.Tensor, additive_mask: bool = False ): # Convert to additive attention mask, if necessary if not additive_mask: text_dtype = text_encoder_hidden_states.dtype attention_mask = (attention_mask - 1).reshape(attention_mask.shape[0], 1, -1, attention_mask.shape[-1]) attention_mask = attention_mask.to(text_dtype) * torch.finfo(text_dtype).max text_encoder_hidden_states = self.text_proj_in(text_encoder_hidden_states) video_text_embedding, new_attn_mask = self.video_connector(text_encoder_hidden_states, attention_mask) attn_mask = (new_attn_mask < 1e-6).to(torch.int64) attn_mask = attn_mask.reshape(video_text_embedding.shape[0], video_text_embedding.shape[1], 1) video_text_embedding = video_text_embedding * attn_mask new_attn_mask = attn_mask.squeeze(-1) audio_text_embedding, _ = self.audio_connector(text_encoder_hidden_states, attention_mask) return video_text_embedding, audio_text_embedding, new_attn_mask
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/pipelines/ltx2/connectors.py", "license": "Apache License 2.0", "lines": 270, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
huggingface/diffusers:src/diffusers/pipelines/ltx2/export_utils.py
# Copyright 2025 The Lightricks team and The HuggingFace Team. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from collections.abc import Iterator from fractions import Fraction from itertools import chain import numpy as np import PIL.Image import torch from tqdm import tqdm from ...utils import get_logger, is_av_available logger = get_logger(__name__) # pylint: disable=invalid-name _CAN_USE_AV = is_av_available() if _CAN_USE_AV: import av else: raise ImportError( "PyAV is required to use LTX 2.0 video export utilities. You can install it with `pip install av`" ) def _prepare_audio_stream(container: av.container.Container, audio_sample_rate: int) -> av.audio.AudioStream: """ Prepare the audio stream for writing. 
""" audio_stream = container.add_stream("aac", rate=audio_sample_rate) audio_stream.codec_context.sample_rate = audio_sample_rate audio_stream.codec_context.layout = "stereo" audio_stream.codec_context.time_base = Fraction(1, audio_sample_rate) return audio_stream def _resample_audio( container: av.container.Container, audio_stream: av.audio.AudioStream, frame_in: av.AudioFrame ) -> None: cc = audio_stream.codec_context # Use the encoder's format/layout/rate as the *target* target_format = cc.format or "fltp" # AAC → usually fltp target_layout = cc.layout or "stereo" target_rate = cc.sample_rate or frame_in.sample_rate audio_resampler = av.audio.resampler.AudioResampler( format=target_format, layout=target_layout, rate=target_rate, ) audio_next_pts = 0 for rframe in audio_resampler.resample(frame_in): if rframe.pts is None: rframe.pts = audio_next_pts audio_next_pts += rframe.samples rframe.sample_rate = frame_in.sample_rate container.mux(audio_stream.encode(rframe)) # flush audio encoder for packet in audio_stream.encode(): container.mux(packet) def _write_audio( container: av.container.Container, audio_stream: av.audio.AudioStream, samples: torch.Tensor, audio_sample_rate: int, ) -> None: if samples.ndim == 1: samples = samples[:, None] if samples.shape[1] != 2 and samples.shape[0] == 2: samples = samples.T if samples.shape[1] != 2: raise ValueError(f"Expected samples with 2 channels; got shape {samples.shape}.") # Convert to int16 packed for ingestion; resampler converts to encoder fmt. 
if samples.dtype != torch.int16: samples = torch.clip(samples, -1.0, 1.0) samples = (samples * 32767.0).to(torch.int16) frame_in = av.AudioFrame.from_ndarray( samples.contiguous().reshape(1, -1).cpu().numpy(), format="s16", layout="stereo", ) frame_in.sample_rate = audio_sample_rate _resample_audio(container, audio_stream, frame_in) def encode_video( video: list[PIL.Image.Image] | np.ndarray | torch.Tensor | Iterator[torch.Tensor], fps: int, audio: torch.Tensor, audio_sample_rate: int, output_path: str, video_chunks_number: int = 1, ) -> None: """ Encodes a video with audio using the PyAV library. Based on code from the original LTX-2 repo: https://github.com/Lightricks/LTX-2/blob/4f410820b198e05074a1e92de793e3b59e9ab5a0/packages/ltx-pipelines/src/ltx_pipelines/utils/media_io.py#L182 Args: video (`List[PIL.Image.Image]` or `np.ndarray` or `torch.Tensor`): A video tensor of shape [frames, height, width, channels] with integer pixel values in [0, 255]. If the input is a `np.ndarray`, it is expected to be a float array with values in [0, 1] (which is what pipelines usually return with `output_type="np"`). fps (`int`) The frames per second (FPS) of the encoded video. audio (`torch.Tensor`, *optional*): An audio waveform of shape [audio_channels, samples]. audio_sample_rate: (`int`, *optional*): The sampling rate of the audio waveform. For LTX 2, this is typically 24000 (24 kHz). output_path (`str`): The path to save the encoded video to. video_chunks_number (`int`, *optional*, defaults to `1`): The number of chunks to split the video into for encoding. Each chunk will be encoded separately. The number of chunks to use often depends on the tiling config for the video VAE. 
""" if isinstance(video, list) and isinstance(video[0], PIL.Image.Image): # Pipeline output_type="pil"; assumes each image is in "RGB" mode video_frames = [np.array(frame) for frame in video] video = np.stack(video_frames, axis=0) video = torch.from_numpy(video) elif isinstance(video, np.ndarray): # Pipeline output_type="np" is_denormalized = np.logical_and(np.zeros_like(video) <= video, video <= np.ones_like(video)) if np.all(is_denormalized): video = (video * 255).round().astype("uint8") else: logger.warning( "Supplied `numpy.ndarray` does not have values in [0, 1]. The values will be assumed to be pixel " "values in [0, ..., 255] and will be used as is." ) video = torch.from_numpy(video) if isinstance(video, torch.Tensor): # Split into video_chunks_number along the frame dimension video = torch.tensor_split(video, video_chunks_number, dim=0) video = iter(video) first_chunk = next(video) _, height, width, _ = first_chunk.shape container = av.open(output_path, mode="w") stream = container.add_stream("libx264", rate=int(fps)) stream.width = width stream.height = height stream.pix_fmt = "yuv420p" if audio is not None: if audio_sample_rate is None: raise ValueError("audio_sample_rate is required when audio is provided") audio_stream = _prepare_audio_stream(container, audio_sample_rate) for video_chunk in tqdm(chain([first_chunk], video), total=video_chunks_number, desc="Encoding video chunks"): video_chunk_cpu = video_chunk.to("cpu").numpy() for frame_array in video_chunk_cpu: frame = av.VideoFrame.from_ndarray(frame_array, format="rgb24") for packet in stream.encode(frame): container.mux(packet) # Flush encoder for packet in stream.encode(): container.mux(packet) if audio is not None: _write_audio(container, audio_stream, audio, audio_sample_rate) container.close()
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/pipelines/ltx2/export_utils.py", "license": "Apache License 2.0", "lines": 156, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:src/diffusers/pipelines/ltx2/latent_upsampler.py
# Copyright 2025 Lightricks and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math import torch import torch.nn.functional as F from ...configuration_utils import ConfigMixin, register_to_config from ...models.modeling_utils import ModelMixin RATIONAL_RESAMPLER_SCALE_MAPPING = { 0.75: (3, 4), 1.5: (3, 2), 2.0: (2, 1), 4.0: (4, 1), } # Copied from diffusers.pipelines.ltx.modeling_latent_upsampler.ResBlock class ResBlock(torch.nn.Module): def __init__(self, channels: int, mid_channels: int | None = None, dims: int = 3): super().__init__() if mid_channels is None: mid_channels = channels Conv = torch.nn.Conv2d if dims == 2 else torch.nn.Conv3d self.conv1 = Conv(channels, mid_channels, kernel_size=3, padding=1) self.norm1 = torch.nn.GroupNorm(32, mid_channels) self.conv2 = Conv(mid_channels, channels, kernel_size=3, padding=1) self.norm2 = torch.nn.GroupNorm(32, channels) self.activation = torch.nn.SiLU() def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: residual = hidden_states hidden_states = self.conv1(hidden_states) hidden_states = self.norm1(hidden_states) hidden_states = self.activation(hidden_states) hidden_states = self.conv2(hidden_states) hidden_states = self.norm2(hidden_states) hidden_states = self.activation(hidden_states + residual) return hidden_states # Copied from diffusers.pipelines.ltx.modeling_latent_upsampler.PixelShuffleND class PixelShuffleND(torch.nn.Module): def __init__(self, dims, 
upscale_factors=(2, 2, 2)): super().__init__() self.dims = dims self.upscale_factors = upscale_factors if dims not in [1, 2, 3]: raise ValueError("dims must be 1, 2, or 3") def forward(self, x): if self.dims == 3: # spatiotemporal: b (c p1 p2 p3) d h w -> b c (d p1) (h p2) (w p3) return ( x.unflatten(1, (-1, *self.upscale_factors[:3])) .permute(0, 1, 5, 2, 6, 3, 7, 4) .flatten(6, 7) .flatten(4, 5) .flatten(2, 3) ) elif self.dims == 2: # spatial: b (c p1 p2) h w -> b c (h p1) (w p2) return ( x.unflatten(1, (-1, *self.upscale_factors[:2])).permute(0, 1, 4, 2, 5, 3).flatten(4, 5).flatten(2, 3) ) elif self.dims == 1: # temporal: b (c p1) f h w -> b c (f p1) h w return x.unflatten(1, (-1, *self.upscale_factors[:1])).permute(0, 1, 3, 2, 4, 5).flatten(2, 3) class BlurDownsample(torch.nn.Module): """ Anti-aliased spatial downsampling by integer stride using a fixed separable binomial kernel. Applies only on H,W. Works for dims=2 or dims=3 (per-frame). """ def __init__(self, dims: int, stride: int, kernel_size: int = 5) -> None: super().__init__() if dims not in (2, 3): raise ValueError(f"`dims` must be either 2 or 3 but is {dims}") if kernel_size < 3 or kernel_size % 2 != 1: raise ValueError(f"`kernel_size` must be an odd number >= 3 but is {kernel_size}") self.dims = dims self.stride = stride self.kernel_size = kernel_size # 5x5 separable binomial kernel using binomial coefficients [1, 4, 6, 4, 1] from # the 4th row of Pascal's triangle. This kernel is used for anti-aliasing and # provides a smooth approximation of a Gaussian filter (often called a "binomial filter"). # The 2D kernel is constructed as the outer product and normalized. 
k = torch.tensor([math.comb(kernel_size - 1, k) for k in range(kernel_size)]) k2d = k[:, None] @ k[None, :] k2d = (k2d / k2d.sum()).float() # shape (kernel_size, kernel_size) self.register_buffer("kernel", k2d[None, None, :, :]) # (1, 1, kernel_size, kernel_size) def forward(self, x: torch.Tensor) -> torch.Tensor: if self.stride == 1: return x if self.dims == 2: c = x.shape[1] weight = self.kernel.expand(c, 1, self.kernel_size, self.kernel_size) # depthwise x = F.conv2d(x, weight=weight, bias=None, stride=self.stride, padding=self.kernel_size // 2, groups=c) else: # dims == 3: apply per-frame on H,W b, c, f, _, _ = x.shape x = x.transpose(1, 2).flatten(0, 1) # [B, C, F, H, W] --> [B * F, C, H, W] weight = self.kernel.expand(c, 1, self.kernel_size, self.kernel_size) # depthwise x = F.conv2d(x, weight=weight, bias=None, stride=self.stride, padding=self.kernel_size // 2, groups=c) h2, w2 = x.shape[-2:] x = x.unflatten(0, (b, f)).reshape(b, -1, f, h2, w2) # [B * F, C, H, W] --> [B, C, F, H, W] return x class SpatialRationalResampler(torch.nn.Module): """ Scales by the spatial size of the input by a rational number `scale`. For example, `scale = 0.75` will downsample by a factor of 3 / 4, while `scale = 1.5` will upsample by a factor of 3 / 2. This works by first upsampling the input by the (integer) numerator of `scale`, and then performing a blur + stride anti-aliased downsample by the (integer) denominator. 
""" def __init__(self, mid_channels: int = 1024, scale: float = 2.0): super().__init__() self.scale = float(scale) num_denom = RATIONAL_RESAMPLER_SCALE_MAPPING.get(scale, None) if num_denom is None: raise ValueError( f"The supplied `scale` {scale} is not supported; supported scales are {list(RATIONAL_RESAMPLER_SCALE_MAPPING.keys())}" ) self.num, self.den = num_denom self.conv = torch.nn.Conv2d(mid_channels, (self.num**2) * mid_channels, kernel_size=3, padding=1) self.pixel_shuffle = PixelShuffleND(2, upscale_factors=(self.num, self.num)) self.blur_down = BlurDownsample(dims=2, stride=self.den) def forward(self, x: torch.Tensor) -> torch.Tensor: # Expected x shape: [B * F, C, H, W] # b, _, f, h, w = x.shape # x = x.transpose(1, 2).flatten(0, 1) # [B, C, F, H, W] --> [B * F, C, H, W] x = self.conv(x) x = self.pixel_shuffle(x) x = self.blur_down(x) # x = x.unflatten(0, (b, f)).reshape(b, -1, f, h, w) # [B * F, C, H, W] --> [B, C, F, H, W] return x class LTX2LatentUpsamplerModel(ModelMixin, ConfigMixin): """ Model to spatially upsample VAE latents. 
Args: in_channels (`int`, defaults to `128`): Number of channels in the input latent mid_channels (`int`, defaults to `512`): Number of channels in the middle layers num_blocks_per_stage (`int`, defaults to `4`): Number of ResBlocks to use in each stage (pre/post upsampling) dims (`int`, defaults to `3`): Number of dimensions for convolutions (2 or 3) spatial_upsample (`bool`, defaults to `True`): Whether to spatially upsample the latent temporal_upsample (`bool`, defaults to `False`): Whether to temporally upsample the latent """ @register_to_config def __init__( self, in_channels: int = 128, mid_channels: int = 1024, num_blocks_per_stage: int = 4, dims: int = 3, spatial_upsample: bool = True, temporal_upsample: bool = False, rational_spatial_scale: float | None = 2.0, ): super().__init__() self.in_channels = in_channels self.mid_channels = mid_channels self.num_blocks_per_stage = num_blocks_per_stage self.dims = dims self.spatial_upsample = spatial_upsample self.temporal_upsample = temporal_upsample ConvNd = torch.nn.Conv2d if dims == 2 else torch.nn.Conv3d self.initial_conv = ConvNd(in_channels, mid_channels, kernel_size=3, padding=1) self.initial_norm = torch.nn.GroupNorm(32, mid_channels) self.initial_activation = torch.nn.SiLU() self.res_blocks = torch.nn.ModuleList([ResBlock(mid_channels, dims=dims) for _ in range(num_blocks_per_stage)]) if spatial_upsample and temporal_upsample: self.upsampler = torch.nn.Sequential( torch.nn.Conv3d(mid_channels, 8 * mid_channels, kernel_size=3, padding=1), PixelShuffleND(3), ) elif spatial_upsample: if rational_spatial_scale is not None: self.upsampler = SpatialRationalResampler(mid_channels=mid_channels, scale=rational_spatial_scale) else: self.upsampler = torch.nn.Sequential( torch.nn.Conv2d(mid_channels, 4 * mid_channels, kernel_size=3, padding=1), PixelShuffleND(2), ) elif temporal_upsample: self.upsampler = torch.nn.Sequential( torch.nn.Conv3d(mid_channels, 2 * mid_channels, kernel_size=3, padding=1), 
PixelShuffleND(1), ) else: raise ValueError("Either spatial_upsample or temporal_upsample must be True") self.post_upsample_res_blocks = torch.nn.ModuleList( [ResBlock(mid_channels, dims=dims) for _ in range(num_blocks_per_stage)] ) self.final_conv = ConvNd(mid_channels, in_channels, kernel_size=3, padding=1) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: batch_size, num_channels, num_frames, height, width = hidden_states.shape if self.dims == 2: hidden_states = hidden_states.permute(0, 2, 1, 3, 4).flatten(0, 1) hidden_states = self.initial_conv(hidden_states) hidden_states = self.initial_norm(hidden_states) hidden_states = self.initial_activation(hidden_states) for block in self.res_blocks: hidden_states = block(hidden_states) hidden_states = self.upsampler(hidden_states) for block in self.post_upsample_res_blocks: hidden_states = block(hidden_states) hidden_states = self.final_conv(hidden_states) hidden_states = hidden_states.unflatten(0, (batch_size, -1)).permute(0, 2, 1, 3, 4) else: hidden_states = self.initial_conv(hidden_states) hidden_states = self.initial_norm(hidden_states) hidden_states = self.initial_activation(hidden_states) for block in self.res_blocks: hidden_states = block(hidden_states) if self.temporal_upsample: hidden_states = self.upsampler(hidden_states) hidden_states = hidden_states[:, :, 1:, :, :] else: hidden_states = hidden_states.permute(0, 2, 1, 3, 4).flatten(0, 1) hidden_states = self.upsampler(hidden_states) hidden_states = hidden_states.unflatten(0, (batch_size, -1)).permute(0, 2, 1, 3, 4) for block in self.post_upsample_res_blocks: hidden_states = block(hidden_states) hidden_states = self.final_conv(hidden_states) return hidden_states
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/pipelines/ltx2/latent_upsampler.py", "license": "Apache License 2.0", "lines": 232, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:src/diffusers/pipelines/ltx2/pipeline_ltx2.py
# Copyright 2025 Lightricks and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import inspect from typing import Any, Callable import numpy as np import torch from transformers import Gemma3ForConditionalGeneration, GemmaTokenizer, GemmaTokenizerFast from ...callbacks import MultiPipelineCallbacks, PipelineCallback from ...loaders import FromSingleFileMixin, LTX2LoraLoaderMixin from ...models.autoencoders import AutoencoderKLLTX2Audio, AutoencoderKLLTX2Video from ...models.transformers import LTX2VideoTransformer3DModel from ...schedulers import FlowMatchEulerDiscreteScheduler from ...utils import is_torch_xla_available, logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ...video_processor import VideoProcessor from ..pipeline_utils import DiffusionPipeline from .connectors import LTX2TextConnectors from .pipeline_output import LTX2PipelineOutput from .vocoder import LTX2Vocoder if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py >>> import torch >>> from diffusers import LTX2Pipeline >>> from diffusers.pipelines.ltx2.export_utils import encode_video >>> pipe = LTX2Pipeline.from_pretrained("Lightricks/LTX-2", torch_dtype=torch.bfloat16) >>> pipe.enable_model_cpu_offload() >>> prompt = "A woman with long brown hair and 
light skin smiles at another woman with long blonde hair. The woman with brown hair wears a black jacket and has a small, barely noticeable mole on her right cheek. The camera angle is a close-up, focused on the woman with brown hair's face. The lighting is warm and natural, likely from the setting sun, casting a soft glow on the scene. The scene appears to be real-life footage" >>> negative_prompt = "worst quality, inconsistent motion, blurry, jittery, distorted" >>> frame_rate = 24.0 >>> video, audio = pipe( ... prompt=prompt, ... negative_prompt=negative_prompt, ... width=768, ... height=512, ... num_frames=121, ... frame_rate=frame_rate, ... num_inference_steps=40, ... guidance_scale=4.0, ... output_type="np", ... return_dict=False, ... ) >>> encode_video( ... video[0], ... fps=frame_rate, ... audio=audio[0].float().cpu(), ... audio_sample_rate=pipe.vocoder.config.output_sampling_rate, # should be 24000 ... output_path="video.mp4", ... ) ``` """ # Copied from diffusers.pipelines.flux.pipeline_flux.calculate_shift def calculate_shift( image_seq_len, base_seq_len: int = 256, max_seq_len: int = 4096, base_shift: float = 0.5, max_shift: float = 1.15, ): m = (max_shift - base_shift) / (max_seq_len - base_seq_len) b = base_shift - m * base_seq_len mu = image_seq_len * m + b return mu # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps def retrieve_timesteps( scheduler, num_inference_steps: int | None = None, device: str | torch.device | None = None, timesteps: list[int] | None = None, sigmas: list[float] | None = None, **kwargs, ): r""" Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. Args: scheduler (`SchedulerMixin`): The scheduler to get timesteps from. num_inference_steps (`int`): The number of diffusion steps used when generating samples with a pre-trained model. 
If used, `timesteps` must be `None`. device (`str` or `torch.device`, *optional*): The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. timesteps (`list[int]`, *optional*): Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, `num_inference_steps` and `sigmas` must be `None`. sigmas (`list[float]`, *optional*): Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, `num_inference_steps` and `timesteps` must be `None`. Returns: `tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the second element is the number of inference steps. """ if timesteps is not None and sigmas is not None: raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") if timesteps is not None: accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" timestep schedules. Please check whether you are using the correct scheduler." ) scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif sigmas is not None: accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accept_sigmas: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" sigmas schedules. Please check whether you are using the correct scheduler." 
) scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return timesteps, num_inference_steps # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): r""" Rescales `noise_cfg` tensor based on `guidance_rescale` to improve image quality and fix overexposure. Based on Section 3.4 from [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://huggingface.co/papers/2305.08891). Args: noise_cfg (`torch.Tensor`): The predicted noise tensor for the guided diffusion process. noise_pred_text (`torch.Tensor`): The predicted noise tensor for the text-guided diffusion process. guidance_rescale (`float`, *optional*, defaults to 0.0): A rescale factor applied to the noise predictions. Returns: noise_cfg (`torch.Tensor`): The rescaled noise prediction tensor. """ std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) # rescale the results from guidance (fixes overexposure) noise_pred_rescaled = noise_cfg * (std_text / std_cfg) # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg return noise_cfg class LTX2Pipeline(DiffusionPipeline, FromSingleFileMixin, LTX2LoraLoaderMixin): r""" Pipeline for text-to-video generation. Reference: https://github.com/Lightricks/LTX-Video Args: transformer ([`LTXVideoTransformer3DModel`]): Conditional Transformer architecture to denoise the encoded video latents. 
scheduler ([`FlowMatchEulerDiscreteScheduler`]):
            A scheduler to be used in combination with `transformer` to denoise the encoded image latents.
        vae ([`AutoencoderKLLTX2Video`]):
            Variational Auto-Encoder (VAE) Model to encode and decode videos to and from latent representations.
        text_encoder ([`Gemma3ForConditionalGeneration`]):
            [Gemma 3](https://huggingface.co/docs/transformers/en/model_doc/gemma3) conditional-generation model used
            here as a text encoder; its per-layer hidden states are packed into the prompt embeddings.
        tokenizer (`GemmaTokenizer` or `GemmaTokenizerFast`):
            Tokenizer of class
            [GemmaTokenizer](https://huggingface.co/docs/transformers/en/model_doc/gemma#transformers.GemmaTokenizer)
            or its fast variant.
        connectors ([`LTX2TextConnectors`]):
            Text connector stack used to adapt text encoder hidden states for the video and audio branches.
    """

    # Components are offloaded in the order they are used during __call__.
    model_cpu_offload_seq = "text_encoder->connectors->transformer->vae->audio_vae->vocoder"
    _optional_components = []
    _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"]

    def __init__(
        self,
        scheduler: FlowMatchEulerDiscreteScheduler,
        vae: AutoencoderKLLTX2Video,
        audio_vae: AutoencoderKLLTX2Audio,
        text_encoder: Gemma3ForConditionalGeneration,
        tokenizer: GemmaTokenizer | GemmaTokenizerFast,
        connectors: LTX2TextConnectors,
        transformer: LTX2VideoTransformer3DModel,
        vocoder: LTX2Vocoder,
    ):
        super().__init__()

        self.register_modules(
            vae=vae,
            audio_vae=audio_vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            connectors=connectors,
            transformer=transformer,
            vocoder=vocoder,
            scheduler=scheduler,
        )

        # Fall back to LTX 2 defaults when a component was not loaded (partial pipelines).
        self.vae_spatial_compression_ratio = (
            self.vae.spatial_compression_ratio if getattr(self, "vae", None) is not None else 32
        )
        self.vae_temporal_compression_ratio = (
            self.vae.temporal_compression_ratio if getattr(self, "vae", None) is not None else 8
        )
        # TODO: check whether the MEL compression ratio logic here is correct
self.audio_vae_mel_compression_ratio = ( self.audio_vae.mel_compression_ratio if getattr(self, "audio_vae", None) is not None else 4 ) self.audio_vae_temporal_compression_ratio = ( self.audio_vae.temporal_compression_ratio if getattr(self, "audio_vae", None) is not None else 4 ) self.transformer_spatial_patch_size = ( self.transformer.config.patch_size if getattr(self, "transformer", None) is not None else 1 ) self.transformer_temporal_patch_size = ( self.transformer.config.patch_size_t if getattr(self, "transformer") is not None else 1 ) self.audio_sampling_rate = ( self.audio_vae.config.sample_rate if getattr(self, "audio_vae", None) is not None else 16000 ) self.audio_hop_length = ( self.audio_vae.config.mel_hop_length if getattr(self, "audio_vae", None) is not None else 160 ) self.video_processor = VideoProcessor(vae_scale_factor=self.vae_spatial_compression_ratio) self.tokenizer_max_length = ( self.tokenizer.model_max_length if getattr(self, "tokenizer", None) is not None else 1024 ) @staticmethod def _pack_text_embeds( text_hidden_states: torch.Tensor, sequence_lengths: torch.Tensor, device: str | torch.device, padding_side: str = "left", scale_factor: int = 8, eps: float = 1e-6, ) -> torch.Tensor: """ Packs and normalizes text encoder hidden states, respecting padding. Normalization is performed per-batch and per-layer in a masked fashion (only over non-padded positions). Args: text_hidden_states (`torch.Tensor` of shape `(batch_size, seq_len, hidden_dim, num_layers)`): Per-layer hidden_states from a text encoder (e.g. `Gemma3ForConditionalGeneration`). sequence_lengths (`torch.Tensor of shape `(batch_size,)`): The number of valid (non-padded) tokens for each batch instance. device: (`str` or `torch.device`, *optional*): torch device to place the resulting embeddings on padding_side: (`str`, *optional*, defaults to `"left"`): Whether the text tokenizer performs padding on the `"left"` or `"right"`. 
scale_factor (`int`, *optional*, defaults to `8`):
                Scaling factor to multiply the normalized hidden states by.
            eps (`float`, *optional*, defaults to `1e-6`):
                A small positive value for numerical stability when performing normalization.

        Returns:
            `torch.Tensor` of shape `(batch_size, seq_len, hidden_dim * num_layers)`:
                Normed and flattened text encoder hidden states.
        """
        batch_size, seq_len, hidden_dim, num_layers = text_hidden_states.shape
        original_dtype = text_hidden_states.dtype

        # Create padding mask
        token_indices = torch.arange(seq_len, device=device).unsqueeze(0)
        if padding_side == "right":
            # For right padding, valid tokens are from 0 to sequence_length-1
            mask = token_indices < sequence_lengths[:, None]  # [batch_size, seq_len]
        elif padding_side == "left":
            # For left padding, valid tokens are from (T - sequence_length) to T-1
            start_indices = seq_len - sequence_lengths[:, None]  # [batch_size, 1]
            mask = token_indices >= start_indices  # [B, T]
        else:
            raise ValueError(f"padding_side must be 'left' or 'right', got {padding_side}")
        mask = mask[:, :, None, None]  # [batch_size, seq_len] --> [batch_size, seq_len, 1, 1]

        # Compute masked mean over non-padding positions; reducing dims (1, 2) with keepdim gives shape
        # (batch_size, 1, 1, num_layers). The valid-element count per (batch, layer) is seq_len_valid * hidden_dim.
        masked_text_hidden_states = text_hidden_states.masked_fill(~mask, 0.0)
        num_valid_positions = (sequence_lengths * hidden_dim).view(batch_size, 1, 1, 1)
        masked_mean = masked_text_hidden_states.sum(dim=(1, 2), keepdim=True) / (num_valid_positions + eps)

        # Compute min/max over non-padding positions, also of shape (batch_size, 1, 1, num_layers).
        # Padded positions are filled with +/-inf so they can never win the amin/amax.
        x_min = text_hidden_states.masked_fill(~mask, float("inf")).amin(dim=(1, 2), keepdim=True)
        x_max = text_hidden_states.masked_fill(~mask, float("-inf")).amax(dim=(1, 2), keepdim=True)

        # Normalization: shift by masked mean, scale by masked value range, then apply the fixed scale factor.
        normalized_hidden_states = (text_hidden_states - masked_mean) / (x_max - x_min + eps)
        normalized_hidden_states = normalized_hidden_states * scale_factor

        # Pack the hidden states to a 3D tensor (batch_size, seq_len, hidden_dim * num_layers)
normalized_hidden_states = normalized_hidden_states.flatten(2) mask_flat = mask.squeeze(-1).expand(-1, -1, hidden_dim * num_layers) normalized_hidden_states = normalized_hidden_states.masked_fill(~mask_flat, 0.0) normalized_hidden_states = normalized_hidden_states.to(dtype=original_dtype) return normalized_hidden_states def _get_gemma_prompt_embeds( self, prompt: str | list[str], num_videos_per_prompt: int = 1, max_sequence_length: int = 1024, scale_factor: int = 8, device: torch.device | None = None, dtype: torch.dtype | None = None, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `list[str]`, *optional*): prompt to be encoded device: (`str` or `torch.device`): torch device to place the resulting embeddings on dtype: (`torch.dtype`): torch dtype to cast the prompt embeds to max_sequence_length (`int`, defaults to 1024): Maximum sequence length to use for the prompt. """ device = device or self._execution_device dtype = dtype or self.text_encoder.dtype prompt = [prompt] if isinstance(prompt, str) else prompt batch_size = len(prompt) if getattr(self, "tokenizer", None) is not None: # Gemma expects left padding for chat-style prompts self.tokenizer.padding_side = "left" if self.tokenizer.pad_token is None: self.tokenizer.pad_token = self.tokenizer.eos_token prompt = [p.strip() for p in prompt] text_inputs = self.tokenizer( prompt, padding="max_length", max_length=max_sequence_length, truncation=True, add_special_tokens=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids prompt_attention_mask = text_inputs.attention_mask text_input_ids = text_input_ids.to(device) prompt_attention_mask = prompt_attention_mask.to(device) text_encoder_outputs = self.text_encoder( input_ids=text_input_ids, attention_mask=prompt_attention_mask, output_hidden_states=True ) text_encoder_hidden_states = text_encoder_outputs.hidden_states text_encoder_hidden_states = torch.stack(text_encoder_hidden_states, dim=-1) sequence_lengths = 
prompt_attention_mask.sum(dim=-1) prompt_embeds = self._pack_text_embeds( text_encoder_hidden_states, sequence_lengths, device=device, padding_side=self.tokenizer.padding_side, scale_factor=scale_factor, ) prompt_embeds = prompt_embeds.to(dtype=dtype) # duplicate text embeddings for each generation per prompt, using mps friendly method _, seq_len, _ = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_videos_per_prompt, 1) prompt_embeds = prompt_embeds.view(batch_size * num_videos_per_prompt, seq_len, -1) prompt_attention_mask = prompt_attention_mask.view(batch_size, -1) prompt_attention_mask = prompt_attention_mask.repeat(num_videos_per_prompt, 1) return prompt_embeds, prompt_attention_mask def encode_prompt( self, prompt: str | list[str], negative_prompt: str | list[str] | None = None, do_classifier_free_guidance: bool = True, num_videos_per_prompt: int = 1, prompt_embeds: torch.Tensor | None = None, negative_prompt_embeds: torch.Tensor | None = None, prompt_attention_mask: torch.Tensor | None = None, negative_prompt_attention_mask: torch.Tensor | None = None, max_sequence_length: int = 1024, scale_factor: int = 8, device: torch.device | None = None, dtype: torch.dtype | None = None, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `list[str]`, *optional*): prompt to be encoded negative_prompt (`str` or `list[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). do_classifier_free_guidance (`bool`, *optional*, defaults to `True`): Whether to use classifier free guidance or not. num_videos_per_prompt (`int`, *optional*, defaults to 1): Number of videos that should be generated per prompt. torch device to place the resulting embeddings on prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. 
Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. device: (`torch.device`, *optional*): torch device dtype: (`torch.dtype`, *optional*): torch dtype """ device = device or self._execution_device prompt = [prompt] if isinstance(prompt, str) else prompt if prompt is not None: batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: prompt_embeds, prompt_attention_mask = self._get_gemma_prompt_embeds( prompt=prompt, num_videos_per_prompt=num_videos_per_prompt, max_sequence_length=max_sequence_length, scale_factor=scale_factor, device=device, dtype=dtype, ) if do_classifier_free_guidance and negative_prompt_embeds is None: negative_prompt = negative_prompt or "" negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt if prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." 
) negative_prompt_embeds, negative_prompt_attention_mask = self._get_gemma_prompt_embeds( prompt=negative_prompt, num_videos_per_prompt=num_videos_per_prompt, max_sequence_length=max_sequence_length, scale_factor=scale_factor, device=device, dtype=dtype, ) return prompt_embeds, prompt_attention_mask, negative_prompt_embeds, negative_prompt_attention_mask def check_inputs( self, prompt, height, width, callback_on_step_end_tensor_inputs=None, prompt_embeds=None, negative_prompt_embeds=None, prompt_attention_mask=None, negative_prompt_attention_mask=None, ): if height % 32 != 0 or width % 32 != 0: raise ValueError(f"`height` and `width` have to be divisible by 32 but are {height} and {width}.") if callback_on_step_end_tensor_inputs is not None and not all( k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs ): raise ValueError( f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" ) if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." 
) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if prompt_embeds is not None and prompt_attention_mask is None: raise ValueError("Must provide `prompt_attention_mask` when specifying `prompt_embeds`.") if negative_prompt_embeds is not None and negative_prompt_attention_mask is None: raise ValueError("Must provide `negative_prompt_attention_mask` when specifying `negative_prompt_embeds`.") if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." ) if prompt_attention_mask.shape != negative_prompt_attention_mask.shape: raise ValueError( "`prompt_attention_mask` and `negative_prompt_attention_mask` must have the same shape when passed directly, but" f" got: `prompt_attention_mask` {prompt_attention_mask.shape} != `negative_prompt_attention_mask`" f" {negative_prompt_attention_mask.shape}." ) @staticmethod def _pack_latents(latents: torch.Tensor, patch_size: int = 1, patch_size_t: int = 1) -> torch.Tensor: # Unpacked latents of shape are [B, C, F, H, W] are patched into tokens of shape [B, C, F // p_t, p_t, H // p, p, W // p, p]. # The patch dimensions are then permuted and collapsed into the channel dimension of shape: # [B, F // p_t * H // p * W // p, C * p_t * p * p] (an ndim=3 tensor). 
# dim=0 is the batch size, dim=1 is the effective video sequence length, dim=2 is the effective number of input features batch_size, num_channels, num_frames, height, width = latents.shape post_patch_num_frames = num_frames // patch_size_t post_patch_height = height // patch_size post_patch_width = width // patch_size latents = latents.reshape( batch_size, -1, post_patch_num_frames, patch_size_t, post_patch_height, patch_size, post_patch_width, patch_size, ) latents = latents.permute(0, 2, 4, 6, 1, 3, 5, 7).flatten(4, 7).flatten(1, 3) return latents @staticmethod def _unpack_latents( latents: torch.Tensor, num_frames: int, height: int, width: int, patch_size: int = 1, patch_size_t: int = 1 ) -> torch.Tensor: # Packed latents of shape [B, S, D] (S is the effective video sequence length, D is the effective feature dimensions) # are unpacked and reshaped into a video tensor of shape [B, C, F, H, W]. This is the inverse operation of # what happens in the `_pack_latents` method. batch_size = latents.size(0) latents = latents.reshape(batch_size, num_frames, height, width, -1, patch_size_t, patch_size, patch_size) latents = latents.permute(0, 4, 1, 5, 2, 6, 3, 7).flatten(6, 7).flatten(4, 5).flatten(2, 3) return latents @staticmethod # Copied from diffusers.pipelines.ltx2.pipeline_ltx2_image2video.LTX2ImageToVideoPipeline._normalize_latents def _normalize_latents( latents: torch.Tensor, latents_mean: torch.Tensor, latents_std: torch.Tensor, scaling_factor: float = 1.0 ) -> torch.Tensor: # Normalize latents across the channel dimension [B, C, F, H, W] latents_mean = latents_mean.view(1, -1, 1, 1, 1).to(latents.device, latents.dtype) latents_std = latents_std.view(1, -1, 1, 1, 1).to(latents.device, latents.dtype) latents = (latents - latents_mean) * scaling_factor / latents_std return latents @staticmethod def _denormalize_latents( latents: torch.Tensor, latents_mean: torch.Tensor, latents_std: torch.Tensor, scaling_factor: float = 1.0 ) -> torch.Tensor: # Denormalize 
latents across the channel dimension [B, C, F, H, W]
        latents_mean = latents_mean.view(1, -1, 1, 1, 1).to(latents.device, latents.dtype)
        latents_std = latents_std.view(1, -1, 1, 1, 1).to(latents.device, latents.dtype)
        latents = latents * latents_std / scaling_factor + latents_mean
        return latents

    @staticmethod
    def _normalize_audio_latents(latents: torch.Tensor, latents_mean: torch.Tensor, latents_std: torch.Tensor):
        # Standardize audio latents with the audio VAE statistics (inverse of `_denormalize_audio_latents`).
        latents_mean = latents_mean.to(latents.device, latents.dtype)
        latents_std = latents_std.to(latents.device, latents.dtype)
        return (latents - latents_mean) / latents_std

    @staticmethod
    def _denormalize_audio_latents(latents: torch.Tensor, latents_mean: torch.Tensor, latents_std: torch.Tensor):
        # Undo `_normalize_audio_latents` before decoding with the audio VAE.
        latents_mean = latents_mean.to(latents.device, latents.dtype)
        latents_std = latents_std.to(latents.device, latents.dtype)
        return (latents * latents_std) + latents_mean

    @staticmethod
    def _create_noised_state(
        latents: torch.Tensor, noise_scale: float | torch.Tensor, generator: torch.Generator | None = None
    ):
        # Linear interpolation between fresh Gaussian noise and the given latents;
        # noise_scale=0 returns the latents unchanged, noise_scale=1 returns pure noise.
        noise = randn_tensor(latents.shape, generator=generator, device=latents.device, dtype=latents.dtype)
        noised_latents = noise_scale * noise + (1 - noise_scale) * latents
        return noised_latents

    @staticmethod
    def _pack_audio_latents(
        latents: torch.Tensor, patch_size: int | None = None, patch_size_t: int | None = None
    ) -> torch.Tensor:
        # Audio latents shape: [B, C, L, M], where L is the latent audio length and M is the number of mel bins
        if patch_size is not None and patch_size_t is not None:
            # Packs the latents into a patch sequence of shape [B, L // p_t * M // p, C * p_t * p] (a ndim=3 tensor).
            # dim=1 is the effective audio sequence length and dim=2 is the effective audio input feature size.
batch_size, num_channels, latent_length, latent_mel_bins = latents.shape post_patch_latent_length = latent_length / patch_size_t post_patch_mel_bins = latent_mel_bins / patch_size latents = latents.reshape( batch_size, -1, post_patch_latent_length, patch_size_t, post_patch_mel_bins, patch_size ) latents = latents.permute(0, 2, 4, 1, 3, 5).flatten(3, 5).flatten(1, 2) else: # Packs the latents into a patch sequence of shape [B, L, C * M]. This implicitly assumes a (mel) # patch_size of M (all mel bins constitutes a single patch) and a patch_size_t of 1. latents = latents.transpose(1, 2).flatten(2, 3) # [B, C, L, M] --> [B, L, C * M] return latents @staticmethod def _unpack_audio_latents( latents: torch.Tensor, latent_length: int, num_mel_bins: int, patch_size: int | None = None, patch_size_t: int | None = None, ) -> torch.Tensor: # Unpacks an audio patch sequence of shape [B, S, D] into a latent spectrogram tensor of shape [B, C, L, M], # where L is the latent audio length and M is the number of mel bins. if patch_size is not None and patch_size_t is not None: batch_size = latents.size(0) latents = latents.reshape(batch_size, latent_length, num_mel_bins, -1, patch_size_t, patch_size) latents = latents.permute(0, 3, 1, 4, 2, 5).flatten(4, 5).flatten(2, 3) else: # Assume [B, S, D] = [B, L, C * M], which implies that patch_size = M and patch_size_t = 1. 
latents = latents.unflatten(2, (-1, num_mel_bins)).transpose(1, 2) return latents def prepare_latents( self, batch_size: int = 1, num_channels_latents: int = 128, height: int = 512, width: int = 768, num_frames: int = 121, noise_scale: float = 0.0, dtype: torch.dtype | None = None, device: torch.device | None = None, generator: torch.Generator | None = None, latents: torch.Tensor | None = None, ) -> torch.Tensor: if latents is not None: if latents.ndim == 5: latents = self._normalize_latents( latents, self.vae.latents_mean, self.vae.latents_std, self.vae.config.scaling_factor ) # latents are of shape [B, C, F, H, W], need to be packed latents = self._pack_latents( latents, self.transformer_spatial_patch_size, self.transformer_temporal_patch_size ) if latents.ndim != 3: raise ValueError( f"Provided `latents` tensor has shape {latents.shape}, but the expected shape is [batch_size, num_seq, num_features]." ) latents = self._create_noised_state(latents, noise_scale, generator) return latents.to(device=device, dtype=dtype) height = height // self.vae_spatial_compression_ratio width = width // self.vae_spatial_compression_ratio num_frames = (num_frames - 1) // self.vae_temporal_compression_ratio + 1 shape = (batch_size, num_channels_latents, num_frames, height, width) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
) latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) latents = self._pack_latents( latents, self.transformer_spatial_patch_size, self.transformer_temporal_patch_size ) return latents def prepare_audio_latents( self, batch_size: int = 1, num_channels_latents: int = 8, audio_latent_length: int = 1, # 1 is just a dummy value num_mel_bins: int = 64, noise_scale: float = 0.0, dtype: torch.dtype | None = None, device: torch.device | None = None, generator: torch.Generator | None = None, latents: torch.Tensor | None = None, ) -> torch.Tensor: if latents is not None: if latents.ndim == 4: # latents are of shape [B, C, L, M], need to be packed latents = self._pack_audio_latents(latents) if latents.ndim != 3: raise ValueError( f"Provided `latents` tensor has shape {latents.shape}, but the expected shape is [batch_size, num_seq, num_features]." ) latents = self._normalize_audio_latents(latents, self.audio_vae.latents_mean, self.audio_vae.latents_std) latents = self._create_noised_state(latents, noise_scale, generator) return latents.to(device=device, dtype=dtype) # TODO: confirm whether this logic is correct latent_mel_bins = num_mel_bins // self.audio_vae_mel_compression_ratio shape = (batch_size, num_channels_latents, audio_latent_length, latent_mel_bins) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
) latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) latents = self._pack_audio_latents(latents) return latents @property def guidance_scale(self): return self._guidance_scale @property def guidance_rescale(self): return self._guidance_rescale @property def do_classifier_free_guidance(self): return self._guidance_scale > 1.0 @property def num_timesteps(self): return self._num_timesteps @property def current_timestep(self): return self._current_timestep @property def attention_kwargs(self): return self._attention_kwargs @property def interrupt(self): return self._interrupt @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, prompt: str | list[str] = None, negative_prompt: str | list[str] | None = None, height: int = 512, width: int = 768, num_frames: int = 121, frame_rate: float = 24.0, num_inference_steps: int = 40, sigmas: list[float] | None = None, timesteps: list[int] = None, guidance_scale: float = 4.0, guidance_rescale: float = 0.0, noise_scale: float = 0.0, num_videos_per_prompt: int = 1, generator: torch.Generator | list[torch.Generator] | None = None, latents: torch.Tensor | None = None, audio_latents: torch.Tensor | None = None, prompt_embeds: torch.Tensor | None = None, prompt_attention_mask: torch.Tensor | None = None, negative_prompt_embeds: torch.Tensor | None = None, negative_prompt_attention_mask: torch.Tensor | None = None, decode_timestep: float | list[float] = 0.0, decode_noise_scale: float | list[float] | None = None, output_type: str = "pil", return_dict: bool = True, attention_kwargs: dict[str, Any] | None = None, callback_on_step_end: Callable[[int, int], None] | None = None, callback_on_step_end_tensor_inputs: list[str] = ["latents"], max_sequence_length: int = 1024, ): r""" Function invoked when calling the pipeline for generation. Args: prompt (`str` or `list[str]`, *optional*): The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. 
instead.
            height (`int`, *optional*, defaults to `512`):
                The height in pixels of the generated image. This is set to 512 by default for the best results.
            width (`int`, *optional*, defaults to `768`):
                The width in pixels of the generated image. This is set to 768 by default for the best results.
            num_frames (`int`, *optional*, defaults to `121`):
                The number of video frames to generate.
            frame_rate (`float`, *optional*, defaults to `24.0`):
                The frames per second (FPS) of the generated video.
            num_inference_steps (`int`, *optional*, defaults to 40):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            sigmas (`List[float]`, *optional*):
                Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in
                their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
                passed will be used.
            timesteps (`list[int]`, *optional*):
                Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument
                in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
                passed will be used. Must be in descending order.
            guidance_scale (`float`, *optional*, defaults to `4.0`):
                Guidance scale as defined in [Classifier-Free Diffusion
                Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2.
                of [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting
                `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to
                the text `prompt`, usually at the expense of lower image quality.
            guidance_rescale (`float`, *optional*, defaults to 0.0):
                Guidance rescale factor proposed by [Common Diffusion Noise Schedules and Sample Steps are
                Flawed](https://huggingface.co/papers/2305.08891) `guidance_scale` is defined as `φ` in equation 16.
of [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://huggingface.co/papers/2305.08891). Guidance rescale factor should fix overexposure when using zero terminal SNR. noise_scale (`float`, *optional*, defaults to `0.0`): The interpolation factor between random noise and denoised latents at each timestep. Applying noise to the `latents` and `audio_latents` before continue denoising. num_videos_per_prompt (`int`, *optional*, defaults to 1): The number of videos to generate per prompt. generator (`torch.Generator` or `list[torch.Generator]`, *optional*): One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for video generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor will be generated by sampling using the supplied random `generator`. audio_latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for audio generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. prompt_attention_mask (`torch.Tensor`, *optional*): Pre-generated attention mask for text embeddings. negative_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative text embeddings. For PixArt-Sigma this negative prompt should be "". If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. 
negative_prompt_attention_mask (`torch.FloatTensor`, *optional*): Pre-generated attention mask for negative text embeddings. decode_timestep (`float`, defaults to `0.0`): The timestep at which generated video is decoded. decode_noise_scale (`float`, defaults to `None`): The interpolation factor between random noise and denoised latents at the decode timestep. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.ltx.LTX2PipelineOutput`] instead of a plain tuple. attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). callback_on_step_end (`Callable`, *optional*): A function that calls at the end of each denoising steps during the inference. The function is called with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by `callback_on_step_end_tensor_inputs`. callback_on_step_end_tensor_inputs (`List`, *optional*, defaults to `["latents"]`): The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the `._callback_tensor_inputs` attribute of your pipeline class. max_sequence_length (`int`, *optional*, defaults to `1024`): Maximum sequence length to use with the `prompt`. 
Examples: Returns: [`~pipelines.ltx.LTX2PipelineOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.ltx.LTX2PipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated images. """ if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs # 1. Check inputs. Raise error if not correct self.check_inputs( prompt=prompt, height=height, width=width, callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, prompt_attention_mask=prompt_attention_mask, negative_prompt_attention_mask=negative_prompt_attention_mask, ) self._guidance_scale = guidance_scale self._guidance_rescale = guidance_rescale self._attention_kwargs = attention_kwargs self._interrupt = False self._current_timestep = None # 2. Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device # 3. 
Prepare text embeddings ( prompt_embeds, prompt_attention_mask, negative_prompt_embeds, negative_prompt_attention_mask, ) = self.encode_prompt( prompt=prompt, negative_prompt=negative_prompt, do_classifier_free_guidance=self.do_classifier_free_guidance, num_videos_per_prompt=num_videos_per_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, prompt_attention_mask=prompt_attention_mask, negative_prompt_attention_mask=negative_prompt_attention_mask, max_sequence_length=max_sequence_length, device=device, ) if self.do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) prompt_attention_mask = torch.cat([negative_prompt_attention_mask, prompt_attention_mask], dim=0) additive_attention_mask = (1 - prompt_attention_mask.to(prompt_embeds.dtype)) * -1000000.0 connector_prompt_embeds, connector_audio_prompt_embeds, connector_attention_mask = self.connectors( prompt_embeds, additive_attention_mask, additive_mask=True ) # 4. Prepare latent variables latent_num_frames = (num_frames - 1) // self.vae_temporal_compression_ratio + 1 latent_height = height // self.vae_spatial_compression_ratio latent_width = width // self.vae_spatial_compression_ratio if latents is not None: if latents.ndim == 5: logger.info( "Got latents of shape [batch_size, latent_dim, latent_frames, latent_height, latent_width], `latent_num_frames`, `latent_height`, `latent_width` will be inferred." ) _, _, latent_num_frames, latent_height, latent_width = latents.shape # [B, C, F, H, W] elif latents.ndim == 3: logger.warning( f"You have supplied packed `latents` of shape {latents.shape}, so the latent dims cannot be" f" inferred. Make sure the supplied `height`, `width`, and `num_frames` are correct." ) else: raise ValueError( f"Provided `latents` tensor has shape {latents.shape}, but the expected shape is either [batch_size, seq_len, num_features] or [batch_size, latent_dim, latent_frames, latent_height, latent_width]." 
) video_sequence_length = latent_num_frames * latent_height * latent_width num_channels_latents = self.transformer.config.in_channels latents = self.prepare_latents( batch_size * num_videos_per_prompt, num_channels_latents, height, width, num_frames, noise_scale, torch.float32, device, generator, latents, ) duration_s = num_frames / frame_rate audio_latents_per_second = ( self.audio_sampling_rate / self.audio_hop_length / float(self.audio_vae_temporal_compression_ratio) ) audio_num_frames = round(duration_s * audio_latents_per_second) if audio_latents is not None: if audio_latents.ndim == 4: logger.info( "Got audio_latents of shape [batch_size, num_channels, audio_length, mel_bins], `audio_num_frames` will be inferred." ) _, _, audio_num_frames, _ = audio_latents.shape # [B, C, L, M] elif audio_latents.ndim == 3: logger.warning( f"You have supplied packed `audio_latents` of shape {audio_latents.shape}, so the latent dims" f" cannot be inferred. Make sure the supplied `num_frames` and `frame_rate` are correct." ) else: raise ValueError( f"Provided `audio_latents` tensor has shape {audio_latents.shape}, but the expected shape is either [batch_size, seq_len, num_features] or [batch_size, num_channels, audio_length, mel_bins]." ) num_mel_bins = self.audio_vae.config.mel_bins if getattr(self, "audio_vae", None) is not None else 64 latent_mel_bins = num_mel_bins // self.audio_vae_mel_compression_ratio num_channels_latents_audio = ( self.audio_vae.config.latent_channels if getattr(self, "audio_vae", None) is not None else 8 ) audio_latents = self.prepare_audio_latents( batch_size * num_videos_per_prompt, num_channels_latents=num_channels_latents_audio, audio_latent_length=audio_num_frames, num_mel_bins=num_mel_bins, noise_scale=noise_scale, dtype=torch.float32, device=device, generator=generator, latents=audio_latents, ) # 5. 
Prepare timesteps sigmas = np.linspace(1.0, 1 / num_inference_steps, num_inference_steps) if sigmas is None else sigmas mu = calculate_shift( video_sequence_length, self.scheduler.config.get("base_image_seq_len", 1024), self.scheduler.config.get("max_image_seq_len", 4096), self.scheduler.config.get("base_shift", 0.95), self.scheduler.config.get("max_shift", 2.05), ) # For now, duplicate the scheduler for use with the audio latents audio_scheduler = copy.deepcopy(self.scheduler) _, _ = retrieve_timesteps( audio_scheduler, num_inference_steps, device, timesteps, sigmas=sigmas, mu=mu, ) timesteps, num_inference_steps = retrieve_timesteps( self.scheduler, num_inference_steps, device, timesteps, sigmas=sigmas, mu=mu, ) num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) self._num_timesteps = len(timesteps) # 6. Prepare micro-conditions rope_interpolation_scale = ( self.vae_temporal_compression_ratio / frame_rate, self.vae_spatial_compression_ratio, self.vae_spatial_compression_ratio, ) # Pre-compute video and audio positional ids as they will be the same at each step of the denoising loop video_coords = self.transformer.rope.prepare_video_coords( latents.shape[0], latent_num_frames, latent_height, latent_width, latents.device, fps=frame_rate ) audio_coords = self.transformer.audio_rope.prepare_audio_coords( audio_latents.shape[0], audio_num_frames, audio_latents.device ) # Duplicate the positional ids as well if using CFG if self.do_classifier_free_guidance: video_coords = video_coords.repeat((2,) + (1,) * (video_coords.ndim - 1)) # Repeat twice in batch dim audio_coords = audio_coords.repeat((2,) + (1,) * (audio_coords.ndim - 1)) # 7. 
Denoising loop with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): if self.interrupt: continue self._current_timestep = t latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents latent_model_input = latent_model_input.to(prompt_embeds.dtype) audio_latent_model_input = ( torch.cat([audio_latents] * 2) if self.do_classifier_free_guidance else audio_latents ) audio_latent_model_input = audio_latent_model_input.to(prompt_embeds.dtype) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML timestep = t.expand(latent_model_input.shape[0]) with self.transformer.cache_context("cond_uncond"): noise_pred_video, noise_pred_audio = self.transformer( hidden_states=latent_model_input, audio_hidden_states=audio_latent_model_input, encoder_hidden_states=connector_prompt_embeds, audio_encoder_hidden_states=connector_audio_prompt_embeds, timestep=timestep, encoder_attention_mask=connector_attention_mask, audio_encoder_attention_mask=connector_attention_mask, num_frames=latent_num_frames, height=latent_height, width=latent_width, fps=frame_rate, audio_num_frames=audio_num_frames, video_coords=video_coords, audio_coords=audio_coords, # rope_interpolation_scale=rope_interpolation_scale, attention_kwargs=attention_kwargs, return_dict=False, ) noise_pred_video = noise_pred_video.float() noise_pred_audio = noise_pred_audio.float() if self.do_classifier_free_guidance: noise_pred_video_uncond, noise_pred_video_text = noise_pred_video.chunk(2) noise_pred_video = noise_pred_video_uncond + self.guidance_scale * ( noise_pred_video_text - noise_pred_video_uncond ) noise_pred_audio_uncond, noise_pred_audio_text = noise_pred_audio.chunk(2) noise_pred_audio = noise_pred_audio_uncond + self.guidance_scale * ( noise_pred_audio_text - noise_pred_audio_uncond ) if self.guidance_rescale > 0: # Based on 3.4. 
in https://huggingface.co/papers/2305.08891 noise_pred_video = rescale_noise_cfg( noise_pred_video, noise_pred_video_text, guidance_rescale=self.guidance_rescale ) noise_pred_audio = rescale_noise_cfg( noise_pred_audio, noise_pred_audio_text, guidance_rescale=self.guidance_rescale ) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred_video, t, latents, return_dict=False)[0] # NOTE: for now duplicate scheduler for audio latents in case self.scheduler sets internal state in # the step method (such as _step_index) audio_latents = audio_scheduler.step(noise_pred_audio, t, audio_latents, return_dict=False)[0] if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop("latents", latents) prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if XLA_AVAILABLE: xm.mark_step() latents = self._unpack_latents( latents, latent_num_frames, latent_height, latent_width, self.transformer_spatial_patch_size, self.transformer_temporal_patch_size, ) latents = self._denormalize_latents( latents, self.vae.latents_mean, self.vae.latents_std, self.vae.config.scaling_factor ) audio_latents = self._denormalize_audio_latents( audio_latents, self.audio_vae.latents_mean, self.audio_vae.latents_std ) audio_latents = self._unpack_audio_latents(audio_latents, audio_num_frames, num_mel_bins=latent_mel_bins) if output_type == "latent": video = latents audio = audio_latents else: latents = latents.to(prompt_embeds.dtype) if not self.vae.config.timestep_conditioning: timestep = None else: noise = randn_tensor(latents.shape, generator=generator, device=device, dtype=latents.dtype) if not isinstance(decode_timestep, list): 
decode_timestep = [decode_timestep] * batch_size if decode_noise_scale is None: decode_noise_scale = decode_timestep elif not isinstance(decode_noise_scale, list): decode_noise_scale = [decode_noise_scale] * batch_size timestep = torch.tensor(decode_timestep, device=device, dtype=latents.dtype) decode_noise_scale = torch.tensor(decode_noise_scale, device=device, dtype=latents.dtype)[ :, None, None, None, None ] latents = (1 - decode_noise_scale) * latents + decode_noise_scale * noise latents = latents.to(self.vae.dtype) video = self.vae.decode(latents, timestep, return_dict=False)[0] video = self.video_processor.postprocess_video(video, output_type=output_type) audio_latents = audio_latents.to(self.audio_vae.dtype) generated_mel_spectrograms = self.audio_vae.decode(audio_latents, return_dict=False)[0] audio = self.vocoder(generated_mel_spectrograms) # Offload all models self.maybe_free_model_hooks() if not return_dict: return (video, audio) return LTX2PipelineOutput(frames=video, audio=audio)
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/pipelines/ltx2/pipeline_ltx2.py", "license": "Apache License 2.0", "lines": 1096, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:src/diffusers/pipelines/ltx2/pipeline_ltx2_image2video.py
# Copyright 2025 Lightricks and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import inspect from typing import Any, Callable import numpy as np import torch from transformers import Gemma3ForConditionalGeneration, GemmaTokenizer, GemmaTokenizerFast from ...callbacks import MultiPipelineCallbacks, PipelineCallback from ...image_processor import PipelineImageInput from ...loaders import FromSingleFileMixin, LTX2LoraLoaderMixin from ...models.autoencoders import AutoencoderKLLTX2Audio, AutoencoderKLLTX2Video from ...models.transformers import LTX2VideoTransformer3DModel from ...schedulers import FlowMatchEulerDiscreteScheduler from ...utils import is_torch_xla_available, logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ...video_processor import VideoProcessor from ..pipeline_utils import DiffusionPipeline from .connectors import LTX2TextConnectors from .pipeline_output import LTX2PipelineOutput from .vocoder import LTX2Vocoder if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py >>> import torch >>> from diffusers import LTX2ImageToVideoPipeline >>> from diffusers.pipelines.ltx2.export_utils import encode_video >>> from diffusers.utils import load_image >>> pipe = 
LTX2ImageToVideoPipeline.from_pretrained("Lightricks/LTX-2", torch_dtype=torch.bfloat16) >>> pipe.enable_model_cpu_offload() >>> image = load_image( ... "https://huggingface.co/datasets/a-r-r-o-w/tiny-meme-dataset-captioned/resolve/main/images/8.png" ... ) >>> prompt = "A young girl stands calmly in the foreground, looking directly at the camera, as a house fire rages in the background." >>> negative_prompt = "worst quality, inconsistent motion, blurry, jittery, distorted" >>> frame_rate = 24.0 >>> video, audio = pipe( ... image=image, ... prompt=prompt, ... negative_prompt=negative_prompt, ... width=768, ... height=512, ... num_frames=121, ... frame_rate=frame_rate, ... num_inference_steps=40, ... guidance_scale=4.0, ... output_type="np", ... return_dict=False, ... ) >>> encode_video( ... video[0], ... fps=frame_rate, ... audio=audio[0].float().cpu(), ... audio_sample_rate=pipe.vocoder.config.output_sampling_rate, # should be 24000 ... output_path="video.mp4", ... ) ``` """ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents def retrieve_latents( encoder_output: torch.Tensor, generator: torch.Generator | None = None, sample_mode: str = "sample" ): if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": return encoder_output.latent_dist.sample(generator) elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": return encoder_output.latent_dist.mode() elif hasattr(encoder_output, "latents"): return encoder_output.latents else: raise AttributeError("Could not access latents of provided encoder_output") # Copied from diffusers.pipelines.flux.pipeline_flux.calculate_shift def calculate_shift( image_seq_len, base_seq_len: int = 256, max_seq_len: int = 4096, base_shift: float = 0.5, max_shift: float = 1.15, ): m = (max_shift - base_shift) / (max_seq_len - base_seq_len) b = base_shift - m * base_seq_len mu = image_seq_len * m + b return mu # Copied from 
diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps def retrieve_timesteps( scheduler, num_inference_steps: int | None = None, device: str | torch.device | None = None, timesteps: list[int] | None = None, sigmas: list[float] | None = None, **kwargs, ): r""" Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. Args: scheduler (`SchedulerMixin`): The scheduler to get timesteps from. num_inference_steps (`int`): The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` must be `None`. device (`str` or `torch.device`, *optional*): The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. timesteps (`list[int]`, *optional*): Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, `num_inference_steps` and `sigmas` must be `None`. sigmas (`list[float]`, *optional*): Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, `num_inference_steps` and `timesteps` must be `None`. Returns: `tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the second element is the number of inference steps. """ if timesteps is not None and sigmas is not None: raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") if timesteps is not None: accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" timestep schedules. Please check whether you are using the correct scheduler." 
) scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif sigmas is not None: accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accept_sigmas: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" sigmas schedules. Please check whether you are using the correct scheduler." ) scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return timesteps, num_inference_steps # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): r""" Rescales `noise_cfg` tensor based on `guidance_rescale` to improve image quality and fix overexposure. Based on Section 3.4 from [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://huggingface.co/papers/2305.08891). Args: noise_cfg (`torch.Tensor`): The predicted noise tensor for the guided diffusion process. noise_pred_text (`torch.Tensor`): The predicted noise tensor for the text-guided diffusion process. guidance_rescale (`float`, *optional*, defaults to 0.0): A rescale factor applied to the noise predictions. Returns: noise_cfg (`torch.Tensor`): The rescaled noise prediction tensor. 
""" std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) # rescale the results from guidance (fixes overexposure) noise_pred_rescaled = noise_cfg * (std_text / std_cfg) # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg return noise_cfg class LTX2ImageToVideoPipeline(DiffusionPipeline, FromSingleFileMixin, LTX2LoraLoaderMixin): r""" Pipeline for image-to-video generation. Reference: https://github.com/Lightricks/LTX-Video TODO """ model_cpu_offload_seq = "text_encoder->connectors->transformer->vae->audio_vae->vocoder" _optional_components = [] _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"] def __init__( self, scheduler: FlowMatchEulerDiscreteScheduler, vae: AutoencoderKLLTX2Video, audio_vae: AutoencoderKLLTX2Audio, text_encoder: Gemma3ForConditionalGeneration, tokenizer: GemmaTokenizer | GemmaTokenizerFast, connectors: LTX2TextConnectors, transformer: LTX2VideoTransformer3DModel, vocoder: LTX2Vocoder, ): super().__init__() self.register_modules( vae=vae, audio_vae=audio_vae, text_encoder=text_encoder, tokenizer=tokenizer, connectors=connectors, transformer=transformer, vocoder=vocoder, scheduler=scheduler, ) self.vae_spatial_compression_ratio = ( self.vae.spatial_compression_ratio if getattr(self, "vae", None) is not None else 32 ) self.vae_temporal_compression_ratio = ( self.vae.temporal_compression_ratio if getattr(self, "vae", None) is not None else 8 ) # TODO: check whether the MEL compression ratio logic here is corrct self.audio_vae_mel_compression_ratio = ( self.audio_vae.mel_compression_ratio if getattr(self, "audio_vae", None) is not None else 4 ) self.audio_vae_temporal_compression_ratio = ( self.audio_vae.temporal_compression_ratio if getattr(self, "audio_vae", None) is not 
None else 4 ) self.transformer_spatial_patch_size = ( self.transformer.config.patch_size if getattr(self, "transformer", None) is not None else 1 ) self.transformer_temporal_patch_size = ( self.transformer.config.patch_size_t if getattr(self, "transformer") is not None else 1 ) self.audio_sampling_rate = ( self.audio_vae.config.sample_rate if getattr(self, "audio_vae", None) is not None else 16000 ) self.audio_hop_length = ( self.audio_vae.config.mel_hop_length if getattr(self, "audio_vae", None) is not None else 160 ) self.video_processor = VideoProcessor(vae_scale_factor=self.vae_spatial_compression_ratio, resample="bilinear") self.tokenizer_max_length = ( self.tokenizer.model_max_length if getattr(self, "tokenizer", None) is not None else 1024 ) @staticmethod # Copied from diffusers.pipelines.ltx2.pipeline_ltx2.LTX2Pipeline._pack_text_embeds def _pack_text_embeds( text_hidden_states: torch.Tensor, sequence_lengths: torch.Tensor, device: str | torch.device, padding_side: str = "left", scale_factor: int = 8, eps: float = 1e-6, ) -> torch.Tensor: """ Packs and normalizes text encoder hidden states, respecting padding. Normalization is performed per-batch and per-layer in a masked fashion (only over non-padded positions). Args: text_hidden_states (`torch.Tensor` of shape `(batch_size, seq_len, hidden_dim, num_layers)`): Per-layer hidden_states from a text encoder (e.g. `Gemma3ForConditionalGeneration`). sequence_lengths (`torch.Tensor of shape `(batch_size,)`): The number of valid (non-padded) tokens for each batch instance. device: (`str` or `torch.device`, *optional*): torch device to place the resulting embeddings on padding_side: (`str`, *optional*, defaults to `"left"`): Whether the text tokenizer performs padding on the `"left"` or `"right"`. scale_factor (`int`, *optional*, defaults to `8`): Scaling factor to multiply the normalized hidden states by. 
eps (`float`, *optional*, defaults to `1e-6`): A small positive value for numerical stability when performing normalization. Returns: `torch.Tensor` of shape `(batch_size, seq_len, hidden_dim * num_layers)`: Normed and flattened text encoder hidden states. """ batch_size, seq_len, hidden_dim, num_layers = text_hidden_states.shape original_dtype = text_hidden_states.dtype # Create padding mask token_indices = torch.arange(seq_len, device=device).unsqueeze(0) if padding_side == "right": # For right padding, valid tokens are from 0 to sequence_length-1 mask = token_indices < sequence_lengths[:, None] # [batch_size, seq_len] elif padding_side == "left": # For left padding, valid tokens are from (T - sequence_length) to T-1 start_indices = seq_len - sequence_lengths[:, None] # [batch_size, 1] mask = token_indices >= start_indices # [B, T] else: raise ValueError(f"padding_side must be 'left' or 'right', got {padding_side}") mask = mask[:, :, None, None] # [batch_size, seq_len] --> [batch_size, seq_len, 1, 1] # Compute masked mean over non-padding positions of shape (batch_size, 1, 1, seq_len) masked_text_hidden_states = text_hidden_states.masked_fill(~mask, 0.0) num_valid_positions = (sequence_lengths * hidden_dim).view(batch_size, 1, 1, 1) masked_mean = masked_text_hidden_states.sum(dim=(1, 2), keepdim=True) / (num_valid_positions + eps) # Compute min/max over non-padding positions of shape (batch_size, 1, 1 seq_len) x_min = text_hidden_states.masked_fill(~mask, float("inf")).amin(dim=(1, 2), keepdim=True) x_max = text_hidden_states.masked_fill(~mask, float("-inf")).amax(dim=(1, 2), keepdim=True) # Normalization normalized_hidden_states = (text_hidden_states - masked_mean) / (x_max - x_min + eps) normalized_hidden_states = normalized_hidden_states * scale_factor # Pack the hidden states to a 3D tensor (batch_size, seq_len, hidden_dim * num_layers) normalized_hidden_states = normalized_hidden_states.flatten(2) mask_flat = mask.squeeze(-1).expand(-1, -1, hidden_dim * 
num_layers) normalized_hidden_states = normalized_hidden_states.masked_fill(~mask_flat, 0.0) normalized_hidden_states = normalized_hidden_states.to(dtype=original_dtype) return normalized_hidden_states # Copied from diffusers.pipelines.ltx2.pipeline_ltx2.LTX2Pipeline._get_gemma_prompt_embeds def _get_gemma_prompt_embeds( self, prompt: str | list[str], num_videos_per_prompt: int = 1, max_sequence_length: int = 1024, scale_factor: int = 8, device: torch.device | None = None, dtype: torch.dtype | None = None, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `list[str]`, *optional*): prompt to be encoded device: (`str` or `torch.device`): torch device to place the resulting embeddings on dtype: (`torch.dtype`): torch dtype to cast the prompt embeds to max_sequence_length (`int`, defaults to 1024): Maximum sequence length to use for the prompt. """ device = device or self._execution_device dtype = dtype or self.text_encoder.dtype prompt = [prompt] if isinstance(prompt, str) else prompt batch_size = len(prompt) if getattr(self, "tokenizer", None) is not None: # Gemma expects left padding for chat-style prompts self.tokenizer.padding_side = "left" if self.tokenizer.pad_token is None: self.tokenizer.pad_token = self.tokenizer.eos_token prompt = [p.strip() for p in prompt] text_inputs = self.tokenizer( prompt, padding="max_length", max_length=max_sequence_length, truncation=True, add_special_tokens=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids prompt_attention_mask = text_inputs.attention_mask text_input_ids = text_input_ids.to(device) prompt_attention_mask = prompt_attention_mask.to(device) text_encoder_outputs = self.text_encoder( input_ids=text_input_ids, attention_mask=prompt_attention_mask, output_hidden_states=True ) text_encoder_hidden_states = text_encoder_outputs.hidden_states text_encoder_hidden_states = torch.stack(text_encoder_hidden_states, dim=-1) sequence_lengths = prompt_attention_mask.sum(dim=-1) 
        # (tail of `_get_gemma_prompt_embeds`) Pack the raw text-encoder hidden states into the
        # fixed layout expected by the transformer, honoring the tokenizer's padding side.
        prompt_embeds = self._pack_text_embeds(
            text_encoder_hidden_states,
            sequence_lengths,
            device=device,
            padding_side=self.tokenizer.padding_side,
            scale_factor=scale_factor,
        )
        prompt_embeds = prompt_embeds.to(dtype=dtype)

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        _, seq_len, _ = prompt_embeds.shape
        prompt_embeds = prompt_embeds.repeat(1, num_videos_per_prompt, 1)
        prompt_embeds = prompt_embeds.view(batch_size * num_videos_per_prompt, seq_len, -1)

        # NOTE(review): the embeds are duplicated via repeat(1, n, 1).view(...) (copies grouped per
        # prompt) while the mask is duplicated via repeat(n, 1) (copies interleaved across prompts).
        # For batch_size > 1 and num_videos_per_prompt > 1 these orderings look inconsistent —
        # TODO confirm against the upstream LTX2Pipeline implementation.
        prompt_attention_mask = prompt_attention_mask.view(batch_size, -1)
        prompt_attention_mask = prompt_attention_mask.repeat(num_videos_per_prompt, 1)

        return prompt_embeds, prompt_attention_mask

    # Copied from diffusers.pipelines.ltx2.pipeline_ltx2.LTX2Pipeline.encode_prompt
    def encode_prompt(
        self,
        prompt: str | list[str],
        negative_prompt: str | list[str] | None = None,
        do_classifier_free_guidance: bool = True,
        num_videos_per_prompt: int = 1,
        prompt_embeds: torch.Tensor | None = None,
        negative_prompt_embeds: torch.Tensor | None = None,
        prompt_attention_mask: torch.Tensor | None = None,
        negative_prompt_attention_mask: torch.Tensor | None = None,
        max_sequence_length: int = 1024,
        scale_factor: int = 8,
        device: torch.device | None = None,
        dtype: torch.dtype | None = None,
    ):
        r"""
        Encodes the prompt into text encoder hidden states.

        Args:
            prompt (`str` or `list[str]`, *optional*):
                prompt to be encoded
            negative_prompt (`str` or `list[str]`, *optional*):
                The prompt or prompts not to guide the image generation. If not defined, one has to pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale`
                is less than `1`).
            do_classifier_free_guidance (`bool`, *optional*, defaults to `True`):
                Whether to use classifier free guidance or not.
            num_videos_per_prompt (`int`, *optional*, defaults to 1):
                Number of videos that should be generated per prompt.
                torch device to place the resulting embeddings on
            prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If
                not provided, text embeddings will be generated from `prompt` input argument.
            negative_prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
                argument.
            device: (`torch.device`, *optional*):
                torch device
            dtype: (`torch.dtype`, *optional*):
                torch dtype
        """
        device = device or self._execution_device

        # Normalize `prompt` to a list so batch size can be derived uniformly.
        prompt = [prompt] if isinstance(prompt, str) else prompt
        if prompt is not None:
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        # Only run the text encoder when embeddings were not supplied by the caller.
        if prompt_embeds is None:
            prompt_embeds, prompt_attention_mask = self._get_gemma_prompt_embeds(
                prompt=prompt,
                num_videos_per_prompt=num_videos_per_prompt,
                max_sequence_length=max_sequence_length,
                scale_factor=scale_factor,
                device=device,
                dtype=dtype,
            )

        # Negative embeddings are only needed for classifier-free guidance; an empty string is
        # used as the default negative prompt.
        if do_classifier_free_guidance and negative_prompt_embeds is None:
            negative_prompt = negative_prompt or ""
            negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt

            if prompt is not None and type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )

            negative_prompt_embeds, negative_prompt_attention_mask = self._get_gemma_prompt_embeds(
                prompt=negative_prompt,
                num_videos_per_prompt=num_videos_per_prompt,
                max_sequence_length=max_sequence_length,
                scale_factor=scale_factor,
                device=device,
                dtype=dtype,
            )

        return prompt_embeds, prompt_attention_mask, negative_prompt_embeds, negative_prompt_attention_mask

    # Validates user-provided call arguments before any heavy work is done.
    # Copied from diffusers.pipelines.ltx2.pipeline_ltx2.LTX2Pipeline.check_inputs
    def check_inputs(
        self,
        prompt,
        height,
        width,
        callback_on_step_end_tensor_inputs=None,
        prompt_embeds=None,
        negative_prompt_embeds=None,
        prompt_attention_mask=None,
        negative_prompt_attention_mask=None,
    ):
        # 32 = spatial VAE compression; non-multiples cannot be encoded/decoded cleanly.
        if height % 32 != 0 or width % 32 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 32 but are {height} and {width}.")

        if callback_on_step_end_tensor_inputs is not None and not all(
            k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
        ):
            raise ValueError(
                f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
            )

        # Exactly one of `prompt` / `prompt_embeds` must be provided.
        if prompt is not None and prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
                " only forward one of the two."
            )
        elif prompt is None and prompt_embeds is None:
            raise ValueError(
                "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
            )
        elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        # Embeddings supplied without their masks cannot be padded/aligned downstream.
        if prompt_embeds is not None and prompt_attention_mask is None:
            raise ValueError("Must provide `prompt_attention_mask` when specifying `prompt_embeds`.")

        if negative_prompt_embeds is not None and negative_prompt_attention_mask is None:
            raise ValueError("Must provide `negative_prompt_attention_mask` when specifying `negative_prompt_embeds`.")

        # Positive / negative embeddings are concatenated for CFG, so shapes must agree.
        if prompt_embeds is not None and negative_prompt_embeds is not None:
            if prompt_embeds.shape != negative_prompt_embeds.shape:
                raise ValueError(
                    "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
                    f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
                    f" {negative_prompt_embeds.shape}."
                )
            if prompt_attention_mask.shape != negative_prompt_attention_mask.shape:
                raise ValueError(
                    "`prompt_attention_mask` and `negative_prompt_attention_mask` must have the same shape when passed directly, but"
                    f" got: `prompt_attention_mask` {prompt_attention_mask.shape} != `negative_prompt_attention_mask`"
                    f" {negative_prompt_attention_mask.shape}."
                )

    @staticmethod
    # Copied from diffusers.pipelines.ltx2.pipeline_ltx2.LTX2Pipeline._pack_latents
    def _pack_latents(latents: torch.Tensor, patch_size: int = 1, patch_size_t: int = 1) -> torch.Tensor:
        # Unpacked latents of shape are [B, C, F, H, W] are patched into tokens of shape [B, C, F // p_t, p_t, H // p, p, W // p, p].
        # The patch dimensions are then permuted and collapsed into the channel dimension of shape:
        # [B, F // p_t * H // p * W // p, C * p_t * p * p] (an ndim=3 tensor).
        # dim=0 is the batch size, dim=1 is the effective video sequence length, dim=2 is the effective number of input features
        batch_size, num_channels, num_frames, height, width = latents.shape
        post_patch_num_frames = num_frames // patch_size_t
        post_patch_height = height // patch_size
        post_patch_width = width // patch_size
        latents = latents.reshape(
            batch_size,
            -1,
            post_patch_num_frames,
            patch_size_t,
            post_patch_height,
            patch_size,
            post_patch_width,
            patch_size,
        )
        # Move the (frame, height, width) patch-grid dims ahead of the channel/patch dims,
        # then flatten patches into features and the grid into one sequence axis.
        latents = latents.permute(0, 2, 4, 6, 1, 3, 5, 7).flatten(4, 7).flatten(1, 3)
        return latents

    @staticmethod
    # Copied from diffusers.pipelines.ltx2.pipeline_ltx2.LTX2Pipeline._unpack_latents
    def _unpack_latents(
        latents: torch.Tensor, num_frames: int, height: int, width: int, patch_size: int = 1, patch_size_t: int = 1
    ) -> torch.Tensor:
        # Packed latents of shape [B, S, D] (S is the effective video sequence length, D is the effective feature dimensions)
        # are unpacked and reshaped into a video tensor of shape [B, C, F, H, W]. This is the inverse operation of
        # what happens in the `_pack_latents` method.
        batch_size = latents.size(0)
        latents = latents.reshape(batch_size, num_frames, height, width, -1, patch_size_t, patch_size, patch_size)
        latents = latents.permute(0, 4, 1, 5, 2, 6, 3, 7).flatten(6, 7).flatten(4, 5).flatten(2, 3)
        return latents

    # NOTE(review): unlike its siblings this helper has no "Copied from" marker — confirm whether
    # it intentionally diverges from LTX2Pipeline._normalize_latents.
    @staticmethod
    def _normalize_latents(
        latents: torch.Tensor, latents_mean: torch.Tensor, latents_std: torch.Tensor, scaling_factor: float = 1.0
    ) -> torch.Tensor:
        # Normalize latents across the channel dimension [B, C, F, H, W]
        latents_mean = latents_mean.view(1, -1, 1, 1, 1).to(latents.device, latents.dtype)
        latents_std = latents_std.view(1, -1, 1, 1, 1).to(latents.device, latents.dtype)
        latents = (latents - latents_mean) * scaling_factor / latents_std
        return latents

    @staticmethod
    # Copied from diffusers.pipelines.ltx2.pipeline_ltx2.LTX2Pipeline._denormalize_latents
    def _denormalize_latents(
        latents: torch.Tensor, latents_mean: torch.Tensor, latents_std: torch.Tensor, scaling_factor: float = 1.0
    ) -> torch.Tensor:
        # Denormalize latents across the channel dimension [B, C, F, H, W]
        # (exact inverse of `_normalize_latents`).
        latents_mean = latents_mean.view(1, -1, 1, 1, 1).to(latents.device, latents.dtype)
        latents_std = latents_std.view(1, -1, 1, 1, 1).to(latents.device, latents.dtype)
        latents = latents * latents_std / scaling_factor + latents_mean
        return latents

    @staticmethod
    # Copied from diffusers.pipelines.ltx2.pipeline_ltx2.LTX2Pipeline._create_noised_state
    def _create_noised_state(
        latents: torch.Tensor, noise_scale: float | torch.Tensor, generator: torch.Generator | None = None
    ):
        # Linear interpolation between fresh Gaussian noise and the given latents;
        # noise_scale == 0 returns the latents unchanged, 1 returns pure noise.
        noise = randn_tensor(latents.shape, generator=generator, device=latents.device, dtype=latents.dtype)
        noised_latents = noise_scale * noise + (1 - noise_scale) * latents
        return noised_latents

    @staticmethod
    # Copied from diffusers.pipelines.ltx2.pipeline_ltx2.LTX2Pipeline._pack_audio_latents
    def _pack_audio_latents(
        latents: torch.Tensor, patch_size: int | None = None, patch_size_t: int | None = None
    ) -> torch.Tensor:
        # Audio latents shape: [B, C, L, M], where L is the latent audio length
and M is the number of mel bins if patch_size is not None and patch_size_t is not None: # Packs the latents into a patch sequence of shape [B, L // p_t * M // p, C * p_t * p] (a ndim=3 tnesor). # dim=1 is the effective audio sequence length and dim=2 is the effective audio input feature size. batch_size, num_channels, latent_length, latent_mel_bins = latents.shape post_patch_latent_length = latent_length / patch_size_t post_patch_mel_bins = latent_mel_bins / patch_size latents = latents.reshape( batch_size, -1, post_patch_latent_length, patch_size_t, post_patch_mel_bins, patch_size ) latents = latents.permute(0, 2, 4, 1, 3, 5).flatten(3, 5).flatten(1, 2) else: # Packs the latents into a patch sequence of shape [B, L, C * M]. This implicitly assumes a (mel) # patch_size of M (all mel bins constitutes a single patch) and a patch_size_t of 1. latents = latents.transpose(1, 2).flatten(2, 3) # [B, C, L, M] --> [B, L, C * M] return latents @staticmethod # Copied from diffusers.pipelines.ltx2.pipeline_ltx2.LTX2Pipeline._unpack_audio_latents def _unpack_audio_latents( latents: torch.Tensor, latent_length: int, num_mel_bins: int, patch_size: int | None = None, patch_size_t: int | None = None, ) -> torch.Tensor: # Unpacks an audio patch sequence of shape [B, S, D] into a latent spectrogram tensor of shape [B, C, L, M], # where L is the latent audio length and M is the number of mel bins. if patch_size is not None and patch_size_t is not None: batch_size = latents.size(0) latents = latents.reshape(batch_size, latent_length, num_mel_bins, -1, patch_size_t, patch_size) latents = latents.permute(0, 3, 1, 4, 2, 5).flatten(4, 5).flatten(2, 3) else: # Assume [B, S, D] = [B, L, C * M], which implies that patch_size = M and patch_size_t = 1. 
            latents = latents.unflatten(2, (-1, num_mel_bins)).transpose(1, 2)
        return latents

    @staticmethod
    # Copied from diffusers.pipelines.ltx2.pipeline_ltx2.LTX2Pipeline._normalize_audio_latents
    def _normalize_audio_latents(latents: torch.Tensor, latents_mean: torch.Tensor, latents_std: torch.Tensor):
        # Standard-score normalization of audio latents with the audio VAE statistics.
        latents_mean = latents_mean.to(latents.device, latents.dtype)
        latents_std = latents_std.to(latents.device, latents.dtype)
        return (latents - latents_mean) / latents_std

    @staticmethod
    # Copied from diffusers.pipelines.ltx2.pipeline_ltx2.LTX2Pipeline._denormalize_audio_latents
    def _denormalize_audio_latents(latents: torch.Tensor, latents_mean: torch.Tensor, latents_std: torch.Tensor):
        # Inverse of `_normalize_audio_latents`.
        latents_mean = latents_mean.to(latents.device, latents.dtype)
        latents_std = latents_std.to(latents.device, latents.dtype)
        return (latents * latents_std) + latents_mean

    # Builds the initial (possibly image-conditioned) video latents and the matching
    # conditioning mask for the denoising loop. Returns (packed_latents, packed_mask).
    def prepare_latents(
        self,
        image: torch.Tensor | None = None,
        batch_size: int = 1,
        num_channels_latents: int = 128,
        height: int = 512,
        width: int = 704,
        num_frames: int = 161,
        noise_scale: float = 0.0,
        dtype: torch.dtype | None = None,
        device: torch.device | None = None,
        generator: torch.Generator | None = None,
        latents: torch.Tensor | None = None,
    ) -> torch.Tensor:
        # Convert pixel-space sizes to latent-space sizes via the VAE compression ratios.
        height = height // self.vae_spatial_compression_ratio
        width = width // self.vae_spatial_compression_ratio
        num_frames = (num_frames - 1) // self.vae_temporal_compression_ratio + 1

        shape = (batch_size, num_channels_latents, num_frames, height, width)
        mask_shape = (batch_size, 1, num_frames, height, width)

        # Caller-supplied latents: normalize/noise/pack them instead of sampling fresh noise.
        if latents is not None:
            if latents.ndim == 5:
                # conditioning_mask needs to the same shape as latents in two stages generation.
                batch_size, _, num_frames, height, width = latents.shape
                mask_shape = (batch_size, 1, num_frames, height, width)
                conditioning_mask = latents.new_zeros(mask_shape)
                # Frame 0 carries the (clean) image condition.
                conditioning_mask[:, :, 0] = 1.0
                latents = self._normalize_latents(
                    latents, self.vae.latents_mean, self.vae.latents_std, self.vae.config.scaling_factor
                )
                # Conditioned positions (mask == 1) receive no extra noise.
                latents = self._create_noised_state(latents, noise_scale * (1 - conditioning_mask), generator)
                # latents are of shape [B, C, F, H, W], need to be packed
                latents = self._pack_latents(
                    latents, self.transformer_spatial_patch_size, self.transformer_temporal_patch_size
                )
            else:
                conditioning_mask = latents.new_zeros(mask_shape)
                conditioning_mask[:, :, 0] = 1.0
            conditioning_mask = self._pack_latents(
                conditioning_mask, self.transformer_spatial_patch_size, self.transformer_temporal_patch_size
            ).squeeze(-1)
            if latents.ndim != 3 or latents.shape[:2] != conditioning_mask.shape:
                raise ValueError(
                    f"Provided `latents` tensor has shape {latents.shape}, but the expected shape is {conditioning_mask.shape + (num_channels_latents,)}."
                )
            return latents.to(device=device, dtype=dtype), conditioning_mask

        # Fresh generation: encode the conditioning image(s) through the VAE ("argmax" = mode
        # of the latent distribution, i.e. deterministic encoding).
        if isinstance(generator, list):
            if len(generator) != batch_size:
                raise ValueError(
                    f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                    f" size of {batch_size}. Make sure the batch size matches the length of the generators."
                )

            init_latents = [
                retrieve_latents(self.vae.encode(image[i].unsqueeze(0).unsqueeze(2)), generator[i], "argmax")
                for i in range(batch_size)
            ]
        else:
            init_latents = [
                retrieve_latents(self.vae.encode(img.unsqueeze(0).unsqueeze(2)), generator, "argmax") for img in image
            ]

        init_latents = torch.cat(init_latents, dim=0).to(dtype)
        # NOTE(review): here `_normalize_latents` is called without `self.vae.config.scaling_factor`
        # (defaults to 1.0), unlike the caller-supplied-latents branch above — confirm intentional.
        init_latents = self._normalize_latents(init_latents, self.vae.latents_mean, self.vae.latents_std)
        # Broadcast the single-frame image latent across all latent frames.
        init_latents = init_latents.repeat(1, 1, num_frames, 1, 1)

        # First condition is image latents and those should be kept clean.
        conditioning_mask = torch.zeros(mask_shape, device=device, dtype=dtype)
        conditioning_mask[:, :, 0] = 1.0

        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        # Interpolation.
        latents = init_latents * conditioning_mask + noise * (1 - conditioning_mask)

        conditioning_mask = self._pack_latents(
            conditioning_mask, self.transformer_spatial_patch_size, self.transformer_temporal_patch_size
        ).squeeze(-1)
        latents = self._pack_latents(
            latents, self.transformer_spatial_patch_size, self.transformer_temporal_patch_size
        )
        return latents, conditioning_mask

    # Builds the initial audio latents (packed [B, S, D]) for the denoising loop.
    # Copied from diffusers.pipelines.ltx2.pipeline_ltx2.LTX2Pipeline.prepare_audio_latents
    def prepare_audio_latents(
        self,
        batch_size: int = 1,
        num_channels_latents: int = 8,
        audio_latent_length: int = 1,  # 1 is just a dummy value
        num_mel_bins: int = 64,
        noise_scale: float = 0.0,
        dtype: torch.dtype | None = None,
        device: torch.device | None = None,
        generator: torch.Generator | None = None,
        latents: torch.Tensor | None = None,
    ) -> torch.Tensor:
        # Caller-supplied latents: pack/normalize/noise them instead of sampling fresh noise.
        if latents is not None:
            if latents.ndim == 4:
                # latents are of shape [B, C, L, M], need to be packed
                latents = self._pack_audio_latents(latents)
            if latents.ndim != 3:
                raise ValueError(
                    f"Provided `latents` tensor has shape {latents.shape}, but the expected shape is [batch_size, num_seq, num_features]."
                )
            latents = self._normalize_audio_latents(latents, self.audio_vae.latents_mean, self.audio_vae.latents_std)
            latents = self._create_noised_state(latents, noise_scale, generator)
            return latents.to(device=device, dtype=dtype)

        # TODO: confirm whether this logic is correct
        latent_mel_bins = num_mel_bins // self.audio_vae_mel_compression_ratio
        shape = (batch_size, num_channels_latents, audio_latent_length, latent_mel_bins)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. 
Make sure the batch size matches the length of the generators."
            )

        latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        latents = self._pack_audio_latents(latents)
        return latents

    # Read-only views of internal call state, set in `__call__`.
    @property
    def guidance_scale(self):
        return self._guidance_scale

    @property
    def guidance_rescale(self):
        return self._guidance_rescale

    @property
    def do_classifier_free_guidance(self):
        # CFG is active only for guidance scales strictly greater than 1.
        return self._guidance_scale > 1.0

    @property
    def num_timesteps(self):
        return self._num_timesteps

    @property
    def current_timestep(self):
        return self._current_timestep

    @property
    def attention_kwargs(self):
        return self._attention_kwargs

    @property
    def interrupt(self):
        return self._interrupt

    # NOTE(review): `callback_on_step_end_tensor_inputs` uses a mutable list default; harmless
    # here since it is never mutated in-place (diffusers-wide convention), but flagged.
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image: PipelineImageInput = None,
        prompt: str | list[str] = None,
        negative_prompt: str | list[str] | None = None,
        height: int = 512,
        width: int = 768,
        num_frames: int = 121,
        frame_rate: float = 24.0,
        num_inference_steps: int = 40,
        sigmas: list[float] | None = None,
        timesteps: list[int] | None = None,
        guidance_scale: float = 4.0,
        guidance_rescale: float = 0.0,
        noise_scale: float = 0.0,
        num_videos_per_prompt: int = 1,
        generator: torch.Generator | list[torch.Generator] | None = None,
        latents: torch.Tensor | None = None,
        audio_latents: torch.Tensor | None = None,
        prompt_embeds: torch.Tensor | None = None,
        prompt_attention_mask: torch.Tensor | None = None,
        negative_prompt_embeds: torch.Tensor | None = None,
        negative_prompt_attention_mask: torch.Tensor | None = None,
        decode_timestep: float | list[float] = 0.0,
        decode_noise_scale: float | list[float] | None = None,
        output_type: str = "pil",
        return_dict: bool = True,
        attention_kwargs: dict[str, Any] | None = None,
        callback_on_step_end: Callable[[int, int], None] | None = None,
        callback_on_step_end_tensor_inputs: list[str] = ["latents"],
        max_sequence_length: int = 1024,
    ):
        r"""
        Function invoked when calling the pipeline for generation.
        Args:
            image (`PipelineImageInput`):
                The input image to condition the generation on. Must be an image, a list of images or a `torch.Tensor`.
            prompt (`str` or `list[str]`, *optional*):
                The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`.
                instead.
            height (`int`, *optional*, defaults to `512`):
                The height in pixels of the generated image. This is set to 512 by default for the best results.
            width (`int`, *optional*, defaults to `768`):
                The width in pixels of the generated image. This is set to 768 by default for the best results.
            num_frames (`int`, *optional*, defaults to `121`):
                The number of video frames to generate
            frame_rate (`float`, *optional*, defaults to `24.0`):
                The frames per second (FPS) of the generated video.
            num_inference_steps (`int`, *optional*, defaults to 40):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            sigmas (`List[float]`, *optional*):
                Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in
                their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
                passed will be used.
            timesteps (`List[int]`, *optional*):
                Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument
                in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
                passed will be used. Must be in descending order.
            guidance_scale (`float`, *optional*, defaults to `4.0`):
                Guidance scale as defined in [Classifier-Free Diffusion
                Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2.
                of [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting
                `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to
                the text `prompt`, usually at the expense of lower image quality.
            guidance_rescale (`float`, *optional*, defaults to 0.0):
                Guidance rescale factor proposed by [Common Diffusion Noise Schedules and Sample Steps are
                Flawed](https://huggingface.co/papers/2305.08891) `guidance_scale` is defined as `φ` in equation 16.
                of [Common Diffusion Noise Schedules and Sample Steps are
                Flawed](https://huggingface.co/papers/2305.08891). Guidance rescale factor should fix overexposure
                when using zero terminal SNR.
            noise_scale (`float`, *optional*, defaults to `0.0`):
                The interpolation factor between random noise and denoised latents at each timestep. Applying noise to
                the `latents` and `audio_latents` before continue denoising.
            num_videos_per_prompt (`int`, *optional*, defaults to 1):
                The number of videos to generate per prompt.
            generator (`torch.Generator` or `list[torch.Generator]`, *optional*):
                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
                to make generation deterministic.
            latents (`torch.Tensor`, *optional*):
                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for video
                generation. Can be used to tweak the same generation with different prompts. If not provided, a
                latents tensor will be generated by sampling using the supplied random `generator`.
            audio_latents (`torch.Tensor`, *optional*):
                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for audio
                generation. Can be used to tweak the same generation with different prompts. If not provided, a
                latents tensor will be generated by sampling using the supplied random `generator`.
            prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If
                not provided, text embeddings will be generated from `prompt` input argument.
            prompt_attention_mask (`torch.Tensor`, *optional*):
                Pre-generated attention mask for text embeddings.
            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated negative text embeddings. If not provided, negative_prompt_embeds will be generated from
                `negative_prompt` input argument.
            negative_prompt_attention_mask (`torch.FloatTensor`, *optional*):
                Pre-generated attention mask for negative text embeddings.
            decode_timestep (`float`, defaults to `0.0`):
                The timestep at which generated video is decoded.
            decode_noise_scale (`float`, defaults to `None`):
                The interpolation factor between random noise and denoised latents at the decode timestep.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.ltx.LTX2PipelineOutput`] instead of a plain tuple.
            attention_kwargs (`dict`, *optional*):
                A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
                `self.processor` in
                [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
            callback_on_step_end (`Callable`, *optional*):
                A function that calls at the end of each denoising steps during the inference. The function is called
                with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep:
                int, callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
                `callback_on_step_end_tensor_inputs`.
            callback_on_step_end_tensor_inputs (`List`, *optional*):
                The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
                will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
                `._callback_tensor_inputs` attribute of your pipeline class.
            max_sequence_length (`int`, *optional*, defaults to `1024`):
                Maximum sequence length to use with the `prompt`.

        Examples:

        Returns:
            [`~pipelines.ltx.LTX2PipelineOutput`] or `tuple`:
                If `return_dict` is `True`, [`~pipelines.ltx.LTX2PipelineOutput`] is returned, otherwise a `tuple` is
                returned where the first element is a list with the generated images.
        """

        # A PipelineCallback may declare its own tensor inputs; prefer those.
        if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)):
            callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs

        # 1. Check inputs. Raise error if not correct
        self.check_inputs(
            prompt=prompt,
            height=height,
            width=width,
            callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            prompt_attention_mask=prompt_attention_mask,
            negative_prompt_attention_mask=negative_prompt_attention_mask,
        )

        self._guidance_scale = guidance_scale
        self._guidance_rescale = guidance_rescale
        self._attention_kwargs = attention_kwargs
        self._interrupt = False
        self._current_timestep = None

        # 2. Define call parameters
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        device = self._execution_device

        # 3. Prepare text embeddings
        (
            prompt_embeds,
            prompt_attention_mask,
            negative_prompt_embeds,
            negative_prompt_attention_mask,
        ) = self.encode_prompt(
            prompt=prompt,
            negative_prompt=negative_prompt,
            do_classifier_free_guidance=self.do_classifier_free_guidance,
            num_videos_per_prompt=num_videos_per_prompt,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            prompt_attention_mask=prompt_attention_mask,
            negative_prompt_attention_mask=negative_prompt_attention_mask,
            max_sequence_length=max_sequence_length,
            device=device,
        )
        # For CFG the unconditional (negative) batch is concatenated ahead of the conditional one.
        if self.do_classifier_free_guidance:
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
            prompt_attention_mask = torch.cat([negative_prompt_attention_mask, prompt_attention_mask], dim=0)

        # Convert the 0/1 mask into an additive attention bias (masked positions get -1e6).
        additive_attention_mask = (1 - prompt_attention_mask.to(prompt_embeds.dtype)) * -1000000.0
        connector_prompt_embeds, connector_audio_prompt_embeds, connector_attention_mask = self.connectors(
            prompt_embeds, additive_attention_mask, additive_mask=True
        )

        # 4. Prepare latent variables
        latent_num_frames = (num_frames - 1) // self.vae_temporal_compression_ratio + 1
        latent_height = height // self.vae_spatial_compression_ratio
        latent_width = width // self.vae_spatial_compression_ratio
        if latents is not None:
            if latents.ndim == 5:
                logger.info(
                    "Got latents of shape [batch_size, latent_dim, latent_frames, latent_height, latent_width], `latent_num_frames`, `latent_height`, `latent_width` will be inferred."
                )
                _, _, latent_num_frames, latent_height, latent_width = latents.shape  # [B, C, F, H, W]
            elif latents.ndim == 3:
                logger.warning(
                    f"You have supplied packed `latents` of shape {latents.shape}, so the latent dims cannot be"
                    f" inferred. Make sure the supplied `height`, `width`, and `num_frames` are correct."
                )
            else:
                raise ValueError(
                    f"Provided `latents` tensor has shape {latents.shape}, but the expected shape is either [batch_size, seq_len, num_features] or [batch_size, latent_dim, latent_frames, latent_height, latent_width]."
                )
        video_sequence_length = latent_num_frames * latent_height * latent_width

        # Only preprocess the conditioning image when latents were not supplied.
        if latents is None:
            image = self.video_processor.preprocess(image, height=height, width=width)
            image = image.to(device=device, dtype=prompt_embeds.dtype)

        num_channels_latents = self.transformer.config.in_channels
        latents, conditioning_mask = self.prepare_latents(
            image,
            batch_size * num_videos_per_prompt,
            num_channels_latents,
            height,
            width,
            num_frames,
            noise_scale,
            torch.float32,
            device,
            generator,
            latents,
        )
        if self.do_classifier_free_guidance:
            conditioning_mask = torch.cat([conditioning_mask, conditioning_mask])

        # Derive the audio latent length from the video duration and the audio VAE's rates.
        duration_s = num_frames / frame_rate
        audio_latents_per_second = (
            self.audio_sampling_rate / self.audio_hop_length / float(self.audio_vae_temporal_compression_ratio)
        )
        audio_num_frames = round(duration_s * audio_latents_per_second)

        if audio_latents is not None:
            if audio_latents.ndim == 4:
                logger.info(
                    "Got audio_latents of shape [batch_size, num_channels, audio_length, mel_bins], `audio_num_frames` will be inferred."
                )
                _, _, audio_num_frames, _ = audio_latents.shape  # [B, C, L, M]
            elif audio_latents.ndim == 3:
                logger.warning(
                    f"You have supplied packed `audio_latents` of shape {audio_latents.shape}, so the latent dims"
                    f" cannot be inferred. Make sure the supplied `num_frames` and `frame_rate` are correct."
                )
            else:
                raise ValueError(
                    f"Provided `audio_latents` tensor has shape {audio_latents.shape}, but the expected shape is either [batch_size, seq_len, num_features] or [batch_size, num_channels, audio_length, mel_bins]."
                )

        # Fallback constants (64 mel bins, 8 channels) are only used when no audio VAE is loaded.
        num_mel_bins = self.audio_vae.config.mel_bins if getattr(self, "audio_vae", None) is not None else 64
        latent_mel_bins = num_mel_bins // self.audio_vae_mel_compression_ratio
        num_channels_latents_audio = (
            self.audio_vae.config.latent_channels if getattr(self, "audio_vae", None) is not None else 8
        )
        audio_latents = self.prepare_audio_latents(
            batch_size * num_videos_per_prompt,
            num_channels_latents=num_channels_latents_audio,
            audio_latent_length=audio_num_frames,
            num_mel_bins=num_mel_bins,
            noise_scale=noise_scale,
            dtype=torch.float32,
            device=device,
            generator=generator,
            latents=audio_latents,
        )

        # 5. Prepare timesteps
        sigmas = np.linspace(1.0, 1 / num_inference_steps, num_inference_steps) if sigmas is None else sigmas
        mu = calculate_shift(
            video_sequence_length,
            self.scheduler.config.get("base_image_seq_len", 1024),
            self.scheduler.config.get("max_image_seq_len", 4096),
            self.scheduler.config.get("base_shift", 0.95),
            self.scheduler.config.get("max_shift", 2.05),
        )
        # For now, duplicate the scheduler for use with the audio latents
        audio_scheduler = copy.deepcopy(self.scheduler)
        # Initialize the audio scheduler's timesteps; the returned values are unused because the
        # video scheduler's timesteps (identical config) drive the loop below.
        _, _ = retrieve_timesteps(
            audio_scheduler,
            num_inference_steps,
            device,
            timesteps,
            sigmas=sigmas,
            mu=mu,
        )
        timesteps, num_inference_steps = retrieve_timesteps(
            self.scheduler,
            num_inference_steps,
            device,
            timesteps,
            sigmas=sigmas,
            mu=mu,
        )
        num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
        self._num_timesteps = len(timesteps)

        # 6.
        # Prepare micro-conditions
        # NOTE(review): `rope_interpolation_scale` is computed but only referenced by the
        # commented-out transformer kwarg below — positional ids are precomputed via
        # `prepare_video_coords`/`prepare_audio_coords` instead. Confirm it can be removed.
        rope_interpolation_scale = (
            self.vae_temporal_compression_ratio / frame_rate,
            self.vae_spatial_compression_ratio,
            self.vae_spatial_compression_ratio,
        )

        # Pre-compute video and audio positional ids as they will be the same at each step of the denoising loop
        video_coords = self.transformer.rope.prepare_video_coords(
            latents.shape[0], latent_num_frames, latent_height, latent_width, latents.device, fps=frame_rate
        )
        audio_coords = self.transformer.audio_rope.prepare_audio_coords(
            audio_latents.shape[0], audio_num_frames, audio_latents.device
        )
        # Duplicate the positional ids as well if using CFG
        if self.do_classifier_free_guidance:
            video_coords = video_coords.repeat((2,) + (1,) * (video_coords.ndim - 1))  # Repeat twice in batch dim
            audio_coords = audio_coords.repeat((2,) + (1,) * (audio_coords.ndim - 1))

        # 7. Denoising loop
        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                if self.interrupt:
                    continue

                self._current_timestep = t
                # Double the batch for CFG (uncond + cond share one forward pass).
                latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
                latent_model_input = latent_model_input.to(prompt_embeds.dtype)
                audio_latent_model_input = (
                    torch.cat([audio_latents] * 2) if self.do_classifier_free_guidance else audio_latents
                )
                audio_latent_model_input = audio_latent_model_input.to(prompt_embeds.dtype)

                timestep = t.expand(latent_model_input.shape[0])
                # Conditioned (clean) tokens get timestep 0 so they are treated as already denoised.
                video_timestep = timestep.unsqueeze(-1) * (1 - conditioning_mask)

                with self.transformer.cache_context("cond_uncond"):
                    noise_pred_video, noise_pred_audio = self.transformer(
                        hidden_states=latent_model_input,
                        audio_hidden_states=audio_latent_model_input,
                        encoder_hidden_states=connector_prompt_embeds,
                        audio_encoder_hidden_states=connector_audio_prompt_embeds,
                        timestep=video_timestep,
                        audio_timestep=timestep,
                        encoder_attention_mask=connector_attention_mask,
                        audio_encoder_attention_mask=connector_attention_mask,
                        num_frames=latent_num_frames,
                        height=latent_height,
                        width=latent_width,
                        fps=frame_rate,
                        audio_num_frames=audio_num_frames,
                        video_coords=video_coords,
                        audio_coords=audio_coords,
                        # rope_interpolation_scale=rope_interpolation_scale,
                        attention_kwargs=attention_kwargs,
                        return_dict=False,
                    )
                noise_pred_video = noise_pred_video.float()
                noise_pred_audio = noise_pred_audio.float()

                # Classifier-free guidance: extrapolate from uncond towards cond predictions.
                if self.do_classifier_free_guidance:
                    noise_pred_video_uncond, noise_pred_video_text = noise_pred_video.chunk(2)
                    noise_pred_video = noise_pred_video_uncond + self.guidance_scale * (
                        noise_pred_video_text - noise_pred_video_uncond
                    )
                    noise_pred_audio_uncond, noise_pred_audio_text = noise_pred_audio.chunk(2)
                    noise_pred_audio = noise_pred_audio_uncond + self.guidance_scale * (
                        noise_pred_audio_text - noise_pred_audio_uncond
                    )
                    if self.guidance_rescale > 0:
                        # Based on 3.4. in https://huggingface.co/papers/2305.08891
                        noise_pred_video = rescale_noise_cfg(
                            noise_pred_video, noise_pred_video_text, guidance_rescale=self.guidance_rescale
                        )
                        noise_pred_audio = rescale_noise_cfg(
                            noise_pred_audio, noise_pred_audio_text, guidance_rescale=self.guidance_rescale
                        )

                # compute the previous noisy sample x_t -> x_t-1
                noise_pred_video = self._unpack_latents(
                    noise_pred_video,
                    latent_num_frames,
                    latent_height,
                    latent_width,
                    self.transformer_spatial_patch_size,
                    self.transformer_temporal_patch_size,
                )
                latents = self._unpack_latents(
                    latents,
                    latent_num_frames,
                    latent_height,
                    latent_width,
                    self.transformer_spatial_patch_size,
                    self.transformer_temporal_patch_size,
                )

                # Keep latent frame 0 (the clean image condition) fixed; only step frames 1+.
                noise_pred_video = noise_pred_video[:, :, 1:]
                noise_latents = latents[:, :, 1:]
                pred_latents = self.scheduler.step(noise_pred_video, t, noise_latents, return_dict=False)[0]

                latents = torch.cat([latents[:, :, :1], pred_latents], dim=2)
                latents = self._pack_latents(
                    latents, self.transformer_spatial_patch_size, self.transformer_temporal_patch_size
                )
                # NOTE: for now duplicate scheduler for audio latents in case self.scheduler sets internal state in
                # the step method (such as _step_index)
                audio_latents = audio_scheduler.step(noise_pred_audio, t, audio_latents, return_dict=False)[0]

                if callback_on_step_end is not None:
                    callback_kwargs = {}
                    for k in callback_on_step_end_tensor_inputs:
                        callback_kwargs[k] = locals()[k]
                    callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)

                    latents = callback_outputs.pop("latents", latents)
                    prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)

                # call the callback, if provided
                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                    progress_bar.update()

                if XLA_AVAILABLE:
                    xm.mark_step()

        # Unpack and denormalize back to VAE latent space before decoding.
        latents = self._unpack_latents(
            latents,
            latent_num_frames,
            latent_height,
            latent_width,
            self.transformer_spatial_patch_size,
            self.transformer_temporal_patch_size,
        )
        latents = self._denormalize_latents(
            latents, self.vae.latents_mean, self.vae.latents_std, self.vae.config.scaling_factor
        )
        audio_latents = self._denormalize_audio_latents(
            audio_latents, self.audio_vae.latents_mean, self.audio_vae.latents_std
        )
        audio_latents = self._unpack_audio_latents(audio_latents, audio_num_frames, num_mel_bins=latent_mel_bins)

        if output_type == "latent":
            video = latents
            audio = audio_latents
        else:
            latents = latents.to(prompt_embeds.dtype)

            # Optionally re-noise latents to `decode_timestep` when the VAE is timestep-conditioned.
            if not self.vae.config.timestep_conditioning:
                timestep = None
            else:
                noise = randn_tensor(latents.shape, generator=generator, device=device, dtype=latents.dtype)
                if not isinstance(decode_timestep, list):
                    decode_timestep = [decode_timestep] * batch_size
                if decode_noise_scale is None:
                    decode_noise_scale = decode_timestep
                elif not isinstance(decode_noise_scale, list):
                    decode_noise_scale = [decode_noise_scale] * batch_size
                timestep = torch.tensor(decode_timestep, device=device, dtype=latents.dtype)
                decode_noise_scale = torch.tensor(decode_noise_scale, device=device, dtype=latents.dtype)[
                    :, None, None, None, None
                ]
                latents = (1 - decode_noise_scale) * latents + decode_noise_scale * noise

            latents = latents.to(self.vae.dtype)
            video = self.vae.decode(latents, timestep, return_dict=False)[0]
            video = self.video_processor.postprocess_video(video, output_type=output_type)

            # Decode mel spectrograms with the audio VAE, then render a waveform with the vocoder.
            audio_latents = audio_latents.to(self.audio_vae.dtype)
            generated_mel_spectrograms = self.audio_vae.decode(audio_latents, return_dict=False)[0]
            audio = self.vocoder(generated_mel_spectrograms)

        # Offload all models
        self.maybe_free_model_hooks()

        if not return_dict:
            return (video, audio)

        return LTX2PipelineOutput(frames=video, audio=audio)
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/pipelines/ltx2/pipeline_ltx2_image2video.py", "license": "Apache License 2.0", "lines": 1174, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:src/diffusers/pipelines/ltx2/pipeline_ltx2_latent_upsample.py
# Copyright 2025 Lightricks and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from ...image_processor import PipelineImageInput from ...models import AutoencoderKLLTX2Video from ...utils import get_logger, replace_example_docstring from ...utils.torch_utils import randn_tensor from ...video_processor import VideoProcessor from ..ltx.pipeline_output import LTXPipelineOutput from ..pipeline_utils import DiffusionPipeline from .latent_upsampler import LTX2LatentUpsamplerModel logger = get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py >>> import torch >>> from diffusers import LTX2ImageToVideoPipeline, LTX2LatentUpsamplePipeline >>> from diffusers.pipelines.ltx2.export_utils import encode_video >>> from diffusers.pipelines.ltx2.latent_upsampler import LTX2LatentUpsamplerModel >>> from diffusers.utils import load_image >>> pipe = LTX2ImageToVideoPipeline.from_pretrained("Lightricks/LTX-2", torch_dtype=torch.bfloat16) >>> pipe.enable_model_cpu_offload() >>> image = load_image( ... "https://huggingface.co/datasets/a-r-r-o-w/tiny-meme-dataset-captioned/resolve/main/images/8.png" ... ) >>> prompt = "A young girl stands calmly in the foreground, looking directly at the camera, as a house fire rages in the background." >>> negative_prompt = "worst quality, inconsistent motion, blurry, jittery, distorted" >>> frame_rate = 24.0 >>> video, audio = pipe( ... image=image, ... prompt=prompt, ... 
negative_prompt=negative_prompt, ... width=768, ... height=512, ... num_frames=121, ... frame_rate=frame_rate, ... num_inference_steps=40, ... guidance_scale=4.0, ... output_type="pil", ... return_dict=False, ... ) >>> latent_upsampler = LTX2LatentUpsamplerModel.from_pretrained( ... "Lightricks/LTX-2", subfolder="latent_upsampler", torch_dtype=torch.bfloat16 ... ) >>> upsample_pipe = LTX2LatentUpsamplePipeline(vae=pipe.vae, latent_upsampler=latent_upsampler) >>> upsample_pipe.vae.enable_tiling() >>> upsample_pipe.to(device="cuda", dtype=torch.bfloat16) >>> video = upsample_pipe( ... video=video, ... width=768, ... height=512, ... output_type="np", ... return_dict=False, ... )[0] >>> encode_video( ... video[0], ... fps=frame_rate, ... audio=audio[0].float().cpu(), ... audio_sample_rate=pipe.vocoder.config.output_sampling_rate, # should be 24000 ... output_path="video.mp4", ... ) ``` """ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents def retrieve_latents( encoder_output: torch.Tensor, generator: torch.Generator | None = None, sample_mode: str = "sample" ): if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": return encoder_output.latent_dist.sample(generator) elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": return encoder_output.latent_dist.mode() elif hasattr(encoder_output, "latents"): return encoder_output.latents else: raise AttributeError("Could not access latents of provided encoder_output") class LTX2LatentUpsamplePipeline(DiffusionPipeline): model_cpu_offload_seq = "vae->latent_upsampler" def __init__( self, vae: AutoencoderKLLTX2Video, latent_upsampler: LTX2LatentUpsamplerModel, ) -> None: super().__init__() self.register_modules(vae=vae, latent_upsampler=latent_upsampler) self.vae_spatial_compression_ratio = ( self.vae.spatial_compression_ratio if getattr(self, "vae", None) is not None else 32 ) self.vae_temporal_compression_ratio = ( 
self.vae.temporal_compression_ratio if getattr(self, "vae", None) is not None else 8 ) self.video_processor = VideoProcessor(vae_scale_factor=self.vae_spatial_compression_ratio) def prepare_latents( self, video: torch.Tensor | None = None, batch_size: int = 1, num_frames: int = 121, height: int = 512, width: int = 768, spatial_patch_size: int = 1, temporal_patch_size: int = 1, dtype: torch.dtype | None = None, device: torch.device | None = None, generator: torch.Generator | None = None, latents: torch.Tensor | None = None, ) -> torch.Tensor: if latents is not None: if latents.ndim == 3: # Convert token seq [B, S, D] to latent video [B, C, F, H, W] latent_num_frames = (num_frames - 1) // self.vae_temporal_compression_ratio + 1 latent_height = height // self.vae_spatial_compression_ratio latent_width = width // self.vae_spatial_compression_ratio latents = self._unpack_latents( latents, latent_num_frames, latent_height, latent_width, spatial_patch_size, temporal_patch_size ) return latents.to(device=device, dtype=dtype) video = video.to(device=device, dtype=self.vae.dtype) if isinstance(generator, list): if len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
) init_latents = [ retrieve_latents(self.vae.encode(video[i].unsqueeze(0)), generator[i]) for i in range(batch_size) ] else: init_latents = [retrieve_latents(self.vae.encode(vid.unsqueeze(0)), generator) for vid in video] init_latents = torch.cat(init_latents, dim=0).to(dtype) # NOTE: latent upsampler operates on the unnormalized latents, so don't normalize here # init_latents = self._normalize_latents(init_latents, self.vae.latents_mean, self.vae.latents_std) return init_latents def adain_filter_latent(self, latents: torch.Tensor, reference_latents: torch.Tensor, factor: float = 1.0): """ Applies Adaptive Instance Normalization (AdaIN) to a latent tensor based on statistics from a reference latent tensor. Args: latent (`torch.Tensor`): Input latents to normalize reference_latents (`torch.Tensor`): The reference latents providing style statistics. factor (`float`): Blending factor between original and transformed latent. Range: -10.0 to 10.0, Default: 1.0 Returns: torch.Tensor: The transformed latent tensor """ result = latents.clone() for i in range(latents.size(0)): for c in range(latents.size(1)): r_sd, r_mean = torch.std_mean(reference_latents[i, c], dim=None) # index by original dim order i_sd, i_mean = torch.std_mean(result[i, c], dim=None) result[i, c] = ((result[i, c] - i_mean) / i_sd) * r_sd + r_mean result = torch.lerp(latents, result, factor) return result def tone_map_latents(self, latents: torch.Tensor, compression: float) -> torch.Tensor: """ Applies a non-linear tone-mapping function to latent values to reduce their dynamic range in a perceptually smooth way using a sigmoid-based compression. This is useful for regularizing high-variance latents or for conditioning outputs during generation, especially when controlling dynamic behavior with a `compression` factor. Args: latents : torch.Tensor Input latent tensor with arbitrary shape. Expected to be roughly in [-1, 1] or [0, 1] range. compression : float Compression strength in the range [0, 1]. 
- 0.0: No tone-mapping (identity transform) - 1.0: Full compression effect Returns: torch.Tensor The tone-mapped latent tensor of the same shape as input. """ # Remap [0-1] to [0-0.75] and apply sigmoid compression in one shot scale_factor = compression * 0.75 abs_latents = torch.abs(latents) # Sigmoid compression: sigmoid shifts large values toward 0.2, small values stay ~1.0 # When scale_factor=0, sigmoid term vanishes, when scale_factor=0.75, full effect sigmoid_term = torch.sigmoid(4.0 * scale_factor * (abs_latents - 1.0)) scales = 1.0 - 0.8 * scale_factor * sigmoid_term filtered = latents * scales return filtered @staticmethod # Copied from diffusers.pipelines.ltx2.pipeline_ltx2.LTX2Pipeline._denormalize_latents def _denormalize_latents( latents: torch.Tensor, latents_mean: torch.Tensor, latents_std: torch.Tensor, scaling_factor: float = 1.0 ) -> torch.Tensor: # Denormalize latents across the channel dimension [B, C, F, H, W] latents_mean = latents_mean.view(1, -1, 1, 1, 1).to(latents.device, latents.dtype) latents_std = latents_std.view(1, -1, 1, 1, 1).to(latents.device, latents.dtype) latents = latents * latents_std / scaling_factor + latents_mean return latents @staticmethod # Copied from diffusers.pipelines.ltx2.pipeline_ltx2.LTX2Pipeline._unpack_latents def _unpack_latents( latents: torch.Tensor, num_frames: int, height: int, width: int, patch_size: int = 1, patch_size_t: int = 1 ) -> torch.Tensor: # Packed latents of shape [B, S, D] (S is the effective video sequence length, D is the effective feature dimensions) # are unpacked and reshaped into a video tensor of shape [B, C, F, H, W]. This is the inverse operation of # what happens in the `_pack_latents` method. 
batch_size = latents.size(0) latents = latents.reshape(batch_size, num_frames, height, width, -1, patch_size_t, patch_size, patch_size) latents = latents.permute(0, 4, 1, 5, 2, 6, 3, 7).flatten(6, 7).flatten(4, 5).flatten(2, 3) return latents def check_inputs(self, video, height, width, latents, tone_map_compression_ratio): if height % self.vae_spatial_compression_ratio != 0 or width % self.vae_spatial_compression_ratio != 0: raise ValueError(f"`height` and `width` have to be divisible by 32 but are {height} and {width}.") if video is not None and latents is not None: raise ValueError("Only one of `video` or `latents` can be provided.") if video is None and latents is None: raise ValueError("One of `video` or `latents` has to be provided.") if not (0 <= tone_map_compression_ratio <= 1): raise ValueError("`tone_map_compression_ratio` must be in the range [0, 1]") @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, video: list[PipelineImageInput] | None = None, height: int = 512, width: int = 768, num_frames: int = 121, spatial_patch_size: int = 1, temporal_patch_size: int = 1, latents: torch.Tensor | None = None, latents_normalized: bool = False, decode_timestep: float | list[float] = 0.0, decode_noise_scale: float | list[float] | None = None, adain_factor: float = 0.0, tone_map_compression_ratio: float = 0.0, generator: torch.Generator | list[torch.Generator] | None = None, output_type: str | None = "pil", return_dict: bool = True, ): r""" Function invoked when calling the pipeline for generation. Args: video (`list[PipelineImageInput]`, *optional*) The video to be upsampled (such as a LTX 2.0 first stage output). If not supplied, `latents` should be supplied. height (`int`, *optional*, defaults to `512`): The height in pixels of the input video (not the generated video, which will have a larger resolution). 
width (`int`, *optional*, defaults to `768`): The width in pixels of the input video (not the generated video, which will have a larger resolution). num_frames (`int`, *optional*, defaults to `121`): The number of frames in the input video. spatial_patch_size (`int`, *optional*, defaults to `1`): The spatial patch size of the video latents. Used when `latents` is supplied if unpacking is necessary. temporal_patch_size (`int`, *optional*, defaults to `1`): The temporal patch size of the video latents. Used when `latents` is supplied if unpacking is necessary. latents (`torch.Tensor`, *optional*): Pre-generated video latents. This can be supplied in place of the `video` argument. Can either be a patch sequence of shape `(batch_size, seq_len, hidden_dim)` or a video latent of shape `(batch_size, latent_channels, latent_frames, latent_height, latent_width)`. latents_normalized (`bool`, *optional*, defaults to `False`) If `latents` are supplied, whether the `latents` are normalized using the VAE latent mean and std. If `True`, the `latents` will be denormalized before being supplied to the latent upsampler. decode_timestep (`float`, defaults to `0.0`): The timestep at which generated video is decoded. decode_noise_scale (`float`, defaults to `None`): The interpolation factor between random noise and denoised latents at the decode timestep. adain_factor (`float`, *optional*, defaults to `0.0`): Adaptive Instance Normalization (AdaIN) blending factor between the upsampled and original latents. Should be in [-10.0, 10.0]; supplying 0.0 (the default) means that AdaIN is not performed. tone_map_compression_ratio (`float`, *optional*, defaults to `0.0`): The compression strength for tone mapping, which will reduce the dynamic range of the latent values. This is useful for regularizing high-variance latents or for conditioning outputs during generation. 
Should be in [0, 1], where 0.0 (the default) means tone mapping is not applied and 1.0 corresponds to the full compression effect. generator (`torch.Generator` or `list[torch.Generator]`, *optional*): One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.ltx.LTXPipelineOutput`] instead of a plain tuple. Examples: Returns: [`~pipelines.ltx.LTXPipelineOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.ltx.LTXPipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is the upsampled video. """ self.check_inputs( video=video, height=height, width=width, latents=latents, tone_map_compression_ratio=tone_map_compression_ratio, ) if video is not None: # Batched video input is not yet tested/supported. TODO: take a look later batch_size = 1 else: batch_size = latents.shape[0] device = self._execution_device if video is not None: num_frames = len(video) if num_frames % self.vae_temporal_compression_ratio != 1: num_frames = ( num_frames // self.vae_temporal_compression_ratio * self.vae_temporal_compression_ratio + 1 ) video = video[:num_frames] logger.warning( f"Video length expected to be of the form `k * {self.vae_temporal_compression_ratio} + 1` but is {len(video)}. Truncating to {num_frames} frames." 
) video = self.video_processor.preprocess_video(video, height=height, width=width) video = video.to(device=device, dtype=torch.float32) latents_supplied = latents is not None latents = self.prepare_latents( video=video, batch_size=batch_size, num_frames=num_frames, height=height, width=width, spatial_patch_size=spatial_patch_size, temporal_patch_size=temporal_patch_size, dtype=torch.float32, device=device, generator=generator, latents=latents, ) if latents_supplied and latents_normalized: latents = self._denormalize_latents( latents, self.vae.latents_mean, self.vae.latents_std, self.vae.config.scaling_factor ) latents = latents.to(self.latent_upsampler.dtype) latents_upsampled = self.latent_upsampler(latents) if adain_factor > 0.0: latents = self.adain_filter_latent(latents_upsampled, latents, adain_factor) else: latents = latents_upsampled if tone_map_compression_ratio > 0.0: latents = self.tone_map_latents(latents, tone_map_compression_ratio) if output_type == "latent": video = latents else: if not self.vae.config.timestep_conditioning: timestep = None else: noise = randn_tensor(latents.shape, generator=generator, device=device, dtype=latents.dtype) if not isinstance(decode_timestep, list): decode_timestep = [decode_timestep] * batch_size if decode_noise_scale is None: decode_noise_scale = decode_timestep elif not isinstance(decode_noise_scale, list): decode_noise_scale = [decode_noise_scale] * batch_size timestep = torch.tensor(decode_timestep, device=device, dtype=latents.dtype) decode_noise_scale = torch.tensor(decode_noise_scale, device=device, dtype=latents.dtype)[ :, None, None, None, None ] latents = (1 - decode_noise_scale) * latents + decode_noise_scale * noise video = self.vae.decode(latents, timestep, return_dict=False)[0] video = self.video_processor.postprocess_video(video, output_type=output_type) # Offload all models self.maybe_free_model_hooks() if not return_dict: return (video,) return LTXPipelineOutput(frames=video)
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/pipelines/ltx2/pipeline_ltx2_latent_upsample.py", "license": "Apache License 2.0", "lines": 367, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:src/diffusers/pipelines/ltx2/pipeline_output.py
from dataclasses import dataclass import torch from diffusers.utils import BaseOutput @dataclass class LTX2PipelineOutput(BaseOutput): r""" Output class for LTX pipelines. Args: frames (`torch.Tensor`, `np.ndarray`, or list[list[PIL.Image.Image]]): List of video outputs - It can be a nested list of length `batch_size,` with each sub-list containing denoised PIL image sequences of length `num_frames.` It can also be a NumPy array or Torch tensor of shape `(batch_size, num_frames, channels, height, width)`. audio (`torch.Tensor`, `np.ndarray`): TODO """ frames: torch.Tensor audio: torch.Tensor
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/pipelines/ltx2/pipeline_output.py", "license": "Apache License 2.0", "lines": 17, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
documentation
huggingface/diffusers:src/diffusers/pipelines/ltx2/vocoder.py
import math import torch import torch.nn as nn import torch.nn.functional as F from ...configuration_utils import ConfigMixin, register_to_config from ...models.modeling_utils import ModelMixin class ResBlock(nn.Module): def __init__( self, channels: int, kernel_size: int = 3, stride: int = 1, dilations: tuple[int, ...] = (1, 3, 5), leaky_relu_negative_slope: float = 0.1, padding_mode: str = "same", ): super().__init__() self.dilations = dilations self.negative_slope = leaky_relu_negative_slope self.convs1 = nn.ModuleList( [ nn.Conv1d(channels, channels, kernel_size, stride=stride, dilation=dilation, padding=padding_mode) for dilation in dilations ] ) self.convs2 = nn.ModuleList( [ nn.Conv1d(channels, channels, kernel_size, stride=stride, dilation=1, padding=padding_mode) for _ in range(len(dilations)) ] ) def forward(self, x: torch.Tensor) -> torch.Tensor: for conv1, conv2 in zip(self.convs1, self.convs2): xt = F.leaky_relu(x, negative_slope=self.negative_slope) xt = conv1(xt) xt = F.leaky_relu(xt, negative_slope=self.negative_slope) xt = conv2(xt) x = x + xt return x class LTX2Vocoder(ModelMixin, ConfigMixin): r""" LTX 2.0 vocoder for converting generated mel spectrograms back to audio waveforms. 
""" @register_to_config def __init__( self, in_channels: int = 128, hidden_channels: int = 1024, out_channels: int = 2, upsample_kernel_sizes: list[int] = [16, 15, 8, 4, 4], upsample_factors: list[int] = [6, 5, 2, 2, 2], resnet_kernel_sizes: list[int] = [3, 7, 11], resnet_dilations: list[list[int]] = [[1, 3, 5], [1, 3, 5], [1, 3, 5]], leaky_relu_negative_slope: float = 0.1, output_sampling_rate: int = 24000, ): super().__init__() self.num_upsample_layers = len(upsample_kernel_sizes) self.resnets_per_upsample = len(resnet_kernel_sizes) self.out_channels = out_channels self.total_upsample_factor = math.prod(upsample_factors) self.negative_slope = leaky_relu_negative_slope if self.num_upsample_layers != len(upsample_factors): raise ValueError( f"`upsample_kernel_sizes` and `upsample_factors` should be lists of the same length but are length" f" {self.num_upsample_layers} and {len(upsample_factors)}, respectively." ) if self.resnets_per_upsample != len(resnet_dilations): raise ValueError( f"`resnet_kernel_sizes` and `resnet_dilations` should be lists of the same length but are length" f" {len(self.resnets_per_upsample)} and {len(resnet_dilations)}, respectively." 
) self.conv_in = nn.Conv1d(in_channels, hidden_channels, kernel_size=7, stride=1, padding=3) self.upsamplers = nn.ModuleList() self.resnets = nn.ModuleList() input_channels = hidden_channels for i, (stride, kernel_size) in enumerate(zip(upsample_factors, upsample_kernel_sizes)): output_channels = input_channels // 2 self.upsamplers.append( nn.ConvTranspose1d( input_channels, # hidden_channels // (2 ** i) output_channels, # hidden_channels // (2 ** (i + 1)) kernel_size, stride=stride, padding=(kernel_size - stride) // 2, ) ) for kernel_size, dilations in zip(resnet_kernel_sizes, resnet_dilations): self.resnets.append( ResBlock( output_channels, kernel_size, dilations=dilations, leaky_relu_negative_slope=leaky_relu_negative_slope, ) ) input_channels = output_channels self.conv_out = nn.Conv1d(output_channels, out_channels, 7, stride=1, padding=3) def forward(self, hidden_states: torch.Tensor, time_last: bool = False) -> torch.Tensor: r""" Forward pass of the vocoder. Args: hidden_states (`torch.Tensor`): Input Mel spectrogram tensor of shape `(batch_size, num_channels, time, num_mel_bins)` if `time_last` is `False` (the default) or shape `(batch_size, num_channels, num_mel_bins, time)` if `time_last` is `True`. time_last (`bool`, *optional*, defaults to `False`): Whether the last dimension of the input is the time/frame dimension or the Mel bins dimension. 
Returns: `torch.Tensor`: Audio waveform tensor of shape (batch_size, out_channels, audio_length) """ # Ensure that the time/frame dimension is last if not time_last: hidden_states = hidden_states.transpose(2, 3) # Combine channels and frequency (mel bins) dimensions hidden_states = hidden_states.flatten(1, 2) hidden_states = self.conv_in(hidden_states) for i in range(self.num_upsample_layers): hidden_states = F.leaky_relu(hidden_states, negative_slope=self.negative_slope) hidden_states = self.upsamplers[i](hidden_states) # Run all resnets in parallel on hidden_states start = i * self.resnets_per_upsample end = (i + 1) * self.resnets_per_upsample resnet_outputs = torch.stack([self.resnets[j](hidden_states) for j in range(start, end)], dim=0) hidden_states = torch.mean(resnet_outputs, dim=0) # NOTE: unlike the first leaky ReLU, this leaky ReLU is set to use the default F.leaky_relu negative slope of # 0.01 (whereas the others usually use a slope of 0.1). Not sure if this is intended hidden_states = F.leaky_relu(hidden_states, negative_slope=0.01) hidden_states = self.conv_out(hidden_states) hidden_states = torch.tanh(hidden_states) return hidden_states
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/pipelines/ltx2/vocoder.py", "license": "Apache License 2.0", "lines": 132, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
huggingface/diffusers:tests/models/autoencoders/test_models_autoencoder_kl_ltx2_audio.py
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from diffusers import AutoencoderKLLTX2Audio from ...testing_utils import ( floats_tensor, torch_device, ) from ..test_modeling_common import ModelTesterMixin from .testing_utils import AutoencoderTesterMixin class AutoencoderKLLTX2AudioTests(ModelTesterMixin, AutoencoderTesterMixin, unittest.TestCase): model_class = AutoencoderKLLTX2Audio main_input_name = "sample" base_precision = 1e-2 def get_autoencoder_kl_ltx_video_config(self): return { "in_channels": 2, # stereo, "output_channels": 2, "latent_channels": 4, "base_channels": 16, "ch_mult": (1, 2, 4), "resolution": 16, "attn_resolutions": None, "num_res_blocks": 2, "norm_type": "pixel", "causality_axis": "height", "mid_block_add_attention": False, "sample_rate": 16000, "mel_hop_length": 160, "mel_bins": 16, "is_causal": True, "double_z": True, } @property def dummy_input(self): batch_size = 2 num_channels = 2 num_frames = 8 num_mel_bins = 16 spectrogram = floats_tensor((batch_size, num_channels, num_frames, num_mel_bins)).to(torch_device) input_dict = {"sample": spectrogram} return input_dict @property def input_shape(self): return (2, 5, 16) @property def output_shape(self): return (2, 5, 16) def prepare_init_args_and_inputs_for_common(self): init_dict = self.get_autoencoder_kl_ltx_video_config() inputs_dict = self.dummy_input return init_dict, inputs_dict # Overriding as output shape is not the same as input shape 
for LTX 2.0 audio VAE def test_output(self): super().test_output(expected_output_shape=(2, 2, 5, 16)) @unittest.skip("Unsupported test.") def test_outputs_equivalence(self): pass @unittest.skip("AutoencoderKLLTX2Audio does not support `norm_num_groups` because it does not use GroupNorm.") def test_forward_with_norm_groups(self): pass
{ "repo_id": "huggingface/diffusers", "file_path": "tests/models/autoencoders/test_models_autoencoder_kl_ltx2_audio.py", "license": "Apache License 2.0", "lines": 73, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/diffusers:tests/models/autoencoders/test_models_autoencoder_ltx2_video.py
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from diffusers import AutoencoderKLLTX2Video from ...testing_utils import ( enable_full_determinism, floats_tensor, torch_device, ) from ..test_modeling_common import ModelTesterMixin from .testing_utils import AutoencoderTesterMixin enable_full_determinism() class AutoencoderKLLTX2VideoTests(ModelTesterMixin, AutoencoderTesterMixin, unittest.TestCase): model_class = AutoencoderKLLTX2Video main_input_name = "sample" base_precision = 1e-2 def get_autoencoder_kl_ltx_video_config(self): return { "in_channels": 3, "out_channels": 3, "latent_channels": 8, "block_out_channels": (8, 8, 8, 8), "decoder_block_out_channels": (16, 32, 64), "layers_per_block": (1, 1, 1, 1, 1), "decoder_layers_per_block": (1, 1, 1, 1), "spatio_temporal_scaling": (True, True, True, True), "decoder_spatio_temporal_scaling": (True, True, True), "decoder_inject_noise": (False, False, False, False), "downsample_type": ("spatial", "temporal", "spatiotemporal", "spatiotemporal"), "upsample_residual": (True, True, True), "upsample_factor": (2, 2, 2), "timestep_conditioning": False, "patch_size": 1, "patch_size_t": 1, "encoder_causal": True, "decoder_causal": False, "encoder_spatial_padding_mode": "zeros", # Full model uses `reflect` but this does not have deterministic backward implementation, so use `zeros` "decoder_spatial_padding_mode": "zeros", } @property def dummy_input(self): batch_size = 2 
num_frames = 9 num_channels = 3 sizes = (16, 16) image = floats_tensor((batch_size, num_channels, num_frames) + sizes).to(torch_device) input_dict = {"sample": image} return input_dict @property def input_shape(self): return (3, 9, 16, 16) @property def output_shape(self): return (3, 9, 16, 16) def prepare_init_args_and_inputs_for_common(self): init_dict = self.get_autoencoder_kl_ltx_video_config() inputs_dict = self.dummy_input return init_dict, inputs_dict def test_gradient_checkpointing_is_applied(self): expected_set = { "LTX2VideoEncoder3d", "LTX2VideoDecoder3d", "LTX2VideoDownBlock3D", "LTX2VideoMidBlock3d", "LTX2VideoUpBlock3d", } super().test_gradient_checkpointing_is_applied(expected_set=expected_set) @unittest.skip("Unsupported test.") def test_outputs_equivalence(self): pass @unittest.skip("AutoencoderKLLTXVideo does not support `norm_num_groups` because it does not use GroupNorm.") def test_forward_with_norm_groups(self): pass
{ "repo_id": "huggingface/diffusers", "file_path": "tests/models/autoencoders/test_models_autoencoder_ltx2_video.py", "license": "Apache License 2.0", "lines": 86, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/diffusers:tests/models/transformers/test_models_transformer_ltx2.py
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import torch

from diffusers import LTX2VideoTransformer3DModel

from ...testing_utils import enable_full_determinism, torch_device
from ..test_modeling_common import ModelTesterMixin, TorchCompileTesterMixin


enable_full_determinism()


class LTX2TransformerTests(ModelTesterMixin, unittest.TestCase):
    """Fast functional tests for `LTX2VideoTransformer3DModel` with a tiny configuration."""

    model_class = LTX2VideoTransformer3DModel
    main_input_name = "hidden_states"
    uses_custom_attn_processor = True

    @property
    def dummy_input(self):
        """Return a minimal set of random video/audio/text inputs matching the tiny config."""
        # Common
        batch_size = 2

        # Video: 2 latent frames on a 16x16 latent grid, 4 channels per token.
        num_frames = 2
        num_channels = 4
        height = 16
        width = 16

        # Audio: 9 latent frames; channels and mel bins are folded into the last dim.
        audio_num_frames = 9
        audio_num_channels = 2
        num_mel_bins = 2

        # Text
        embedding_dim = 16
        sequence_length = 16

        # Video tokens are pre-patchified: one token per (frame, y, x) position.
        hidden_states = torch.randn((batch_size, num_frames * height * width, num_channels)).to(torch_device)
        audio_hidden_states = torch.randn((batch_size, audio_num_frames, audio_num_channels * num_mel_bins)).to(
            torch_device
        )
        encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device)
        audio_encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device)
        encoder_attention_mask = torch.ones((batch_size, sequence_length)).bool().to(torch_device)
        timestep = torch.rand((batch_size,)).to(torch_device) * 1000

        return {
            "hidden_states": hidden_states,
            "audio_hidden_states": audio_hidden_states,
            "encoder_hidden_states": encoder_hidden_states,
            "audio_encoder_hidden_states": audio_encoder_hidden_states,
            "timestep": timestep,
            "encoder_attention_mask": encoder_attention_mask,
            "num_frames": num_frames,
            "height": height,
            "width": width,
            "audio_num_frames": audio_num_frames,
            "fps": 25.0,
        }

    @property
    def input_shape(self):
        # (tokens, channels) of the patchified video stream: 2 * 16 * 16 = 512 tokens.
        return (512, 4)

    @property
    def output_shape(self):
        return (512, 4)

    def prepare_init_args_and_inputs_for_common(self):
        """Return a tiny model config plus matching dummy inputs for the shared model tests."""
        init_dict = {
            "in_channels": 4,
            "out_channels": 4,
            "patch_size": 1,
            "patch_size_t": 1,
            "num_attention_heads": 2,
            "attention_head_dim": 8,
            "cross_attention_dim": 16,
            "audio_in_channels": 4,
            "audio_out_channels": 4,
            "audio_num_attention_heads": 2,
            "audio_attention_head_dim": 4,
            "audio_cross_attention_dim": 8,
            "num_layers": 2,
            "qk_norm": "rms_norm_across_heads",
            "caption_channels": 16,
            "rope_double_precision": False,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_gradient_checkpointing_is_applied(self):
        expected_set = {"LTX2VideoTransformer3DModel"}
        super().test_gradient_checkpointing_is_applied(expected_set=expected_set)

    # NOTE(review): a large commented-out draft of `test_ltx2_consistency` was removed here.
    # It depended on the private checkpoint "diffusers-internal-dev/dummy-ltx2" and on an
    # unimported `attention_backend` helper; recover it from version control once those
    # dependencies are available.


class LTX2TransformerCompileTests(TorchCompileTesterMixin, unittest.TestCase):
    """torch.compile smoke tests reusing the tiny configuration from the fast tests above."""

    model_class = LTX2VideoTransformer3DModel

    def prepare_init_args_and_inputs_for_common(self):
        return LTX2TransformerTests().prepare_init_args_and_inputs_for_common()
{ "repo_id": "huggingface/diffusers", "file_path": "tests/models/transformers/test_models_transformer_ltx2.py", "license": "Apache License 2.0", "lines": 187, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/diffusers:tests/pipelines/ltx2/test_ltx2.py
# Copyright 2025 The HuggingFace Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import torch
from transformers import AutoTokenizer, Gemma3ForConditionalGeneration

from diffusers import (
    AutoencoderKLLTX2Audio,
    AutoencoderKLLTX2Video,
    FlowMatchEulerDiscreteScheduler,
    LTX2Pipeline,
    LTX2VideoTransformer3DModel,
)
from diffusers.pipelines.ltx2 import LTX2TextConnectors
from diffusers.pipelines.ltx2.vocoder import LTX2Vocoder

from ...testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class LTX2PipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests for the LTX 2.0 text-to-(video+audio) pipeline built from tiny components."""

    pipeline_class = LTX2Pipeline
    params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs"}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "audio_latents",
            "output_type",
            "return_dict",
            "callback_on_step_end",
            "callback_on_step_end_tensor_inputs",
        ]
    )
    test_attention_slicing = False
    test_xformers_attention = False
    supports_dduf = False

    # Tiny Gemma3 checkpoint used as the text encoder in all fast tests.
    base_text_encoder_ckpt_id = "hf-internal-testing/tiny-gemma3"

    def get_dummy_components(self):
        """Build the smallest viable set of pipeline components, seeded for determinism."""
        tokenizer = AutoTokenizer.from_pretrained(self.base_text_encoder_ckpt_id)
        text_encoder = Gemma3ForConditionalGeneration.from_pretrained(self.base_text_encoder_ckpt_id)

        torch.manual_seed(0)
        transformer = LTX2VideoTransformer3DModel(
            in_channels=4,
            out_channels=4,
            patch_size=1,
            patch_size_t=1,
            num_attention_heads=2,
            attention_head_dim=8,
            cross_attention_dim=16,
            audio_in_channels=4,
            audio_out_channels=4,
            audio_num_attention_heads=2,
            audio_attention_head_dim=4,
            audio_cross_attention_dim=8,
            num_layers=2,
            qk_norm="rms_norm_across_heads",
            caption_channels=text_encoder.config.text_config.hidden_size,
            rope_double_precision=False,
            rope_type="split",
        )

        torch.manual_seed(0)
        connectors = LTX2TextConnectors(
            caption_channels=text_encoder.config.text_config.hidden_size,
            text_proj_in_factor=text_encoder.config.text_config.num_hidden_layers + 1,
            video_connector_num_attention_heads=4,
            video_connector_attention_head_dim=8,
            video_connector_num_layers=1,
            video_connector_num_learnable_registers=None,
            audio_connector_num_attention_heads=4,
            audio_connector_attention_head_dim=8,
            audio_connector_num_layers=1,
            audio_connector_num_learnable_registers=None,
            connector_rope_base_seq_len=32,
            rope_theta=10000.0,
            rope_double_precision=False,
            causal_temporal_positioning=False,
            rope_type="split",
        )

        torch.manual_seed(0)
        vae = AutoencoderKLLTX2Video(
            in_channels=3,
            out_channels=3,
            latent_channels=4,
            block_out_channels=(8,),
            decoder_block_out_channels=(8,),
            layers_per_block=(1,),
            decoder_layers_per_block=(1, 1),
            spatio_temporal_scaling=(True,),
            decoder_spatio_temporal_scaling=(True,),
            decoder_inject_noise=(False, False),
            downsample_type=("spatial",),
            upsample_residual=(False,),
            upsample_factor=(1,),
            timestep_conditioning=False,
            patch_size=1,
            patch_size_t=1,
            encoder_causal=True,
            decoder_causal=False,
        )
        # Single-pass encode/decode is sufficient for the tiny test tensors.
        vae.use_framewise_encoding = False
        vae.use_framewise_decoding = False

        torch.manual_seed(0)
        audio_vae = AutoencoderKLLTX2Audio(
            base_channels=4,
            output_channels=2,
            ch_mult=(1,),
            num_res_blocks=1,
            attn_resolutions=None,
            in_channels=2,
            resolution=32,
            latent_channels=2,
            norm_type="pixel",
            causality_axis="height",
            dropout=0.0,
            mid_block_add_attention=False,
            sample_rate=16000,
            mel_hop_length=160,
            is_causal=True,
            mel_bins=8,
        )

        torch.manual_seed(0)
        vocoder = LTX2Vocoder(
            # Vocoder consumes the audio VAE's decoded mel output (channels x mel bins).
            in_channels=audio_vae.config.output_channels * audio_vae.config.mel_bins,
            hidden_channels=32,
            out_channels=2,
            upsample_kernel_sizes=[4, 4],
            upsample_factors=[2, 2],
            resnet_kernel_sizes=[3],
            resnet_dilations=[[1, 3, 5]],
            leaky_relu_negative_slope=0.1,
            output_sampling_rate=16000,
        )

        scheduler = FlowMatchEulerDiscreteScheduler()

        components = {
            "transformer": transformer,
            "vae": vae,
            "audio_vae": audio_vae,
            "scheduler": scheduler,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "connectors": connectors,
            "vocoder": vocoder,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Return a small text-to-video call signature (32x32, 5 frames, 2 denoising steps)."""
        if str(device).startswith("mps"):
            # MPS generators are not seedable per-device; fall back to the global generator.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "prompt": "a robot dancing",
            "negative_prompt": "",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 1.0,
            "height": 32,
            "width": 32,
            "num_frames": 5,
            "frame_rate": 25.0,
            "max_sequence_length": 16,
            "output_type": "pt",
        }

        return inputs

    def test_inference(self):
        """End-to-end run on CPU: check output shapes and pin exact first/last output values."""
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        video = output.frames
        audio = output.audio

        self.assertEqual(video.shape, (1, 5, 3, 32, 32))
        self.assertEqual(audio.shape[0], 1)
        self.assertEqual(audio.shape[1], components["vocoder"].config.out_channels)

        # fmt: off
        expected_video_slice = torch.tensor(
            [
                0.4331, 0.6203, 0.3245, 0.7294, 0.4822, 0.5703, 0.2999, 0.7700,
                0.4961, 0.4242, 0.4581, 0.4351, 0.1137, 0.4437, 0.6304, 0.3184
            ]
        )
        expected_audio_slice = torch.tensor(
            [
                0.0263, 0.0528, 0.1217, 0.1104, 0.1632, 0.1072, 0.1789, 0.0949,
                0.0672, -0.0069, 0.0688, 0.0097, 0.0808, 0.1231, 0.0986, 0.0739
            ]
        )
        # fmt: on

        video = video.flatten()
        audio = audio.flatten()
        # Compare the first and last 8 elements of the flattened outputs.
        generated_video_slice = torch.cat([video[:8], video[-8:]])
        generated_audio_slice = torch.cat([audio[:8], audio[-8:]])

        assert torch.allclose(expected_video_slice, generated_video_slice, atol=1e-4, rtol=1e-4)
        assert torch.allclose(expected_audio_slice, generated_audio_slice, atol=1e-4, rtol=1e-4)

    def test_two_stages_inference(self):
        """Run stage 1 to latents, then feed those latents back as a refinement pass."""
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "latent"
        first_stage_output = pipe(**inputs)
        video_latent = first_stage_output.frames
        audio_latent = first_stage_output.audio

        self.assertEqual(video_latent.shape, (1, 4, 3, 16, 16))
        self.assertEqual(audio_latent.shape, (1, 2, 5, 2))
        self.assertEqual(audio_latent.shape[1], components["vocoder"].config.out_channels)

        inputs["latents"] = video_latent
        inputs["audio_latents"] = audio_latent
        inputs["output_type"] = "pt"
        second_stage_output = pipe(**inputs)
        video = second_stage_output.frames
        audio = second_stage_output.audio

        self.assertEqual(video.shape, (1, 5, 3, 32, 32))
        self.assertEqual(audio.shape[0], 1)
        self.assertEqual(audio.shape[1], components["vocoder"].config.out_channels)

        # fmt: off
        expected_video_slice = torch.tensor(
            [
                0.5514, 0.5943, 0.4260, 0.5971, 0.4306, 0.6369, 0.3124, 0.6964,
                0.5419, 0.2412, 0.3882, 0.4504, 0.1941, 0.3404, 0.6037, 0.2464
            ]
        )
        expected_audio_slice = torch.tensor(
            [
                0.0252, 0.0526, 0.1211, 0.1119, 0.1638, 0.1042, 0.1776, 0.0948,
                0.0672, -0.0069, 0.0688, 0.0097, 0.0808, 0.1231, 0.0986, 0.0739
            ]
        )
        # fmt: on

        video = video.flatten()
        audio = audio.flatten()
        generated_video_slice = torch.cat([video[:8], video[-8:]])
        generated_audio_slice = torch.cat([audio[:8], audio[-8:]])

        assert torch.allclose(expected_video_slice, generated_video_slice, atol=1e-4, rtol=1e-4)
        assert torch.allclose(expected_audio_slice, generated_audio_slice, atol=1e-4, rtol=1e-4)

    def test_inference_batch_single_identical(self):
        # Batched vs single-sample outputs may differ slightly; widen the tolerance.
        self._test_inference_batch_single_identical(batch_size=2, expected_max_diff=2e-2)
{ "repo_id": "huggingface/diffusers", "file_path": "tests/pipelines/ltx2/test_ltx2.py", "license": "Apache License 2.0", "lines": 251, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/diffusers:tests/pipelines/ltx2/test_ltx2_image2video.py
# Copyright 2025 The HuggingFace Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import torch
from transformers import AutoTokenizer, Gemma3ForConditionalGeneration

from diffusers import (
    AutoencoderKLLTX2Audio,
    AutoencoderKLLTX2Video,
    FlowMatchEulerDiscreteScheduler,
    LTX2ImageToVideoPipeline,
    LTX2VideoTransformer3DModel,
)
from diffusers.pipelines.ltx2 import LTX2LatentUpsamplePipeline, LTX2TextConnectors
from diffusers.pipelines.ltx2.latent_upsampler import LTX2LatentUpsamplerModel
from diffusers.pipelines.ltx2.vocoder import LTX2Vocoder

from ...testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class LTX2ImageToVideoPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests for the LTX 2.0 image-to-(video+audio) pipeline built from tiny components."""

    pipeline_class = LTX2ImageToVideoPipeline
    params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs"}
    # Image-to-video additionally batches over the conditioning image.
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS.union({"image"})
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "audio_latents",
            "return_dict",
            "callback_on_step_end",
            "callback_on_step_end_tensor_inputs",
        ]
    )
    test_attention_slicing = False
    test_xformers_attention = False
    supports_dduf = False

    # Tiny Gemma3 checkpoint used as the text encoder in all fast tests.
    base_text_encoder_ckpt_id = "hf-internal-testing/tiny-gemma3"

    def get_dummy_components(self):
        """Build the smallest viable set of pipeline components, seeded for determinism."""
        tokenizer = AutoTokenizer.from_pretrained(self.base_text_encoder_ckpt_id)
        text_encoder = Gemma3ForConditionalGeneration.from_pretrained(self.base_text_encoder_ckpt_id)

        torch.manual_seed(0)
        transformer = LTX2VideoTransformer3DModel(
            in_channels=4,
            out_channels=4,
            patch_size=1,
            patch_size_t=1,
            num_attention_heads=2,
            attention_head_dim=8,
            cross_attention_dim=16,
            audio_in_channels=4,
            audio_out_channels=4,
            audio_num_attention_heads=2,
            audio_attention_head_dim=4,
            audio_cross_attention_dim=8,
            num_layers=2,
            qk_norm="rms_norm_across_heads",
            caption_channels=text_encoder.config.text_config.hidden_size,
            rope_double_precision=False,
            rope_type="split",
        )

        torch.manual_seed(0)
        connectors = LTX2TextConnectors(
            caption_channels=text_encoder.config.text_config.hidden_size,
            text_proj_in_factor=text_encoder.config.text_config.num_hidden_layers + 1,
            video_connector_num_attention_heads=4,
            video_connector_attention_head_dim=8,
            video_connector_num_layers=1,
            video_connector_num_learnable_registers=None,
            audio_connector_num_attention_heads=4,
            audio_connector_attention_head_dim=8,
            audio_connector_num_layers=1,
            audio_connector_num_learnable_registers=None,
            connector_rope_base_seq_len=32,
            rope_theta=10000.0,
            rope_double_precision=False,
            causal_temporal_positioning=False,
            rope_type="split",
        )

        torch.manual_seed(0)
        vae = AutoencoderKLLTX2Video(
            in_channels=3,
            out_channels=3,
            latent_channels=4,
            block_out_channels=(8,),
            decoder_block_out_channels=(8,),
            layers_per_block=(1,),
            decoder_layers_per_block=(1, 1),
            spatio_temporal_scaling=(True,),
            decoder_spatio_temporal_scaling=(True,),
            decoder_inject_noise=(False, False),
            downsample_type=("spatial",),
            upsample_residual=(False,),
            upsample_factor=(1,),
            timestep_conditioning=False,
            patch_size=1,
            patch_size_t=1,
            encoder_causal=True,
            decoder_causal=False,
        )
        # Single-pass encode/decode is sufficient for the tiny test tensors.
        vae.use_framewise_encoding = False
        vae.use_framewise_decoding = False

        torch.manual_seed(0)
        audio_vae = AutoencoderKLLTX2Audio(
            base_channels=4,
            output_channels=2,
            ch_mult=(1,),
            num_res_blocks=1,
            attn_resolutions=None,
            in_channels=2,
            resolution=32,
            latent_channels=2,
            norm_type="pixel",
            causality_axis="height",
            dropout=0.0,
            mid_block_add_attention=False,
            sample_rate=16000,
            mel_hop_length=160,
            is_causal=True,
            mel_bins=8,
        )

        torch.manual_seed(0)
        vocoder = LTX2Vocoder(
            # Vocoder consumes the audio VAE's decoded mel output (channels x mel bins).
            in_channels=audio_vae.config.output_channels * audio_vae.config.mel_bins,
            hidden_channels=32,
            out_channels=2,
            upsample_kernel_sizes=[4, 4],
            upsample_factors=[2, 2],
            resnet_kernel_sizes=[3],
            resnet_dilations=[[1, 3, 5]],
            leaky_relu_negative_slope=0.1,
            output_sampling_rate=16000,
        )

        scheduler = FlowMatchEulerDiscreteScheduler()

        components = {
            "transformer": transformer,
            "vae": vae,
            "audio_vae": audio_vae,
            "scheduler": scheduler,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "connectors": connectors,
            "vocoder": vocoder,
        }
        return components

    def get_dummy_upsample_component(self, in_channels=4, mid_channels=32, num_blocks_per_stage=1):
        """Return a tiny latent upsampler for the two-stage + upsample test."""
        upsampler = LTX2LatentUpsamplerModel(
            in_channels=in_channels,
            mid_channels=mid_channels,
            num_blocks_per_stage=num_blocks_per_stage,
        )
        return upsampler

    def get_dummy_inputs(self, device, seed=0):
        """Return a small image-to-video call signature (32x32, 5 frames, 2 denoising steps)."""
        if str(device).startswith("mps"):
            # MPS generators are not seedable per-device; fall back to the global generator.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        # Single 32x32 RGB conditioning image.
        image = torch.rand((1, 3, 32, 32), generator=generator, device=device)

        inputs = {
            "image": image,
            "prompt": "a robot dancing",
            "negative_prompt": "",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 1.0,
            "height": 32,
            "width": 32,
            "num_frames": 5,
            "frame_rate": 25.0,
            "max_sequence_length": 16,
            "output_type": "pt",
        }

        return inputs

    def test_inference(self):
        """End-to-end run on CPU: check output shapes and pin exact first/last output values."""
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        video = output.frames
        audio = output.audio

        self.assertEqual(video.shape, (1, 5, 3, 32, 32))
        self.assertEqual(audio.shape[0], 1)
        self.assertEqual(audio.shape[1], components["vocoder"].config.out_channels)

        # fmt: off
        expected_video_slice = torch.tensor(
            [
                0.3573, 0.8382, 0.3581, 0.6114, 0.3682, 0.7969, 0.2552, 0.6399,
                0.3113, 0.1497, 0.3249, 0.5395, 0.3498, 0.4526, 0.4536, 0.4555
            ]
        )
        expected_audio_slice = torch.tensor(
            [
                0.0294, 0.0498, 0.1269, 0.1135, 0.1639, 0.1116, 0.1730, 0.0931,
                0.0672, -0.0069, 0.0688, 0.0097, 0.0808, 0.1231, 0.0986, 0.0739
            ]
        )
        # fmt: on

        video = video.flatten()
        audio = audio.flatten()
        # Compare the first and last 8 elements of the flattened outputs.
        generated_video_slice = torch.cat([video[:8], video[-8:]])
        generated_audio_slice = torch.cat([audio[:8], audio[-8:]])

        assert torch.allclose(expected_video_slice, generated_video_slice, atol=1e-4, rtol=1e-4)
        assert torch.allclose(expected_audio_slice, generated_audio_slice, atol=1e-4, rtol=1e-4)

    def test_two_stages_inference(self):
        """Run stage 1 to latents, then feed those latents back as a refinement pass."""
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "latent"
        first_stage_output = pipe(**inputs)
        video_latent = first_stage_output.frames
        audio_latent = first_stage_output.audio

        self.assertEqual(video_latent.shape, (1, 4, 3, 16, 16))
        self.assertEqual(audio_latent.shape, (1, 2, 5, 2))
        self.assertEqual(audio_latent.shape[1], components["vocoder"].config.out_channels)

        inputs["latents"] = video_latent
        inputs["audio_latents"] = audio_latent
        inputs["output_type"] = "pt"
        second_stage_output = pipe(**inputs)
        video = second_stage_output.frames
        audio = second_stage_output.audio

        self.assertEqual(video.shape, (1, 5, 3, 32, 32))
        self.assertEqual(audio.shape[0], 1)
        self.assertEqual(audio.shape[1], components["vocoder"].config.out_channels)

        # fmt: off
        expected_video_slice = torch.tensor(
            [
                0.2665, 0.6915, 0.2939, 0.6767, 0.2552, 0.6215, 0.1765, 0.6248,
                0.2800, 0.2356, 0.3480, 0.5395, 0.3190, 0.4128, 0.4784, 0.4086
            ]
        )
        expected_audio_slice = torch.tensor(
            [
                0.0273, 0.0490, 0.1253, 0.1129, 0.1655, 0.1057, 0.1707, 0.0943,
                0.0672, -0.0069, 0.0688, 0.0097, 0.0808, 0.1231, 0.0986, 0.0739
            ]
        )
        # fmt: on

        video = video.flatten()
        audio = audio.flatten()
        generated_video_slice = torch.cat([video[:8], video[-8:]])
        generated_audio_slice = torch.cat([audio[:8], audio[-8:]])

        assert torch.allclose(expected_video_slice, generated_video_slice, atol=1e-4, rtol=1e-4)
        assert torch.allclose(expected_audio_slice, generated_audio_slice, atol=1e-4, rtol=1e-4)

    def test_two_stages_inference_with_upsampler(self):
        """Stage 1 to latents, 2x latent upsample, then a second pass at the higher resolution."""
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "latent"
        first_stage_output = pipe(**inputs)
        video_latent = first_stage_output.frames
        audio_latent = first_stage_output.audio

        self.assertEqual(video_latent.shape, (1, 4, 3, 16, 16))
        self.assertEqual(audio_latent.shape, (1, 2, 5, 2))
        self.assertEqual(audio_latent.shape[1], components["vocoder"].config.out_channels)

        upsampler = self.get_dummy_upsample_component(in_channels=video_latent.shape[1])
        upsample_pipe = LTX2LatentUpsamplePipeline(vae=pipe.vae, latent_upsampler=upsampler)

        # Spatially upsample the stage-1 latents (16x16 -> 32x32).
        upscaled_video_latent = upsample_pipe(latents=video_latent, output_type="latent", return_dict=False)[0]
        self.assertEqual(upscaled_video_latent.shape, (1, 4, 3, 32, 32))

        inputs["latents"] = upscaled_video_latent
        inputs["audio_latents"] = audio_latent
        inputs["output_type"] = "pt"
        second_stage_output = pipe(**inputs)
        video = second_stage_output.frames
        audio = second_stage_output.audio

        # Decoded video follows the upscaled latent size, not the original `height`/`width`.
        self.assertEqual(video.shape, (1, 5, 3, 64, 64))
        self.assertEqual(audio.shape[0], 1)
        self.assertEqual(audio.shape[1], components["vocoder"].config.out_channels)

        # fmt: off
        expected_video_slice = torch.tensor(
            [
                0.4497, 0.6757, 0.4219, 0.7686, 0.4525, 0.6483, 0.3969, 0.7404,
                0.3541, 0.3039, 0.4592, 0.3521, 0.3665, 0.2785, 0.3336, 0.3079
            ]
        )
        expected_audio_slice = torch.tensor(
            [
                0.0271, 0.0492, 0.1249, 0.1126, 0.1661, 0.1060, 0.1717, 0.0944,
                0.0672, -0.0069, 0.0688, 0.0097, 0.0808, 0.1231, 0.0986, 0.0739
            ]
        )
        # fmt: on

        video = video.flatten()
        audio = audio.flatten()
        generated_video_slice = torch.cat([video[:8], video[-8:]])
        generated_audio_slice = torch.cat([audio[:8], audio[-8:]])

        assert torch.allclose(expected_video_slice, generated_video_slice, atol=1e-4, rtol=1e-4)
        assert torch.allclose(expected_audio_slice, generated_audio_slice, atol=1e-4, rtol=1e-4)

    def test_inference_batch_single_identical(self):
        # Batched vs single-sample outputs may differ slightly; widen the tolerance.
        self._test_inference_batch_single_identical(batch_size=2, expected_max_diff=2e-2)
{ "repo_id": "huggingface/diffusers", "file_path": "tests/pipelines/ltx2/test_ltx2_image2video.py", "license": "Apache License 2.0", "lines": 305, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/diffusers:src/diffusers/pipelines/ltx/pipeline_ltx_i2v_long_multi_prompt.py
# Copyright 2025 Lightricks and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import copy from typing import Any, Callable import numpy as np import PIL import torch from transformers import T5EncoderModel, T5TokenizerFast from ...callbacks import MultiPipelineCallbacks, PipelineCallback from ...loaders import FromSingleFileMixin, LTXVideoLoraLoaderMixin from ...models.autoencoders import AutoencoderKLLTXVideo from ...models.transformers import LTXVideoTransformer3DModel from ...schedulers import FlowMatchEulerDiscreteScheduler, LTXEulerAncestralRFScheduler from ...utils import is_torch_xla_available, logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ...video_processor import VideoProcessor from ..pipeline_utils import DiffusionPipeline from .pipeline_output import LTXPipelineOutput if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py >>> import torch >>> from diffusers import LTXEulerAncestralRFScheduler, LTXI2VLongMultiPromptPipeline >>> pipe = LTXI2VLongMultiPromptPipeline.from_pretrained("LTX-Video-0.9.8-13B-distilled") >>> # For ComfyUI parity, swap in the RF scheduler (keeps the original config). 
>>> pipe.scheduler = LTXEulerAncestralRFScheduler.from_config(pipe.scheduler.config) >>> pipe = pipe.to("cuda").to(dtype=torch.bfloat16) >>> # Example A: get decoded frames (PIL) >>> out = pipe( ... prompt="a chimpanzee walks | a chimpanzee eats", ... num_frames=161, ... height=512, ... width=704, ... temporal_tile_size=80, ... temporal_overlap=24, ... output_type="pil", ... return_dict=True, ... ) >>> frames = out.frames[0] # list of PIL.Image.Image >>> # Example B: get latent video and decode later (saves VRAM during sampling) >>> out_latent = pipe(prompt="a chimpanzee walking", output_type="latent", return_dict=True).frames >>> frames = pipe.vae_decode_tiled(out_latent, output_type="pil")[0] ``` """ def get_latent_coords( latent_num_frames, latent_height, latent_width, batch_size, device, rope_interpolation_scale, latent_idx ): """ Compute latent patch top-left coordinates in (t, y, x) order. Args: latent_num_frames: int. Number of latent frames (T_lat). latent_height: int. Latent height (H_lat). latent_width: int. Latent width (W_lat). batch_size: int. Batch dimension (B). device: torch.device for the resulting tensor. rope_interpolation_scale: tuple[int|float, int|float, int|float]. Scale per (t, y, x) latent step to pixel coords. latent_idx: int | None. When not None, shifts the time coordinate to align segments: - <= 0 uses step multiples of rope_interpolation_scale[0] - > 0 starts at 1 then increments by rope_interpolation_scale[0] Returns: Tensor of shape [B, 3, T_lat * H_lat * W_lat] containing top-left coordinates per latent patch, repeated for each batch element. 
""" latent_sample_coords = torch.meshgrid( torch.arange(0, latent_num_frames, 1, device=device), torch.arange(0, latent_height, 1, device=device), torch.arange(0, latent_width, 1, device=device), indexing="ij", ) latent_sample_coords = torch.stack(latent_sample_coords, dim=0) latent_coords = latent_sample_coords.unsqueeze(0).repeat(batch_size, 1, 1, 1, 1) latent_coords = latent_coords.flatten(2) pixel_coords = latent_coords * torch.tensor(rope_interpolation_scale, device=latent_coords.device)[None, :, None] if latent_idx is not None: if latent_idx <= 0: frame_idx = latent_idx * rope_interpolation_scale[0] else: frame_idx = 1 + (latent_idx - 1) * rope_interpolation_scale[0] if frame_idx == 0: pixel_coords[:, 0] = (pixel_coords[:, 0] + 1 - rope_interpolation_scale[0]).clamp(min=0) pixel_coords[:, 0] += frame_idx return pixel_coords # Copied from diffusers.pipelines.ltx.pipeline_ltx.rescale_noise_cfg def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): r""" Rescales `noise_cfg` tensor based on `guidance_rescale` to improve image quality and fix overexposure. Based on Section 3.4 from [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://huggingface.co/papers/2305.08891). Args: noise_cfg (`torch.Tensor`): The predicted noise tensor for the guided diffusion process. noise_pred_text (`torch.Tensor`): The predicted noise tensor for the text-guided diffusion process. guidance_rescale (`float`, *optional*, defaults to 0.0): A rescale factor applied to the noise predictions. Returns: noise_cfg (`torch.Tensor`): The rescaled noise prediction tensor. 
""" std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) # rescale the results from guidance (fixes overexposure) noise_pred_rescaled = noise_cfg * (std_text / std_cfg) # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg return noise_cfg def adain_normalize_latents( curr_latents: torch.Tensor, ref_latents: torch.Tensor | None, factor: float ) -> torch.Tensor: """ Optional AdaIN normalization: channel-wise mean/variance matching of curr_latents to ref_latents, controlled by factor. Args: curr_latents: Tensor [B, C, T, H, W]. Current window latents. ref_latents: Tensor | None [B, C, T_ref, H, W]. Reference latents (e.g., first window) used to compute target stats. factor: float in [0, 1]. 0 keeps current stats; 1 matches reference stats. Returns: Tensor with per-channel mean/std blended towards the reference. 
""" if ref_latents is None or factor is None or factor <= 0: return curr_latents eps = torch.tensor(1e-6, device=curr_latents.device, dtype=curr_latents.dtype) # Compute per-channel means/stds for current and reference over (T, H, W) mu_curr = curr_latents.mean(dim=(2, 3, 4), keepdim=True) sigma_curr = curr_latents.std(dim=(2, 3, 4), keepdim=True) mu_ref = ref_latents.mean(dim=(2, 3, 4), keepdim=True).to(device=curr_latents.device, dtype=curr_latents.dtype) sigma_ref = ref_latents.std(dim=(2, 3, 4), keepdim=True).to(device=curr_latents.device, dtype=curr_latents.dtype) # Blend target statistics mu_blend = (1.0 - float(factor)) * mu_curr + float(factor) * mu_ref sigma_blend = (1.0 - float(factor)) * sigma_curr + float(factor) * sigma_ref sigma_blend = torch.clamp(sigma_blend, min=float(eps)) # Apply AdaIN curr_norm = (curr_latents - mu_curr) / (sigma_curr + eps) return curr_norm * sigma_blend + mu_blend def split_into_temporal_windows( latent_len: int, temporal_tile_size: int, temporal_overlap: int, compression: int ) -> list[tuple[int, int]]: """ Split latent frames into sliding windows. Args: latent_len: int. Number of latent frames (T_lat). temporal_tile_size: int. Window size in latent frames (> 0). temporal_overlap: int. Overlap between windows in latent frames (>= 0). compression: int. VAE temporal compression ratio (unused here; kept for parity). Returns: list[tuple[int, int]]: inclusive-exclusive (start, end) indices per window. """ if temporal_tile_size <= 0: raise ValueError("temporal_tile_size must be > 0") stride = max(temporal_tile_size - temporal_overlap, 1) windows = [] start = 0 while start < latent_len: end = min(start + temporal_tile_size, latent_len) windows.append((start, end)) if end == latent_len: break start = start + stride return windows def linear_overlap_fuse(prev: torch.Tensor, new: torch.Tensor, overlap: int) -> torch.Tensor: """ Temporal linear crossfade between two latent clips over the overlap region. 
Args: prev: Tensor [B, C, F, H, W]. Previous output segment. new: Tensor [B, C, F, H, W]. New segment to be appended. overlap: int. Number of frames to crossfade (overlap <= 1 concatenates without blend). Returns: Tensor [B, C, F_prev + F_new - overlap, H, W] after crossfade at the seam. """ if overlap <= 1: return torch.cat([prev, new], dim=2) alpha = torch.linspace(1, 0, overlap + 2, device=prev.device, dtype=prev.dtype)[1:-1] shape = [1] * prev.ndim shape[2] = alpha.size(0) alpha = alpha.reshape(shape) blended = alpha * prev[:, :, -overlap:] + (1 - alpha) * new[:, :, :overlap] return torch.cat([prev[:, :, :-overlap], blended, new[:, :, overlap:]], dim=2) def inject_prev_tail_latents( window_latents: torch.Tensor, prev_tail_latents: torch.Tensor | None, window_cond_mask_5d: torch.Tensor, overlap_lat: int, strength: float | None, prev_overlap_len: int, ) -> tuple[torch.Tensor, torch.Tensor, int]: """ Inject the tail latents from the previous window at the beginning of the current window (first k frames), where k = min(overlap_lat, T_curr, T_prev_tail). Args: window_latents: Tensor [B, C, T, H, W]. Current window latents. prev_tail_latents: Tensor | None [B, C, T_prev, H, W]. Tail segment from the previous window. window_cond_mask_5d: Tensor [B, 1, T, H, W]. Per-token conditioning mask (1 = free, 0 = hard condition). overlap_lat: int. Number of latent frames to inject from the previous tail. strength: float | None in [0, 1]. Blend strength; 1.0 replaces, 0.0 keeps original. prev_overlap_len: int. Accumulated overlap length so far (used for trimming later). 
Returns: tuple[Tensor, Tensor, int]: (updated_window_latents, updated_cond_mask, updated_prev_overlap_len) """ if prev_tail_latents is None or overlap_lat <= 0 or strength is None or strength <= 0: return window_latents, window_cond_mask_5d, prev_overlap_len # Expected shape: [B, C, T, H, W] T = int(window_latents.shape[2]) k = min(int(overlap_lat), T, int(prev_tail_latents.shape[2])) if k <= 0: return window_latents, window_cond_mask_5d, prev_overlap_len tail = prev_tail_latents[:, :, -k:] mask = torch.full( (window_cond_mask_5d.shape[0], 1, tail.shape[2], window_cond_mask_5d.shape[3], window_cond_mask_5d.shape[4]), 1.0 - strength, dtype=window_cond_mask_5d.dtype, device=window_cond_mask_5d.device, ) window_latents = torch.cat([window_latents, tail], dim=2) window_cond_mask_5d = torch.cat([window_cond_mask_5d, mask], dim=2) return window_latents, window_cond_mask_5d, prev_overlap_len + k def build_video_coords_for_window( latents: torch.Tensor, overlap_len: int, guiding_len: int, negative_len: int, rope_interpolation_scale: torch.Tensor, frame_rate: int, ) -> torch.Tensor: """ Build video_coords: [B, 3, S] with order [t, y, x]. Args: latents: Tensor [B, C, T, H, W]. Current window latents (before any trimming). overlap_len: int. Number of frames from previous tail injected at the head. guiding_len: int. Number of guidance frames appended at the head. negative_len: int. Number of negative-index frames appended at the head (typically 1 or 0). rope_interpolation_scale: tuple[int|float, int|float, int|float]. Scale for (t, y, x). frame_rate: int. Used to convert time indices into seconds (t /= frame_rate). Returns: Tensor [B, 3, T*H*W] of fractional pixel coordinates per latent patch. 
""" b, c, f, h, w = latents.shape pixel_coords = get_latent_coords(f, h, w, b, latents.device, rope_interpolation_scale, 0) replace_corrds = [] if overlap_len > 0: replace_corrds.append(get_latent_coords(overlap_len, h, w, b, latents.device, rope_interpolation_scale, 0)) if guiding_len > 0: replace_corrds.append( get_latent_coords(guiding_len, h, w, b, latents.device, rope_interpolation_scale, overlap_len) ) if negative_len > 0: replace_corrds.append(get_latent_coords(negative_len, h, w, b, latents.device, rope_interpolation_scale, -1)) if len(replace_corrds) > 0: replace_corrds = torch.cat(replace_corrds, axis=2) pixel_coords[:, :, -replace_corrds.shape[2] :] = replace_corrds fractional_coords = pixel_coords.to(torch.float32) fractional_coords[:, 0] = fractional_coords[:, 0] * (1.0 / frame_rate) return fractional_coords def parse_prompt_segments(prompt: str | list[str], prompt_segments: list[dict[str, Any]] | None) -> list[str]: """ Return a list of positive prompts per window index. Args: prompt: str | list[str]. If str contains '|', parts are split by bars and trimmed. prompt_segments: list[dict], optional. Each dict with {"start_window", "end_window", "text"} overrides prompts per window. Returns: list[str] containing the positive prompt for each window index. 
""" if prompt is None: return [] if prompt_segments: max_w = 0 for seg in prompt_segments: max_w = max(max_w, int(seg.get("end_window", 0))) texts = [""] * (max_w + 1) for seg in prompt_segments: s = int(seg.get("start_window", 0)) e = int(seg.get("end_window", s)) txt = seg.get("text", "") for w in range(s, e + 1): texts[w] = txt # fill empty by last non-empty last = "" for i in range(len(texts)): if texts[i] == "": texts[i] = last else: last = texts[i] return texts # bar-split mode if isinstance(prompt, str): parts = [p.strip() for p in prompt.split("|")] else: parts = prompt parts = [p for p in parts if p is not None] return parts def batch_normalize(latents, reference, factor): """ Batch AdaIN-like normalization for latents in dict format (ComfyUI-compatible). Args: latents: dict containing "samples" shaped [B, C, F, H, W] reference: dict containing "samples" used to compute target stats factor: float in [0, 1]; 0 = no change, 1 = full match to reference Returns: tuple[dict]: a single-element tuple with the updated latents dict. """ latents_copy = copy.deepcopy(latents) t = latents_copy["samples"] # B x C x F x H x W for i in range(t.size(0)): # batch for c in range(t.size(1)): # channel r_sd, r_mean = torch.std_mean(reference["samples"][i, c], dim=None) # index by original dim order i_sd, i_mean = torch.std_mean(t[i, c], dim=None) t[i, c] = ((t[i, c] - i_mean) / i_sd) * r_sd + r_mean latents_copy["samples"] = torch.lerp(latents["samples"], t, factor) return (latents_copy,) class LTXI2VLongMultiPromptPipeline(DiffusionPipeline, FromSingleFileMixin, LTXVideoLoraLoaderMixin): r""" Long-duration I2V (image-to-video) multi-prompt pipeline with ComfyUI parity. Key features: - Temporal sliding-window sampling only (no spatial H/W sharding); autoregressive fusion across windows. - Multi-prompt segmentation per window with smooth transitions at window heads. - First-frame hard conditioning via per-token mask for I2V. 
    - VRAM control via temporal windowing and VAE tiled decoding.

    Reference: https://github.com/Lightricks/LTX-Video

    Args:
        transformer ([`LTXVideoTransformer3DModel`]):
            Conditional Transformer architecture to denoise the encoded video latents.
        scheduler ([`FlowMatchEulerDiscreteScheduler`] or [`LTXEulerAncestralRFScheduler`]):
            A scheduler to be used in combination with `transformer` to denoise the encoded image latents.
        vae ([`AutoencoderKLLTXVideo`]):
            Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
        text_encoder ([`T5EncoderModel`]):
            [T5](https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5EncoderModel), specifically
            the [google/t5-v1_1-xxl](https://huggingface.co/google/t5-v1_1-xxl) variant.
        tokenizer (`T5TokenizerFast`):
            Tokenizer of class
            [T5TokenizerFast](https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5TokenizerFast).
    """

    # Offload order for model_cpu_offload (least-recently-needed module last).
    model_cpu_offload_seq = "text_encoder->transformer->vae"
    _optional_components = []
    # Tensors exposed to per-step callbacks via callback_on_step_end_tensor_inputs.
    _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"]

    def __init__(
        self,
        scheduler: FlowMatchEulerDiscreteScheduler,
        vae: AutoencoderKLLTXVideo,
        text_encoder: T5EncoderModel,
        tokenizer: T5TokenizerFast,
        transformer: LTXVideoTransformer3DModel,
    ):
        super().__init__()

        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            transformer=transformer,
            scheduler=scheduler,
        )
        # Not a hard error: any scheduler with the same interface works, but
        # ComfyUI parity was validated with LTXEulerAncestralRFScheduler.
        if not isinstance(scheduler, LTXEulerAncestralRFScheduler):
            logger.warning(
                "For ComfyUI parity, `LTXI2VLongMultiPromptPipeline` is typically run with "
                "`LTXEulerAncestralRFScheduler`. Got %s.",
                scheduler.__class__.__name__,
            )

        # Literal fallbacks (32, 8, 1, 128) are used when a component is absent
        # (e.g. a partially loaded pipeline) and mirror the standard LTX configs.
        self.vae_spatial_compression_ratio = (
            self.vae.spatial_compression_ratio if getattr(self, "vae", None) is not None else 32
        )
        self.vae_temporal_compression_ratio = (
            self.vae.temporal_compression_ratio if getattr(self, "vae", None) is not None else 8
        )
        self.transformer_spatial_patch_size = (
            self.transformer.config.patch_size if getattr(self, "transformer", None) is not None else 1
        )
        self.transformer_temporal_patch_size = (
            self.transformer.config.patch_size_t if getattr(self, "transformer", None) is not None else 1
        )

        self.video_processor = VideoProcessor(vae_scale_factor=self.vae_spatial_compression_ratio)
        self.tokenizer_max_length = (
            self.tokenizer.model_max_length if getattr(self, "tokenizer", None) is not None else 128
        )

        self.default_height = 512
        self.default_width = 704
        self.default_frames = 121
        # Frame count of the temporal window currently being denoised (set in __call__).
        self._current_tile_T = None

    @property
    # Copied from diffusers.pipelines.ltx.pipeline_ltx.LTXPipeline.guidance_scale
    def guidance_scale(self):
        return self._guidance_scale

    @property
    # Copied from diffusers.pipelines.ltx.pipeline_ltx.LTXPipeline.guidance_rescale
    def guidance_rescale(self):
        return self._guidance_rescale

    @property
    # Copied from diffusers.pipelines.ltx.pipeline_ltx.LTXPipeline.do_classifier_free_guidance
    def do_classifier_free_guidance(self):
        return self._guidance_scale > 1.0

    @property
    # Copied from diffusers.pipelines.ltx.pipeline_ltx.LTXPipeline.num_timesteps
    def num_timesteps(self):
        return self._num_timesteps

    @property
    # Copied from diffusers.pipelines.ltx.pipeline_ltx.LTXPipeline.current_timestep
    def current_timestep(self):
        return self._current_timestep

    @property
    # Copied from diffusers.pipelines.ltx.pipeline_ltx.LTXPipeline.attention_kwargs
    def attention_kwargs(self):
        return self._attention_kwargs

    @property
    # Copied from diffusers.pipelines.ltx.pipeline_ltx.LTXPipeline.interrupt
    def interrupt(self):
        return self._interrupt

    # Copied from diffusers.pipelines.ltx.pipeline_ltx.LTXPipeline._get_t5_prompt_embeds
    def _get_t5_prompt_embeds(
        self,
        prompt: str | list[str] = None,
        num_videos_per_prompt: int = 1,
        max_sequence_length: int = 128,
        device: torch.device | None = None,
        dtype: torch.dtype | None = None,
    ):
        device = device or self._execution_device
        dtype = dtype or self.text_encoder.dtype

        prompt = [prompt] if isinstance(prompt, str) else prompt
        batch_size = len(prompt)

        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=max_sequence_length,
            truncation=True,
            add_special_tokens=True,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids
        prompt_attention_mask = text_inputs.attention_mask
        prompt_attention_mask = prompt_attention_mask.bool().to(device)

        untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids

        if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
            removed_text = self.tokenizer.batch_decode(untruncated_ids[:, max_sequence_length - 1 : -1])
            logger.warning(
                "The following part of your input was truncated because `max_sequence_length` is set to "
                f" {max_sequence_length} tokens: {removed_text}"
            )

        prompt_embeds = self.text_encoder(text_input_ids.to(device))[0]
        prompt_embeds = prompt_embeds.to(dtype=dtype, device=device)

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        _, seq_len, _ = prompt_embeds.shape
        prompt_embeds = prompt_embeds.repeat(1, num_videos_per_prompt, 1)
        prompt_embeds = prompt_embeds.view(batch_size * num_videos_per_prompt, seq_len, -1)

        prompt_attention_mask = prompt_attention_mask.view(batch_size, -1)
        prompt_attention_mask = prompt_attention_mask.repeat(num_videos_per_prompt, 1)

        return prompt_embeds, prompt_attention_mask

    # Copied from diffusers.pipelines.ltx.pipeline_ltx.LTXPipeline.encode_prompt
    def encode_prompt(
        self,
        prompt: str | list[str],
        negative_prompt: str | list[str] | None = None,
        do_classifier_free_guidance: bool = True,
        num_videos_per_prompt: int = 1,
        prompt_embeds: torch.Tensor | None =
None,
        negative_prompt_embeds: torch.Tensor | None = None,
        prompt_attention_mask: torch.Tensor | None = None,
        negative_prompt_attention_mask: torch.Tensor | None = None,
        max_sequence_length: int = 128,
        device: torch.device | None = None,
        dtype: torch.dtype | None = None,
    ):
        r"""
        Encodes the prompt into text encoder hidden states.

        Args:
            prompt (`str` or `list[str]`, *optional*):
                prompt to be encoded
            negative_prompt (`str` or `list[str]`, *optional*):
                The prompt or prompts not to guide the image generation. If not defined, one has to pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale`
                is less than `1`).
            do_classifier_free_guidance (`bool`, *optional*, defaults to `True`):
                Whether to use classifier free guidance or not.
            num_videos_per_prompt (`int`, *optional*, defaults to 1):
                Number of videos that should be generated per prompt. torch device to place the resulting embeddings on
            prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If
                not provided, text embeddings will be generated from `prompt` input argument.
            negative_prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
                argument.
            device: (`torch.device`, *optional*):
                torch device
            dtype: (`torch.dtype`, *optional*):
                torch dtype
        """
        device = device or self._execution_device

        prompt = [prompt] if isinstance(prompt, str) else prompt
        if prompt is not None:
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        if prompt_embeds is None:
            prompt_embeds, prompt_attention_mask = self._get_t5_prompt_embeds(
                prompt=prompt,
                num_videos_per_prompt=num_videos_per_prompt,
                max_sequence_length=max_sequence_length,
                device=device,
                dtype=dtype,
            )

        if do_classifier_free_guidance and negative_prompt_embeds is None:
            negative_prompt = negative_prompt or ""
            negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt

            if prompt is not None and type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )

            negative_prompt_embeds, negative_prompt_attention_mask = self._get_t5_prompt_embeds(
                prompt=negative_prompt,
                num_videos_per_prompt=num_videos_per_prompt,
                max_sequence_length=max_sequence_length,
                device=device,
                dtype=dtype,
            )

        return prompt_embeds, prompt_attention_mask, negative_prompt_embeds, negative_prompt_attention_mask

    # NOTE(review): the latent pack/unpack/normalize helpers below are mirrored from
    # LTXPipeline via "# Copied from" markers — keep them token-identical so the
    # diffusers copy-consistency check (`make fix-copies`) passes.
    @staticmethod
    # Copied from diffusers.pipelines.ltx.pipeline_ltx.LTXPipeline._pack_latents
    def _pack_latents(latents: torch.Tensor, patch_size: int = 1, patch_size_t: int = 1) -> torch.Tensor:
        # Unpacked latents of shape are [B, C, F, H, W] are patched into tokens of shape [B, C, F // p_t, p_t, H // p, p, W // p, p].
        # The patch dimensions are then permuted and collapsed into the channel dimension of shape:
        # [B, F // p_t * H // p * W // p, C * p_t * p * p] (an ndim=3 tensor).
        # dim=0 is the batch size, dim=1 is the effective video sequence length, dim=2 is the effective number of input features
        batch_size, num_channels, num_frames, height, width = latents.shape
        post_patch_num_frames = num_frames // patch_size_t
        post_patch_height = height // patch_size
        post_patch_width = width // patch_size
        latents = latents.reshape(
            batch_size,
            -1,
            post_patch_num_frames,
            patch_size_t,
            post_patch_height,
            patch_size,
            post_patch_width,
            patch_size,
        )
        latents = latents.permute(0, 2, 4, 6, 1, 3, 5, 7).flatten(4, 7).flatten(1, 3)
        return latents

    @staticmethod
    # Copied from diffusers.pipelines.ltx.pipeline_ltx.LTXPipeline._unpack_latents
    def _unpack_latents(
        latents: torch.Tensor, num_frames: int, height: int, width: int, patch_size: int = 1, patch_size_t: int = 1
    ) -> torch.Tensor:
        # Packed latents of shape [B, S, D] (S is the effective video sequence length, D is the effective feature dimensions)
        # are unpacked and reshaped into a video tensor of shape [B, C, F, H, W]. This is the inverse operation of
        # what happens in the `_pack_latents` method.
        batch_size = latents.size(0)
        latents = latents.reshape(batch_size, num_frames, height, width, -1, patch_size_t, patch_size, patch_size)
        latents = latents.permute(0, 4, 1, 5, 2, 6, 3, 7).flatten(6, 7).flatten(4, 5).flatten(2, 3)
        return latents

    @staticmethod
    # Copied from diffusers.pipelines.ltx.pipeline_ltx.LTXPipeline._normalize_latents
    def _normalize_latents(
        latents: torch.Tensor, latents_mean: torch.Tensor, latents_std: torch.Tensor, scaling_factor: float = 1.0
    ) -> torch.Tensor:
        # Normalize latents across the channel dimension [B, C, F, H, W]
        latents_mean = latents_mean.view(1, -1, 1, 1, 1).to(latents.device, latents.dtype)
        latents_std = latents_std.view(1, -1, 1, 1, 1).to(latents.device, latents.dtype)
        latents = (latents - latents_mean) * scaling_factor / latents_std
        return latents

    @staticmethod
    # Copied from diffusers.pipelines.ltx.pipeline_ltx.LTXPipeline._denormalize_latents
    def _denormalize_latents(
        latents: torch.Tensor, latents_mean: torch.Tensor, latents_std: torch.Tensor, scaling_factor: float = 1.0
    ) -> torch.Tensor:
        # Denormalize latents across the channel dimension [B, C, F, H, W]
        latents_mean = latents_mean.view(1, -1, 1, 1, 1).to(latents.device, latents.dtype)
        latents_std = latents_std.view(1, -1, 1, 1, 1).to(latents.device, latents.dtype)
        latents = latents * latents_std / scaling_factor + latents_mean
        return latents

    # Pipeline-specific (not "Copied from" upstream): window-aware latent preparation
    # with optional first-frame hard conditioning for I2V.
    def prepare_latents(
        self,
        batch_size: int,
        num_channels_latents: int,
        height: int,
        width: int,
        num_frames: int,
        device: torch.device,
        generator: torch.Generator | None,
        dtype: torch.dtype = torch.float32,
        latents: torch.Tensor | None = None,
        cond_latents: torch.Tensor | None = None,
        cond_strength: float = 0.0,
        negative_index_latents: torch.Tensor | None = None,
    ) -> tuple[torch.Tensor, torch.Tensor | None, int, int, int]:
        """
        Prepare base latents and optionally inject first-frame conditioning latents.
        Returns:
            latents, negative_index_latents, latent_num_frames, latent_height, latent_width
        """
        if latents is None:
            latent_num_frames = (num_frames - 1) // self.vae_temporal_compression_ratio + 1
            latent_height = height // self.vae_spatial_compression_ratio
            latent_width = width // self.vae_spatial_compression_ratio
            # NOTE(review): base latents are zero-initialized here even though the
            # `generator` parameter is accepted; the per-window noise is presumably
            # drawn later in the denoising loop — confirm against `__call__`.
            latents = torch.zeros(
                (batch_size, num_channels_latents, latent_num_frames, latent_height, latent_width),
                device=device,
                dtype=dtype,
            )
        else:
            latent_num_frames = latents.shape[2]
            latent_height = latents.shape[3]
            latent_width = latents.shape[4]
            latents = latents.to(device=device, dtype=dtype)

        if cond_latents is not None and cond_strength > 0:
            # The conditioning frame doubles as the default negative-index frame.
            if negative_index_latents is None:
                negative_index_latents = cond_latents
            # Hard-condition frame 0 on the encoded conditioning image.
            latents[:, :, :1, :, :] = cond_latents

        return latents, negative_index_latents, latent_num_frames, latent_height, latent_width

    # TODO: refactor this out
    @torch.no_grad()
    def vae_decode_tiled(
        self,
        latents: torch.Tensor,
        decode_timestep: float | None = None,
        decode_noise_scale: float | None = None,
        horizontal_tiles: int = 4,
        vertical_tiles: int = 4,
        overlap: int = 3,
        last_frame_fix: bool = True,
        generator: torch.Generator | None = None,
        output_type: str = "pt",
        auto_denormalize: bool = True,
        compute_dtype: torch.dtype = torch.float32,
        enable_vae_tiling: bool = False,
    ) -> torch.Tensor | np.ndarray | list[PIL.Image.Image]:
        """
        VAE-based spatial tiled decoding (ComfyUI parity) implemented in Diffusers style.

        - Linearly feather and blend overlapping tiles to avoid seams.
        - Optional last_frame_fix: duplicate the last latent frame before decoding, then drop time_scale_factor
          frames at the end.
        - Supports timestep_conditioning and decode_noise_scale injection.
        - By default, "normalized latents" (the denoising output) are de-normalized internally
          (auto_denormalize=True).
        - Tile fusion is computed in compute_dtype (float32 by default) to reduce blur and color shifts.

        Args:
            latents: [B, C_latent, F_latent, H_latent, W_latent]
            decode_timestep: Optional decode timestep (effective only if VAE supports timestep_conditioning)
            decode_noise_scale: Optional decode noise interpolation (effective only if VAE supports
                timestep_conditioning)
            horizontal_tiles, vertical_tiles: Number of tiles horizontally/vertically (>= 1)
            overlap: Overlap in latent space (in latent pixels, >= 0)
            last_frame_fix: Whether to enable the "repeat last frame" fix
            generator: Random generator (used for decode_noise_scale noise)
            output_type: "latent" | "pt" | "np" | "pil"
                - "latent": return latents unchanged (useful for downstream processing)
                - "pt": return tensor in VAE output space
                - "np"/"pil": post-processed outputs via VideoProcessor.postprocess_video
            auto_denormalize: If True, apply LTX de-normalization to `latents` internally (recommended)
            compute_dtype: Precision used during tile fusion (float32 default; significantly reduces seam blur)
            enable_vae_tiling: If True, delegate tiling to VAE's built-in `tiled_decode` (sets `vae.use_tiling`).

        Returns:
            - If output_type="latent": returns input `latents` unchanged
            - If output_type="pt": returns [B, C, F, H, W] (values roughly in [-1, 1])
            - If output_type="np"/"pil": returns post-processed outputs via postprocess_video
        """
        if output_type == "latent":
            return latents

        if horizontal_tiles < 1 or vertical_tiles < 1:
            raise ValueError("horizontal_tiles and vertical_tiles must be >= 1")
        overlap = max(int(overlap), 0)

        # Device and precision
        device = self._execution_device
        latents = latents.to(device=device, dtype=compute_dtype)

        # De-normalize to VAE space (avoid color artifacts)
        if auto_denormalize:
            latents = self._denormalize_latents(
                latents, self.vae.latents_mean, self.vae.latents_std, self.vae.config.scaling_factor
            )

        # dtype required for VAE forward pass
        latents = latents.to(dtype=self.vae.dtype)

        # Temporal/spatial upscaling ratios (parity with ComfyUI's downscale_index_formula)
        tsf = int(self.vae_temporal_compression_ratio)
        sf = int(self.vae_spatial_compression_ratio)

        # Optional: last_frame_fix (repeat last latent frame)
        if last_frame_fix:
            latents = torch.cat([latents, latents[:, :, -1:].contiguous()], dim=2)

        b, c_lat, f_lat, h_lat, w_lat = latents.shape
        f_out = 1 + (f_lat - 1) * tsf
        h_out = h_lat * sf
        w_out = w_lat * sf

        # timestep_conditioning + decode-time noise injection (aligned with pipeline)
        if getattr(self.vae.config, "timestep_conditioning", False):
            dt = float(decode_timestep) if decode_timestep is not None else 0.0
            vt = torch.tensor([dt], device=device, dtype=latents.dtype)
            if decode_noise_scale is not None:
                dns = torch.tensor([float(decode_noise_scale)], device=device, dtype=latents.dtype)[
                    :, None, None, None, None
                ]
                noise = randn_tensor(latents.shape, generator=generator, device=device, dtype=latents.dtype)
                latents = (1 - dns) * latents + dns * noise
        else:
            vt = None

        # Fast path: delegate spatial tiling to the VAE itself.
        if enable_vae_tiling and hasattr(self.vae, "enable_tiling"):
            self.vae.enable_tiling()
            decoded = self.vae.decode(latents, vt, return_dict=False)[0]
            if last_frame_fix:
                decoded = decoded[:, :, :-tsf, :, :]
            if output_type in ("np", "pil"):
                return self.video_processor.postprocess_video(decoded, output_type=output_type)
            return decoded

        # Compute base tile sizes (in latent space)
        base_tile_h = (h_lat + (vertical_tiles - 1) * overlap) // vertical_tiles
        base_tile_w = (w_lat + (horizontal_tiles - 1) * overlap) // horizontal_tiles

        output: torch.Tensor | None = None  # [B, C_img, F, H, W], fused using compute_dtype
        weights: torch.Tensor | None = None  # [B, 1, F, H, W], fused using compute_dtype

        # Iterate tiles in latent space (no temporal tiling)
        for v in range(vertical_tiles):
            for h in range(horizontal_tiles):
                h_start = h * (base_tile_w - overlap)
                v_start = v * (base_tile_h - overlap)
                # The last row/column of tiles always extends to the image edge.
                h_end = min(h_start + base_tile_w, w_lat) if h < horizontal_tiles - 1 else w_lat
                v_end = min(v_start + base_tile_h, h_lat) if v < vertical_tiles - 1 else h_lat

                # Slice latent tile and decode
                tile_latents = latents[:, :, :, v_start:v_end, h_start:h_end]
                decoded_tile = self.vae.decode(tile_latents, vt, return_dict=False)[0]  # [B, C, F, Ht, Wt]
                # Cast to high precision to reduce blending blur
                decoded_tile = decoded_tile.to(dtype=compute_dtype)

                # Initialize output buffers (compute_dtype)
                if output is None:
                    output = torch.zeros(
                        (b, decoded_tile.shape[1], f_out, h_out, w_out),
                        device=decoded_tile.device,
                        dtype=compute_dtype,
                    )
                    weights = torch.zeros(
                        (b, 1, f_out, h_out, w_out),
                        device=decoded_tile.device,
                        dtype=compute_dtype,
                    )

                # Tile placement in output pixel space
                out_h_start = v_start * sf
                out_h_end = v_end * sf
                out_w_start = h_start * sf
                out_w_end = h_end * sf
                tile_out_h = out_h_end - out_h_start
                tile_out_w = out_w_end - out_w_start

                # Linear feathering weights [B, 1, F, Ht, Wt] (compute_dtype)
                tile_weights = torch.ones(
                    (b, 1, decoded_tile.shape[2], tile_out_h, tile_out_w),
                    device=decoded_tile.device,
                    dtype=compute_dtype,
                )
                overlap_out_h = overlap * sf
                overlap_out_w = overlap * sf

                # Horizontal feathering: left/right overlaps
                if overlap_out_w > 0:
                    if h > 0:
                        h_blend = torch.linspace(
                            0, 1, steps=overlap_out_w, device=decoded_tile.device, dtype=compute_dtype
                        )
                        tile_weights[:, :, :, :, :overlap_out_w] *= h_blend.view(1, 1, 1, 1, -1)
                    if h < horizontal_tiles - 1:
                        h_blend = torch.linspace(
                            1, 0, steps=overlap_out_w, device=decoded_tile.device, dtype=compute_dtype
                        )
                        tile_weights[:, :, :, :, -overlap_out_w:] *= h_blend.view(1, 1, 1, 1, -1)

                # Vertical feathering: top/bottom overlaps
                if overlap_out_h > 0:
                    if v > 0:
                        v_blend = torch.linspace(
                            0, 1, steps=overlap_out_h, device=decoded_tile.device, dtype=compute_dtype
                        )
                        tile_weights[:, :, :, :overlap_out_h, :] *= v_blend.view(1, 1, 1, -1, 1)
                    if v < vertical_tiles - 1:
                        v_blend = torch.linspace(
                            1, 0, steps=overlap_out_h, device=decoded_tile.device, dtype=compute_dtype
                        )
                        tile_weights[:, :, :, -overlap_out_h:, :] *= v_blend.view(1, 1, 1, -1, 1)

                # Accumulate blended tile
                output[:, :, :, out_h_start:out_h_end, out_w_start:out_w_end] += decoded_tile * tile_weights
                weights[:, :, :, out_h_start:out_h_end, out_w_start:out_w_end] += tile_weights

        # Normalize, then clamp to [-1, 1] in compute_dtype to avoid color artifacts
        output = output / (weights + 1e-8)
        output = output.clamp(-1.0, 1.0)
        output = output.to(dtype=self.vae.dtype)

        # Optional: drop the last tsf frames after last_frame_fix
        if last_frame_fix:
            output = output[:, :, :-tsf, :, :]

        if output_type in ("np", "pil"):
            return self.video_processor.postprocess_video(output, output_type=output_type)
        return output

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        prompt: str | list[str] = None,
        negative_prompt: str | list[str] | None = None,
        prompt_segments: list[dict[str, Any]] | None = None,
        height: int = 512,
        width: int = 704,
        num_frames: int = 161,
        frame_rate: float = 25,
        guidance_scale: float = 1.0,
        guidance_rescale: float = 0.0,
        num_inference_steps: int | None = 8,
        sigmas: list[float, torch.Tensor] | None = None,
        generator: torch.Generator | list[torch.Generator] | None = None,
        seed: int | None = 0,
        cond_image: "PIL.Image.Image" |
torch.Tensor | None = None, cond_strength: float = 0.5, latents: torch.Tensor | None = None, temporal_tile_size: int = 80, temporal_overlap: int = 24, temporal_overlap_cond_strength: float = 0.5, adain_factor: float = 0.25, guidance_latents: torch.Tensor | None = None, guiding_strength: float = 1.0, negative_index_latents: torch.Tensor | None = None, negative_index_strength: float = 1.0, skip_steps_sigma_threshold: float | None = 1, decode_timestep: float | None = 0.05, decode_noise_scale: float | None = 0.025, decode_horizontal_tiles: int = 4, decode_vertical_tiles: int = 4, decode_overlap: int = 3, output_type: str | None = "latent", # "latent" | "pt" | "np" | "pil" return_dict: bool = True, attention_kwargs: dict[str, Any] | None = None, callback_on_step_end: Callable[[int, int], None] | None = None, callback_on_step_end_tensor_inputs: list[str] = ["latents"], max_sequence_length: int = 128, ): r""" Generate an image-to-video sequence via temporal sliding windows and multi-prompt scheduling. Args: prompt (`str` or `list[str]`, *optional*): Positive text prompt(s) per window. If a single string contains '|', parts are split by bars. negative_prompt (`str` or `list[str]`, *optional*): Negative prompt(s) to suppress undesired content. prompt_segments (`list[dict]`, *optional*): Segment mapping with {"start_window", "end_window", "text"} to override prompts per window. height (`int`, defaults to `512`): Output image height in pixels; must be divisible by 32. width (`int`, defaults to `704`): Output image width in pixels; must be divisible by 32. num_frames (`int`, defaults to `161`): Number of output frames (in decoded pixel space). frame_rate (`float`, defaults to `25`): Frames-per-second; used to normalize temporal coordinates in `video_coords`. guidance_scale (`float`, defaults to `1.0`): CFG scale; values > 1 enable classifier-free guidance. 
guidance_rescale (`float`, defaults to `0.0`): Optional rescale to mitigate overexposure under CFG (see `rescale_noise_cfg`). num_inference_steps (`int`, *optional*, defaults to `8`): Denoising steps per window. Ignored if `sigmas` is provided. sigmas (`list[float]` or `torch.Tensor`, *optional*): Explicit sigma schedule per window; if set, overrides `num_inference_steps`. generator (`torch.Generator` or `list[torch.Generator]`, *optional*): Controls stochasticity; list accepted but first element is used (batch=1). seed (`int`, *optional*, defaults to `0`): If provided, seeds the shared generator for global latents and derives a window-local generator with `seed + w_start` per temporal window. cond_image (`PIL.Image.Image` or `torch.Tensor`, *optional*): Conditioning image; fixes frame 0 via per-token mask when `cond_strength > 0`. cond_strength (`float`, defaults to `0.5`): Strength of first-frame hard conditioning (smaller cond_mask ⇒ stronger preservation). latents (`torch.Tensor`, *optional*): Initial latents [B, C_lat, F_lat, H_lat, W_lat]; if None, sampled with `randn_tensor`. temporal_tile_size (`int`, defaults to `80`): Temporal window size (in decoded frames); internally scaled by VAE temporal compression. temporal_overlap (`int`, defaults to `24`): Overlap between consecutive windows (in decoded frames); internally scaled by compression. temporal_overlap_cond_strength (`float`, defaults to `0.5`): Strength for injecting previous window tail latents at new window head. adain_factor (`float`, defaults to `0.25`): AdaIN normalization strength for cross-window consistency (0 disables). guidance_latents (`torch.Tensor`, *optional*): Reference latents injected at window head; length trimmed by overlap for subsequent windows. guiding_strength (`float`, defaults to `1.0`): Injection strength for `guidance_latents`. negative_index_latents (`torch.Tensor`, *optional*): A single-frame latent appended at window head for "negative index" semantics. 
negative_index_strength (`float`, defaults to `1.0`): Injection strength for `negative_index_latents`. skip_steps_sigma_threshold (`float`, *optional*, defaults to `1`): Skip steps whose sigma exceeds this threshold. decode_timestep (`float`, *optional*, defaults to `0.05`): Decode-time timestep (if VAE supports timestep_conditioning). decode_noise_scale (`float`, *optional*, defaults to `0.025`): Decode-time noise mix scale (if VAE supports timestep_conditioning). decode_horizontal_tiles (`int`, defaults to `4`): Number of horizontal tiles during VAE decoding. decode_vertical_tiles (`int`, defaults to `4`): Number of vertical tiles during VAE decoding. decode_overlap (`int`, defaults to `3`): Overlap (in latent pixels) between tiles during VAE decoding. output_type (`str`, *optional*, defaults to `"latent"`): The output format of the generated video. Choose between "latent", "pt", "np", or "pil". If "latent", returns latents without decoding. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.ltx.LTXPipelineOutput`] instead of a plain tuple. attention_kwargs (`dict`, *optional*): Extra attention parameters forwarded to the transformer. callback_on_step_end (`PipelineCallback` or `MultiPipelineCallbacks`, *optional*): Per-step callback hook. callback_on_step_end_tensor_inputs (`list[str]`, defaults to `["latents"]`): Keys from locals() to pass into the callback. max_sequence_length (`int`, defaults to `128`): Tokenizer max length for prompt encoding. Examples: Returns: [`~pipelines.ltx.LTXPipelineOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.ltx.LTXPipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated frames. The output format depends on `output_type`: - "latent"/"pt": `torch.Tensor` [B, C, F, H, W]; "latent" is in normalized latent space, "pt" is VAE output space. - "np": `np.ndarray` post-processed. 
- "pil": `list[PIL.Image.Image]` list of PIL images. Shapes: Latent sizes (when auto-generated): - F_lat = (num_frames - 1) // vae_temporal_compression_ratio + 1 - H_lat = height // vae_spatial_compression_ratio - W_lat = width // vae_spatial_compression_ratio Notes: - Seeding: when `seed` is provided, each temporal window uses a local generator seeded with `seed + w_start`, while the shared generator is seeded once for global latents if no generator is passed; otherwise the passed-in generator is reused. - CFG: unified `noise_pred = uncond + w * (text - uncond)` with optional `guidance_rescale`. - Memory: denoising performs full-frame predictions (no spatial tiling); decoding can be tiled to avoid OOM. """ if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs # 0. Input validation: height/width must be divisible by 32 if height % 32 != 0 or width % 32 != 0: raise ValueError(f"`height` and `width` have to be divisible by 32 but are {height} and {width}.") self._guidance_scale = guidance_scale self._guidance_rescale = guidance_rescale self._attention_kwargs = attention_kwargs self._interrupt = False self._current_timestep = None # 1. Device & generator device = self._execution_device # Normalize generator input: accept list but use the first (batch_size=1) if isinstance(generator, list): generator = generator[0] if seed is not None and generator is None: generator = torch.Generator(device=device).manual_seed(seed) # 2. 
Optional i2v first frame conditioning: encode cond_image and inject at frame 0 via prepare_latents cond_latents = None if cond_image is not None and cond_strength > 0: img = self.video_processor.preprocess(cond_image, height=height, width=width) img = img.to(device=device, dtype=self.vae.dtype) enc = self.vae.encode(img.unsqueeze(2)) # [B, C, 1, h, w] cond_latents = enc.latent_dist.mode() if hasattr(enc, "latent_dist") else enc.latents cond_latents = cond_latents.to(torch.float32) cond_latents = self._normalize_latents( cond_latents, self.vae.latents_mean, self.vae.latents_std, self.vae.config.scaling_factor ) # 3. Global initial latents [B,C,F,H,W], optionally seeded/conditioned latents, negative_index_latents, latent_num_frames, latent_height, latent_width = self.prepare_latents( batch_size=1, num_channels_latents=self.transformer.config.in_channels, height=height, width=width, num_frames=num_frames, device=device, generator=generator, dtype=torch.float32, latents=latents, cond_latents=cond_latents, cond_strength=cond_strength, negative_index_latents=negative_index_latents, ) if guidance_latents is not None: guidance_latents = guidance_latents.to(device=device, dtype=torch.float32) if latents.shape[2] != guidance_latents.shape[2]: raise ValueError("The number of frames in `latents` and `guidance_latents` must be the same") # 4. Sliding windows in latent frames tile_size_lat = max(1, temporal_tile_size // self.vae_temporal_compression_ratio) overlap_lat = max(0, temporal_overlap // self.vae_temporal_compression_ratio) windows = split_into_temporal_windows( latent_num_frames, tile_size_lat, overlap_lat, self.vae_temporal_compression_ratio ) # 5. Multi-prompt segments parsing segment_texts = parse_prompt_segments(prompt, prompt_segments) out_latents = None first_window_latents = None # 6. 
Process each temporal window for w_idx, (w_start, w_end) in enumerate(windows): if self.interrupt: break # 6.1 Encode prompt embeddings per window segment seg_index = min(w_idx, len(segment_texts) - 1) if segment_texts else 0 pos_text = segment_texts[seg_index] if segment_texts else (prompt if isinstance(prompt, str) else "") ( prompt_embeds, prompt_attention_mask, negative_prompt_embeds, negative_prompt_attention_mask, ) = self.encode_prompt( prompt=[pos_text], negative_prompt=negative_prompt, do_classifier_free_guidance=self.do_classifier_free_guidance, num_videos_per_prompt=1, prompt_embeds=None, negative_prompt_embeds=None, prompt_attention_mask=None, negative_prompt_attention_mask=None, max_sequence_length=max_sequence_length, device=device, dtype=None, ) if self.do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) prompt_attention_mask = torch.cat([negative_prompt_attention_mask, prompt_attention_mask], dim=0) # 6.2 Window-level timesteps reset: fresh sampling for each temporal window if sigmas is not None: s = torch.tensor(sigmas, dtype=torch.float32) if not isinstance(sigmas, torch.Tensor) else sigmas self.scheduler.set_timesteps(sigmas=s, device=device) self._num_timesteps = len(sigmas) else: self.scheduler.set_timesteps(num_inference_steps=num_inference_steps, device=device) self._num_timesteps = num_inference_steps # 6.3 Extract window latents [B,C,T,H,W] window_latents = latents[:, :, w_start:w_end] window_guidance_latents = guidance_latents[:, :, w_start:w_end] if guidance_latents is not None else None window_T = window_latents.shape[2] # 6.4 Build per-window cond mask and inject previous tails / reference window_cond_mask_5d = torch.ones( (1, 1, window_T, latent_height, latent_width), device=device, dtype=torch.float32 ) self._current_tile_T = window_T prev_overlap_len = 0 # Inter-window tail latent injection (Extend) if w_idx > 0 and overlap_lat > 0 and out_latents is not None: k = min(overlap_lat, 
out_latents.shape[2]) prev_tail = out_latents[:, :, -k:] window_latents, window_cond_mask_5d, prev_overlap_len = inject_prev_tail_latents( window_latents, prev_tail, window_cond_mask_5d, overlap_lat, temporal_overlap_cond_strength, prev_overlap_len, ) # Reference/negative-index latent injection (append 1 frame at window head; controlled by negative_index_strength) if window_guidance_latents is not None: guiding_len = ( window_guidance_latents.shape[2] if w_idx == 0 else window_guidance_latents.shape[2] - overlap_lat ) window_latents, window_cond_mask_5d, prev_overlap_len = inject_prev_tail_latents( window_latents, window_guidance_latents[:, :, -guiding_len:], window_cond_mask_5d, guiding_len, guiding_strength, prev_overlap_len, ) else: guiding_len = 0 window_latents, window_cond_mask_5d, prev_overlap_len = inject_prev_tail_latents( window_latents, negative_index_latents, window_cond_mask_5d, 1, negative_index_strength, prev_overlap_len, ) if w_idx == 0 and cond_image is not None and cond_strength > 0: # First-frame I2V: smaller mask means stronger preservation of the original latent window_cond_mask_5d[:, :, 0] = 1.0 - cond_strength # Update effective window latent sizes (consider injections on T/H/W) w_B, w_C, w_T_eff, w_H_eff, w_W_eff = window_latents.shape p = self.transformer_spatial_patch_size pt = self.transformer_temporal_patch_size # 6.5 Pack full-window latents/masks once # Seeding policy: derive a window-local generator to decouple RNG across windows if seed is not None: tile_seed = int(seed) + int(w_start) local_gen = torch.Generator(device=device).manual_seed(tile_seed) else: local_gen = generator # randn*mask + (1-mask)*latents implements hard-condition initialization init_rand = randn_tensor(window_latents.shape, generator=local_gen, device=device, dtype=torch.float32) mixed_latents = init_rand * window_cond_mask_5d + (1 - window_cond_mask_5d) * window_latents window_latents_packed = self._pack_latents( window_latents, 
self.transformer_spatial_patch_size, self.transformer_temporal_patch_size ) latents_packed = self._pack_latents( mixed_latents, self.transformer_spatial_patch_size, self.transformer_temporal_patch_size ) cond_mask_tokens = self._pack_latents( window_cond_mask_5d, self.transformer_spatial_patch_size, self.transformer_temporal_patch_size ) if self.do_classifier_free_guidance: cond_mask = torch.cat([cond_mask_tokens, cond_mask_tokens], dim=0) else: cond_mask = cond_mask_tokens # 6.6 Denoising loop per full window (no spatial tiling) sigmas_current = self.scheduler.sigmas.to(device=latents_packed.device) if sigmas_current.shape[0] >= 2: for i, t in enumerate(self.progress_bar(self.scheduler.timesteps[:-1])): if self.interrupt: break # Skip semantics: if sigma exceeds threshold, skip this step (do not call scheduler.step) sigma_val = float(sigmas_current[i].item()) if skip_steps_sigma_threshold is not None and float(skip_steps_sigma_threshold) > 0.0: if sigma_val > float(skip_steps_sigma_threshold): continue self._current_timestep = t # Model input (stack 2 copies under CFG) latent_model_input = ( torch.cat([latents_packed] * 2) if self.do_classifier_free_guidance else latents_packed ) # Broadcast timesteps, combine with per-token cond mask (I2V at window head) timestep = t.expand(latent_model_input.shape[0]) if cond_mask is not None: # Broadcast timestep to per-token mask under CFG: [B] -> [B, S, 1] timestep = timestep[:, None, None] * cond_mask # Micro-conditions: only provide video_coords (num_frames/height/width set to 1) rope_interpolation_scale = ( self.vae_temporal_compression_ratio, self.vae_spatial_compression_ratio, self.vae_spatial_compression_ratio, ) # Inpainting pre-blend (ComfyUI parity: KSamplerX0Inpaint:400) if cond_mask_tokens is not None: latents_packed = latents_packed * cond_mask_tokens + window_latents_packed * ( 1.0 - cond_mask_tokens ) # Negative-index/overlap lengths (for segmenting time coordinates; RoPE-compatible) k_negative_count = ( 1 if 
(negative_index_latents is not None and float(negative_index_strength) > 0.0) else 0 ) k_overlap_count = overlap_lat if (w_idx > 0 and overlap_lat > 0) else 0 video_coords = build_video_coords_for_window( latents=window_latents, overlap_len=int(k_overlap_count), guiding_len=int(guiding_len), negative_len=int(k_negative_count), rope_interpolation_scale=rope_interpolation_scale, frame_rate=frame_rate, ) with self.transformer.cache_context("cond_uncond"): noise_pred = self.transformer( hidden_states=latent_model_input.to(dtype=self.transformer.dtype), encoder_hidden_states=prompt_embeds, timestep=timestep, encoder_attention_mask=prompt_attention_mask, num_frames=1, height=1, width=1, rope_interpolation_scale=rope_interpolation_scale, video_coords=video_coords, attention_kwargs=attention_kwargs, return_dict=False, )[0] # Unified CFG if self.do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) if self.guidance_rescale > 0: noise_pred = rescale_noise_cfg( noise_pred, noise_pred_text, guidance_rescale=self.guidance_rescale ) # Use global timestep for scheduling, but apply suppressive blending with hard-condition tokens (e.g., first frame) after step to avoid brightness/flicker due to time misalignment latents_packed = self.scheduler.step( noise_pred, t, latents_packed, generator=local_gen, return_dict=False )[0] # Inpainting post-blend (ComfyUI parity: restore hard-conditioned regions after update) if cond_mask_tokens is not None: latents_packed = latents_packed * cond_mask_tokens + window_latents_packed * ( 1.0 - cond_mask_tokens ) if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents_packed = callback_outputs.pop("latents", latents_packed) prompt_embeds = 
callback_outputs.pop("prompt_embeds", prompt_embeds) if XLA_AVAILABLE: xm.mark_step() else: # Not enough sigmas to perform a valid step; skip this window safely. pass # 6.7 Unpack back to [B,C,T,H,W] once window_out = self._unpack_latents( latents_packed, w_T_eff, w_H_eff, w_W_eff, p, pt, ) if prev_overlap_len > 0: window_out = window_out[:, :, :-prev_overlap_len] # 6.8 Overlap handling and fusion if out_latents is None: # First window: keep all latent frames and cache as AdaIN reference out_latents = window_out first_window_latents = out_latents else: window_out = window_out[:, :, 1:] # Drop the first frame of the new window if adain_factor > 0 and first_window_latents is not None: window_out = adain_normalize_latents(window_out, first_window_latents, adain_factor) overlap_len = max(overlap_lat - 1, 1) prev_tail_chunk = out_latents[:, :, -window_out.shape[2] :] fused = linear_overlap_fuse(prev_tail_chunk, window_out, overlap_len) out_latents = torch.cat([out_latents[:, :, : -window_out.shape[2]], fused], dim=2) # 7. Decode or return latent if output_type == "latent": video = out_latents else: # Decode via tiling to avoid OOM from full-frame decoding; latents are already de-normalized, so keep auto_denormalize disabled video = self.vae_decode_tiled( out_latents, decode_timestep=decode_timestep, decode_noise_scale=decode_noise_scale, horizontal_tiles=int(decode_horizontal_tiles), vertical_tiles=int(decode_vertical_tiles), overlap=int(decode_overlap), generator=generator, output_type=output_type, # Keep type consistent; postprocess is applied afterwards ) # Offload all models self.maybe_free_model_hooks() if not return_dict: return (video,) return LTXPipelineOutput(frames=video)
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/pipelines/ltx/pipeline_ltx_i2v_long_multi_prompt.py", "license": "Apache License 2.0", "lines": 1247, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:src/diffusers/schedulers/scheduling_ltx_euler_ancestral_rf.py
# Copyright 2025 Lightricks and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
LTXEulerAncestralRFScheduler

This scheduler implements a K-diffusion style Euler-Ancestral sampler specialized for flow / CONST
parameterization, closely mirroring ComfyUI's `sample_euler_ancestral_RF` implementation used for LTX-Video.

Reference implementation (ComfyUI): comfy.k_diffusion.sampling.sample_euler_ancestral_RF
"""

from dataclasses import dataclass

import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, logging
from ..utils.torch_utils import randn_tensor
from .scheduling_utils import SchedulerMixin


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


@dataclass
class LTXEulerAncestralRFSchedulerOutput(BaseOutput):
    """
    Output class for the scheduler's `step` function output.

    Args:
        prev_sample (`torch.FloatTensor`):
            Updated sample for the next step in the denoising process.
    """

    prev_sample: torch.FloatTensor


class LTXEulerAncestralRFScheduler(SchedulerMixin, ConfigMixin):
    """
    Euler-Ancestral scheduler for LTX-Video (RF / CONST parametrization).

    This scheduler is intended for models where the network is trained with a CONST-like parameterization (as in
    LTXV / FLUX). It approximates ComfyUI's `sample_euler_ancestral_RF` sampler and is useful when reproducing
    ComfyUI workflows inside diffusers.

    The scheduler can either:

    - reuse the [`FlowMatchEulerDiscreteScheduler`] sigma / timestep logic when only `num_inference_steps` is
      provided (default diffusers-style usage), or
    - follow an explicit ComfyUI-style sigma schedule when `sigmas` (or `timesteps`) are passed to
      [`set_timesteps`].

    Args:
        num_train_timesteps (`int`, defaults to 1000):
            Included for config compatibility; not used to build the schedule.
        eta (`float`, defaults to 1.0):
            Stochasticity parameter. `eta=0.0` yields deterministic DDIM-like sampling; `eta=1.0` matches
            ComfyUI's default RF behavior.
        s_noise (`float`, defaults to 1.0):
            Global scaling factor for the stochastic noise term.
    """

    # Allow config migration from the flow-match scheduler and back.
    _compatibles = ["FlowMatchEulerDiscreteScheduler"]
    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        eta: float = 1.0,
        s_noise: float = 1.0,
    ):
        # Note: num_train_timesteps is kept only for config compatibility.
        # All schedule state is unset until `set_timesteps` is called.
        self.num_inference_steps: int | None = None
        self.sigmas: torch.Tensor | None = None
        self.timesteps: torch.Tensor | None = None
        self._step_index: int | None = None
        self._begin_index: int | None = None

    @property
    def step_index(self) -> int | None:
        # Index of the next sigma to consume; `None` until the first `step` (or `set_begin_index`) call.
        return self._step_index

    @property
    def begin_index(self) -> int | None:
        """
        The index for the first timestep. It can be set from a pipeline with `set_begin_index` to support
        image-to-image like workflows that start denoising part-way through the schedule.
        """
        return self._begin_index

    def set_begin_index(self, begin_index: int = 0):
        """
        Included for API compatibility; not strictly needed here but kept to allow pipelines that call
        `set_begin_index`.
        """
        self._begin_index = begin_index

    def index_for_timestep(
        self, timestep: float | torch.Tensor, schedule_timesteps: torch.Tensor | None = None
    ) -> int:
        """
        Map a (continuous) `timestep` value to an index into `self.timesteps`.

        This follows the convention used in other discrete schedulers: if the same timestep value appears multiple
        times in the schedule (which can happen when starting in the middle of the schedule), the *second*
        occurrence is used for the first `step` call so that no sigma is accidentally skipped.

        Raises:
            ValueError: If `set_timesteps` has not been called yet, or `timestep` is not in the schedule.
        """
        if schedule_timesteps is None:
            if self.timesteps is None:
                raise ValueError("Timesteps have not been set. Call `set_timesteps` first.")
            schedule_timesteps = self.timesteps

        if isinstance(timestep, torch.Tensor):
            timestep = timestep.to(schedule_timesteps.device)

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        pos = 1 if len(indices) > 1 else 0
        if len(indices) == 0:
            raise ValueError(
                "Passed `timestep` is not in `self.timesteps`. Make sure to use values from `scheduler.timesteps`."
            )

        return indices[pos].item()

    def _init_step_index(self, timestep: float | torch.Tensor):
        """
        Initialize the internal step index based on a given timestep.

        If a begin index was set via `set_begin_index`, it takes precedence; otherwise the index is looked
        up from the timestep value.
        """
        if self.timesteps is None:
            raise ValueError("Timesteps have not been set. Call `set_timesteps` first.")

        if self.begin_index is None:
            if isinstance(timestep, torch.Tensor):
                timestep = timestep.to(self.timesteps.device)
            self._step_index = self.index_for_timestep(timestep)
        else:
            self._step_index = self._begin_index

    def set_timesteps(
        self,
        num_inference_steps: int | None = None,
        device: str | torch.device | None = None,
        sigmas: list[float] | torch.Tensor | None = None,
        timesteps: list[float] | torch.Tensor | None = None,
        mu: float | None = None,
        **kwargs,
    ):
        """
        Set the sigma / timestep schedule for sampling.

        When `sigmas` or `timesteps` are provided explicitly, they are used as the RF sigma schedule
        (ComfyUI-style) and are expected to include the terminal 0.0.

        When both are `None`, the scheduler reuses the [`FlowMatchEulerDiscreteScheduler`] logic to generate
        sigmas from `num_inference_steps` and the stored config (including any resolution-dependent shifting,
        Karras/beta schedules, etc.).

        Args:
            num_inference_steps (`int`, *optional*):
                Number of denoising steps. If provided together with explicit `sigmas`/`timesteps`, they are
                expected to be consistent and are otherwise ignored with a warning.
            device (`str` or `torch.device`, *optional*):
                Device to move the internal tensors to.
            sigmas (`list[float]` or `torch.Tensor`, *optional*):
                Explicit sigma schedule, e.g. `[1.0, 0.99, ..., 0.0]`.
            timesteps (`list[float]` or `torch.Tensor`, *optional*):
                Optional alias for `sigmas`. If `sigmas` is None and `timesteps` is provided, timesteps are
                treated as sigmas.
            mu (`float`, *optional*):
                Optional shift parameter used when delegating to
                [`FlowMatchEulerDiscreteScheduler.set_timesteps`] and `config.use_dynamic_shifting` is `True`.
        """
        # 1. Auto-generate schedule (FlowMatch-style) when no explicit sigmas/timesteps are given
        if sigmas is None and timesteps is None:
            if num_inference_steps is None:
                raise ValueError(
                    "LTXEulerAncestralRFScheduler.set_timesteps requires either explicit `sigmas`/`timesteps` "
                    "or a `num_inference_steps` value."
                )

            # We reuse FlowMatchEulerDiscreteScheduler to construct a sigma schedule that is
            # consistent with the original LTX training setup (including optional time shifting,
            # Karras / exponential / beta schedules, etc.).
            # Local import avoids a circular dependency at module import time.
            from .scheduling_flow_match_euler_discrete import FlowMatchEulerDiscreteScheduler

            base_scheduler = FlowMatchEulerDiscreteScheduler.from_config(self.config)
            base_scheduler.set_timesteps(
                num_inference_steps=num_inference_steps,
                device=device,
                sigmas=None,
                mu=mu,
                timesteps=None,
            )

            self.num_inference_steps = base_scheduler.num_inference_steps
            # Keep sigmas / timesteps on the requested device so step() can operate on-device without
            # extra transfers.
            self.sigmas = base_scheduler.sigmas.to(device=device)
            self.timesteps = base_scheduler.timesteps.to(device=device)

            self._step_index = None
            self._begin_index = None
            return

        # 2. Explicit sigma schedule (ComfyUI-style path)
        if sigmas is None:
            # `timesteps` is treated as sigmas in RF / flow-matching setups.
            sigmas = timesteps

        if isinstance(sigmas, list):
            sigmas_tensor = torch.tensor(sigmas, dtype=torch.float32)
        elif isinstance(sigmas, torch.Tensor):
            sigmas_tensor = sigmas.to(dtype=torch.float32)
        else:
            raise TypeError(f"`sigmas` must be a list or torch.Tensor, got {type(sigmas)}.")

        if sigmas_tensor.ndim != 1:
            raise ValueError(f"`sigmas` must be a 1D tensor, got shape {tuple(sigmas_tensor.shape)}.")

        if sigmas_tensor[-1].abs().item() > 1e-6:
            logger.warning(
                "The last sigma in the schedule is not zero (%.6f). "
                "For best compatibility with ComfyUI's RF sampler, the terminal sigma "
                "should be 0.0.",
                sigmas_tensor[-1].item(),
            )

        # Move to device once, then derive timesteps.
        if device is not None:
            sigmas_tensor = sigmas_tensor.to(device)

        # Internal sigma schedule stays in [0, 1] (as provided).
        self.sigmas = sigmas_tensor

        # Timesteps are scaled to match the training setup of LTX (FlowMatch-style),
        # where the network expects timesteps on [0, num_train_timesteps].
        # This keeps the transformer conditioning in the expected range while the RF
        # scheduler still operates on the raw sigma values.
        num_train = float(getattr(self.config, "num_train_timesteps", 1000))
        self.timesteps = sigmas_tensor * num_train

        if num_inference_steps is not None and num_inference_steps != len(sigmas) - 1:
            logger.warning(
                "Provided `num_inference_steps=%d` does not match `len(sigmas)-1=%d`. "
                "Overriding `num_inference_steps` with `len(sigmas)-1`.",
                num_inference_steps,
                len(sigmas) - 1,
            )
        self.num_inference_steps = len(sigmas) - 1

        self._step_index = None
        self._begin_index = None

    def _sigma_broadcast(self, sigma: torch.Tensor, sample: torch.Tensor) -> torch.Tensor:
        """
        Helper to broadcast a scalar sigma to the shape of `sample`.

        Appends trailing singleton dimensions until `sigma.ndim == sample.ndim` so elementwise ops broadcast.
        """
        while sigma.ndim < sample.ndim:
            sigma = sigma.view(*sigma.shape, 1)
        return sigma

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: float | torch.Tensor,
        sample: torch.FloatTensor,
        generator: torch.Generator | None = None,
        return_dict: bool = True,
    ) -> LTXEulerAncestralRFSchedulerOutput | tuple[torch.FloatTensor]:
        """
        Perform a single Euler-Ancestral RF update step.

        Args:
            model_output (`torch.FloatTensor`):
                Raw model output at the current step. Interpreted under the CONST parametrization as `v_t`, with
                denoised state reconstructed as `x0 = x_t - sigma_t * v_t`.
            timestep (`float` or `torch.Tensor`):
                The current timestep value, taken from `scheduler.timesteps` (must match one entry in
                `self.timesteps`; note that in the explicit-sigma path timesteps are sigmas scaled by
                `num_train_timesteps`, not raw sigmas).
            sample (`torch.FloatTensor`):
                Current latent sample `x_t`.
            generator (`torch.Generator`, *optional*):
                Optional generator for reproducible noise.
            return_dict (`bool`):
                If `True`, return a `LTXEulerAncestralRFSchedulerOutput`; otherwise return a tuple where the first
                element is the updated sample.

        Raises:
            ValueError: If an integer index is passed as `timestep`, or `set_timesteps` was never called.
        """
        if isinstance(timestep, (int, torch.IntTensor, torch.LongTensor)):
            raise ValueError(
                (
                    "Passing integer indices (e.g. from `enumerate(timesteps)`) as timesteps to"
                    " `LTXEulerAncestralRFScheduler.step()` is not supported. Make sure to pass"
                    " one of the `scheduler.timesteps` values as `timestep`."
                ),
            )

        if self.sigmas is None or self.timesteps is None:
            raise ValueError("Scheduler has not been initialized. Call `set_timesteps` before `step`.")

        if self._step_index is None:
            self._init_step_index(timestep)

        i = self._step_index
        if i >= len(self.sigmas) - 1:
            # Already at the end; simply return the current sample.
            prev_sample = sample
        else:
            # Work in float32 for numerical stability
            sample_f = sample.to(torch.float32)
            model_output_f = model_output.to(torch.float32)

            sigma = self.sigmas[i]
            sigma_next = self.sigmas[i + 1]

            sigma_b = self._sigma_broadcast(sigma.view(1), sample_f)
            sigma_next_b = self._sigma_broadcast(sigma_next.view(1), sample_f)

            # Approximate denoised x0 under CONST parametrization:
            #   x0 = x_t - sigma_t * v_t
            denoised = sample_f - sigma_b * model_output_f

            if sigma_next.abs().item() < 1e-8:
                # Final denoising step: terminal sigma treated as exactly zero below this tolerance.
                x = denoised
            else:
                eta = float(self.config.eta)
                s_noise = float(self.config.s_noise)

                # Downstep computation (ComfyUI RF variant)
                downstep_ratio = 1.0 + (sigma_next / sigma - 1.0) * eta
                sigma_down = sigma_next * downstep_ratio

                alpha_ip1 = 1.0 - sigma_next
                alpha_down = 1.0 - sigma_down

                # Deterministic part (Euler step in (x, x0)-space)
                sigma_down_b = self._sigma_broadcast(sigma_down.view(1), sample_f)
                alpha_ip1_b = self._sigma_broadcast(alpha_ip1.view(1), sample_f)
                alpha_down_b = self._sigma_broadcast(alpha_down.view(1), sample_f)

                sigma_ratio = sigma_down_b / sigma_b
                x = sigma_ratio * sample_f + (1.0 - sigma_ratio) * denoised

                # Stochastic ancestral noise; the 1e-12 terms guard against division by zero
                # when alpha_down approaches 0.
                if eta > 0.0 and s_noise > 0.0:
                    renoise_coeff = (
                        (sigma_next_b**2 - sigma_down_b**2 * alpha_ip1_b**2 / (alpha_down_b**2 + 1e-12))
                        .clamp(min=0.0)
                        .sqrt()
                    )
                    noise = randn_tensor(
                        sample_f.shape, generator=generator, device=sample_f.device, dtype=sample_f.dtype
                    )
                    x = (alpha_ip1_b / (alpha_down_b + 1e-12)) * x + noise * renoise_coeff * s_noise

            prev_sample = x.to(sample.dtype)

        # Advance internal step index (clamped so repeated terminal calls are safe)
        self._step_index = min(self._step_index + 1, len(self.sigmas) - 1)

        if not return_dict:
            return (prev_sample,)

        return LTXEulerAncestralRFSchedulerOutput(prev_sample=prev_sample)

    def __len__(self) -> int:
        # For compatibility with other schedulers; used e.g. in some training
        # utilities to infer the maximum number of training timesteps.
        return int(getattr(self.config, "num_train_timesteps", 1000))
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/schedulers/scheduling_ltx_euler_ancestral_rf.py", "license": "Apache License 2.0", "lines": 318, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:examples/community/pipeline_z_image_differential_img2img.py
# Copyright 2025 Alibaba Z-Image Team and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect from typing import Any, Callable, Dict, List, Optional, Union import torch from transformers import AutoTokenizer, PreTrainedModel from diffusers.image_processor import PipelineImageInput, VaeImageProcessor from diffusers.loaders import FromSingleFileMixin, ZImageLoraLoaderMixin from diffusers.models.autoencoders import AutoencoderKL from diffusers.models.transformers import ZImageTransformer2DModel from diffusers.pipelines.pipeline_utils import DiffusionPipeline from diffusers.pipelines.z_image.pipeline_output import ZImagePipelineOutput from diffusers.schedulers import FlowMatchEulerDiscreteScheduler from diffusers.utils import logging, replace_example_docstring from diffusers.utils.torch_utils import randn_tensor logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py >>> import torch >>> from pipeline_z_image_differential_img2img import ZImageDifferentialImg2ImgPipeline >>> from diffusers.utils import load_image >>> pipe = ZImageDifferentialImg2ImgPipeline.from_pretrained("Z-a-o/Z-Image-Turbo", torch_dtype=torch.bfloat16) >>> pipe.to("cuda") >>> init_image = load_image( >>> "https://github.com/exx8/differential-diffusion/blob/main/assets/input.jpg?raw=true", >>> ) >>> mask = load_image( >>> "https://github.com/exx8/differential-diffusion/blob/main/assets/map.jpg?raw=true", >>> ) 
>>> prompt = "painting of a mountain landscape with a meadow and a forest, meadow background, anime countryside landscape, anime nature wallpap, anime landscape wallpaper, studio ghibli landscape, anime landscape, mountain behind meadow, anime background art, studio ghibli environment, background of flowery hill, anime beautiful peace scene, forrest background, anime scenery, landscape background, background art, anime scenery concept art" >>> image = pipe( ... prompt, ... image=init_image, ... mask_image=mask, ... strength=0.75, ... num_inference_steps=9, ... guidance_scale=0.0, ... generator=torch.Generator("cuda").manual_seed(41), ... ).images[0] >>> image.save("image.png") ``` """ # Copied from diffusers.pipelines.flux.pipeline_flux.calculate_shift def calculate_shift( image_seq_len, base_seq_len: int = 256, max_seq_len: int = 4096, base_shift: float = 0.5, max_shift: float = 1.15, ): m = (max_shift - base_shift) / (max_seq_len - base_seq_len) b = base_shift - m * base_seq_len mu = image_seq_len * m + b return mu # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents def retrieve_latents( encoder_output: torch.Tensor, generator: torch.Generator | None = None, sample_mode: str = "sample" ): if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": return encoder_output.latent_dist.sample(generator) elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": return encoder_output.latent_dist.mode() elif hasattr(encoder_output, "latents"): return encoder_output.latents else: raise AttributeError("Could not access latents of provided encoder_output") # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps def retrieve_timesteps( scheduler, num_inference_steps: Optional[int] = None, device: Optional[Union[str, torch.device]] = None, timesteps: Optional[List[int]] = None, sigmas: Optional[List[float]] = None, **kwargs, ): r""" Calls the scheduler's 
`set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. Args: scheduler (`SchedulerMixin`): The scheduler to get timesteps from. num_inference_steps (`int`): The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` must be `None`. device (`str` or `torch.device`, *optional*): The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. timesteps (`List[int]`, *optional*): Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, `num_inference_steps` and `sigmas` must be `None`. sigmas (`List[float]`, *optional*): Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, `num_inference_steps` and `timesteps` must be `None`. Returns: `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the second element is the number of inference steps. """ if timesteps is not None and sigmas is not None: raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") if timesteps is not None: accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" timestep schedules. Please check whether you are using the correct scheduler." 
) scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif sigmas is not None: accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accept_sigmas: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" sigmas schedules. Please check whether you are using the correct scheduler." ) scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return timesteps, num_inference_steps class ZImageDifferentialImg2ImgPipeline(DiffusionPipeline, ZImageLoraLoaderMixin, FromSingleFileMixin): r""" The ZImage pipeline for image-to-image generation. Args: scheduler ([`FlowMatchEulerDiscreteScheduler`]): A scheduler to be used in combination with `transformer` to denoise the encoded image latents. vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. text_encoder ([`PreTrainedModel`]): A text encoder model to encode text prompts. tokenizer ([`AutoTokenizer`]): A tokenizer to tokenize text prompts. transformer ([`ZImageTransformer2DModel`]): A ZImage transformer model to denoise the encoded image latents. 
    def __init__(
        self,
        scheduler: FlowMatchEulerDiscreteScheduler,
        vae: AutoencoderKL,
        text_encoder: PreTrainedModel,
        tokenizer: AutoTokenizer,
        transformer: ZImageTransformer2DModel,
    ):
        """
        Register the pipeline sub-models and build the image/mask processors.

        Args:
            scheduler: Flow-matching scheduler used to denoise the latents.
            vae: VAE used to encode input images to latents and decode latents back to images.
            text_encoder: Language model producing the prompt embeddings.
            tokenizer: Tokenizer matching `text_encoder`.
            transformer: Denoising transformer operating in latent space.
        """
        super().__init__()

        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            scheduler=scheduler,
            transformer=transformer,
        )
        # Spatial downsampling factor of the VAE (2 ** number of downsampling stages);
        # falls back to 8 when no VAE is registered.
        self.vae_scale_factor = (
            2 ** (len(self.vae.config.block_out_channels) - 1) if hasattr(self, "vae") and self.vae is not None else 8
        )
        # Fallback of 16 latent channels when no VAE is available.
        latent_channels = self.vae.config.latent_channels if getattr(self, "vae", None) else 16
        # Images are processed at twice the VAE scale factor because latents are
        # packed 2x2 before being fed to the transformer (see prepare_latents).
        self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor * 2)
        # Mask processor keeps values in [0, 1] (no normalization/binarization) and
        # converts the mask to a single grayscale channel.
        self.mask_processor = VaeImageProcessor(
            vae_scale_factor=self.vae_scale_factor,
            vae_latent_channels=latent_channels,
            do_normalize=False,
            do_binarize=False,
            do_convert_grayscale=True,
        )
max_sequence_length=max_sequence_length, ) else: negative_prompt_embeds = [] return prompt_embeds, negative_prompt_embeds # Copied from diffusers.pipelines.z_image.pipeline_z_image.ZImagePipeline._encode_prompt def _encode_prompt( self, prompt: Union[str, List[str]], device: Optional[torch.device] = None, prompt_embeds: Optional[List[torch.FloatTensor]] = None, max_sequence_length: int = 512, ) -> List[torch.FloatTensor]: device = device or self._execution_device if prompt_embeds is not None: return prompt_embeds if isinstance(prompt, str): prompt = [prompt] for i, prompt_item in enumerate(prompt): messages = [ {"role": "user", "content": prompt_item}, ] prompt_item = self.tokenizer.apply_chat_template( messages, tokenize=False, add_generation_prompt=True, enable_thinking=True, ) prompt[i] = prompt_item text_inputs = self.tokenizer( prompt, padding="max_length", max_length=max_sequence_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids.to(device) prompt_masks = text_inputs.attention_mask.to(device).bool() prompt_embeds = self.text_encoder( input_ids=text_input_ids, attention_mask=prompt_masks, output_hidden_states=True, ).hidden_states[-2] embeddings_list = [] for i in range(len(prompt_embeds)): embeddings_list.append(prompt_embeds[i][prompt_masks[i]]) return embeddings_list # Copied from diffusers.pipelines.stable_diffusion_3.pipeline_stable_diffusion_3_img2img.StableDiffusion3Img2ImgPipeline.get_timesteps def get_timesteps(self, num_inference_steps, strength, device): # get the original timestep using init_timestep init_timestep = min(num_inference_steps * strength, num_inference_steps) t_start = int(max(num_inference_steps - init_timestep, 0)) timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :] if hasattr(self.scheduler, "set_begin_index"): self.scheduler.set_begin_index(t_start * self.scheduler.order) return timesteps, num_inference_steps - t_start @staticmethod def _prepare_latent_image_ids(batch_size, 
height, width, device, dtype): latent_image_ids = torch.zeros(height // 2, width // 2, 3) latent_image_ids[..., 1] = latent_image_ids[..., 1] + torch.arange(height // 2)[:, None] latent_image_ids[..., 2] = latent_image_ids[..., 2] + torch.arange(width // 2)[None, :] latent_image_id_height, latent_image_id_width, latent_image_id_channels = latent_image_ids.shape latent_image_ids = latent_image_ids.reshape( latent_image_id_height * latent_image_id_width, latent_image_id_channels ) return latent_image_ids.to(device=device, dtype=dtype) def prepare_latents( self, image, timestep, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None, ): height = 2 * (int(height) // (self.vae_scale_factor * 2)) width = 2 * (int(width) // (self.vae_scale_factor * 2)) shape = (batch_size, num_channels_latents, height, width) latent_image_ids = self._prepare_latent_image_ids(batch_size, height, width, device, dtype) if latents is not None: return latents.to(device=device, dtype=dtype) # Encode the input image image = image.to(device=device, dtype=dtype) if image.shape[1] != num_channels_latents: if isinstance(generator, list): image_latents = [ retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i]) for i in range(image.shape[0]) ] image_latents = torch.cat(image_latents, dim=0) else: image_latents = retrieve_latents(self.vae.encode(image), generator=generator) # Apply scaling (inverse of decoding: decode does latents/scaling_factor + shift_factor) image_latents = (image_latents - self.vae.config.shift_factor) * self.vae.config.scaling_factor else: image_latents = image # Handle batch size expansion if batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] == 0: additional_image_per_prompt = batch_size // image_latents.shape[0] image_latents = torch.cat([image_latents] * additional_image_per_prompt, dim=0) elif batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] != 0: raise ValueError( 
    def prepare_mask_latents(
        self,
        mask,
        masked_image,
        batch_size,
        num_images_per_prompt,
        height,
        width,
        dtype,
        device,
        generator,
    ):
        """
        Resize the mask to the latent resolution and encode the masked image.

        Args:
            mask: Grayscale mask batch in [0, 1], shape `(B, 1, H, W)`.
            masked_image: Pixel-space image batch to encode (or pre-encoded latents
                when it already has 16 channels).
            batch_size: Prompt batch size (multiplied by `num_images_per_prompt`).
            num_images_per_prompt: Number of generations per prompt.
            height, width: Target pixel dimensions; converted to latent dimensions.
            dtype, device: Target dtype/device for the returned tensors.
            generator: RNG for the VAE's latent sampling.

        Returns:
            A tuple `(mask, masked_image_latents)`, both expanded to the total batch size.
        """
        height = 2 * (int(height) // (self.vae_scale_factor * 2))
        width = 2 * (int(width) // (self.vae_scale_factor * 2))
        # resize the mask to latents shape as we concatenate the mask to the latents
        # we do that before converting to dtype to avoid breaking in case we're using cpu_offload
        # and half precision
        mask = torch.nn.functional.interpolate(mask, size=(height, width))
        mask = mask.to(device=device, dtype=dtype)

        batch_size = batch_size * num_images_per_prompt

        masked_image = masked_image.to(device=device, dtype=dtype)

        # 16 channels means the caller already passed latents; skip VAE encoding.
        if masked_image.shape[1] == 16:
            masked_image_latents = masked_image
        else:
            masked_image_latents = retrieve_latents(self.vae.encode(masked_image), generator=generator)

        # Inverse of decoding: decode computes latents / scaling_factor + shift_factor.
        masked_image_latents = (masked_image_latents - self.vae.config.shift_factor) * self.vae.config.scaling_factor

        # duplicate mask and masked_image_latents for each generation per prompt, using mps friendly method
        if mask.shape[0] < batch_size:
            if not batch_size % mask.shape[0] == 0:
                raise ValueError(
                    "The passed mask and the required batch size don't match. Masks are supposed to be duplicated to"
                    f" a total batch size of {batch_size}, but {mask.shape[0]} masks were passed. Make sure the number"
                    " of masks that you pass is divisible by the total requested batch size."
                )
            mask = mask.repeat(batch_size // mask.shape[0], 1, 1, 1)
        if masked_image_latents.shape[0] < batch_size:
            if not batch_size % masked_image_latents.shape[0] == 0:
                raise ValueError(
                    "The passed images and the required batch size don't match. Images are supposed to be duplicated"
                    f" to a total batch size of {batch_size}, but {masked_image_latents.shape[0]} images were passed."
                    " Make sure the number of images that you pass is divisible by the total requested batch size."
                )
            masked_image_latents = masked_image_latents.repeat(batch_size // masked_image_latents.shape[0], 1, 1, 1)

        # aligning device to prevent device errors when concating it with the latent model input
        masked_image_latents = masked_image_latents.to(device=device, dtype=dtype)
        return mask, masked_image_latents
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        prompt: Union[str, List[str]] = None,
        image: PipelineImageInput = None,
        mask_image: PipelineImageInput = None,
        strength: float = 0.6,
        height: Optional[int] = None,
        width: Optional[int] = None,
        num_inference_steps: int = 50,
        sigmas: Optional[List[float]] = None,
        guidance_scale: float = 5.0,
        cfg_normalization: bool = False,
        cfg_truncation: float = 1.0,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        prompt_embeds: Optional[List[torch.FloatTensor]] = None,
        negative_prompt_embeds: Optional[List[torch.FloatTensor]] = None,
        output_type: str | None = "pil",
        return_dict: bool = True,
        joint_attention_kwargs: Optional[Dict[str, Any]] = None,
        callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
        callback_on_step_end_tensor_inputs: List[str] = ["latents"],
        max_sequence_length: int = 512,
    ):
        r"""
        Function invoked when calling the pipeline for image-to-image generation.

        Args:
            prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
                instead.
            image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, or
                    `List[np.ndarray]`):
                `Image`, numpy array or tensor representing an image batch to be used as the starting point. For both
                numpy array and pytorch tensor, the expected value range is between `[0, 1]`. If it's a tensor or a
                list of tensors, the expected shape should be `(B, C, H, W)` or `(C, H, W)`. If it is a numpy array or
                a list of arrays, the expected shape should be `(B, H, W, C)` or `(H, W, C)`.
            mask_image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`,
                    `List[PIL.Image.Image]`, or `List[np.ndarray]`):
                `Image`, numpy array or tensor representing an image batch to mask `image`. Black pixels in the mask
                are repainted while white pixels are preserved. If `mask_image` is a PIL image, it is converted to a
                single channel (luminance) before use. If it's a numpy array or pytorch tensor, it should contain one
                color channel (L) instead of 3, so the expected shape for pytorch tensor would be `(B, 1, H, W)`,
                `(B, H, W)`, `(1, H, W)`, `(H, W)`. And for numpy array would be for `(B, H, W, 1)`, `(B, H, W)`,
                `(H, W, 1)`, or `(H, W)`.
            strength (`float`, *optional*, defaults to 0.6):
                Indicates extent to transform the reference `image`. Must be between 0 and 1. `image` is used as a
                starting point and more noise is added the higher the `strength`. The number of denoising steps
                depends on the amount of noise initially added. When `strength` is 1, added noise is maximum and the
                denoising process runs for the full number of iterations specified in `num_inference_steps`. A value
                of 1 essentially ignores `image`.
            height (`int`, *optional*, defaults to 1024):
                The height in pixels of the generated image. If not provided, uses the input image height.
            width (`int`, *optional*, defaults to 1024):
                The width in pixels of the generated image. If not provided, uses the input image width.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            sigmas (`List[float]`, *optional*):
                Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in
                their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
                passed will be used.
            guidance_scale (`float`, *optional*, defaults to 5.0):
                Guidance scale as defined in [Classifier-Free Diffusion
                Guidance](https://arxiv.org/abs/2207.12598). `guidance_scale` is defined as `w` of equation 2. of
                [Imagen Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting
                `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to
                the text `prompt`, usually at the expense of lower image quality.
            cfg_normalization (`bool`, *optional*, defaults to False):
                Whether to apply configuration normalization.
            cfg_truncation (`float`, *optional*, defaults to 1.0):
                The truncation value for configuration.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. If not defined, one has to pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale`
                is less than `1`).
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
                to make generation deterministic.
            latents (`torch.FloatTensor`, *optional*):
                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a
                latents tensor will be generated by sampling using the supplied random `generator`.
            prompt_embeds (`List[torch.FloatTensor]`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If
                not provided, text embeddings will be generated from `prompt` input argument.
            negative_prompt_embeds (`List[torch.FloatTensor]`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
                argument.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generate image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.stable_diffusion.ZImagePipelineOutput`] instead of a plain
                tuple.
            joint_attention_kwargs (`dict`, *optional*):
                A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
                `self.processor` in
                [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
            callback_on_step_end (`Callable`, *optional*):
                A function that calls at the end of each denoising steps during the inference. The function is called
                with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep:
                int, callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
                `callback_on_step_end_tensor_inputs`.
            callback_on_step_end_tensor_inputs (`List`, *optional*):
                The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
                will be passed as `callback_kwargs` argument. You will only be able to include variables listed in
                the `._callback_tensor_inputs` attribute of your pipeline class.
            max_sequence_length (`int`, *optional*, defaults to 512):
                Maximum sequence length to use with the `prompt`.

        Examples:

        Returns:
            [`~pipelines.z_image.ZImagePipelineOutput`] or `tuple`:
            [`~pipelines.z_image.ZImagePipelineOutput`] if `return_dict` is True, otherwise a `tuple`. When returning
            a tuple, the first element is a list with the generated images.
        """
        # 1. Check inputs and validate strength
        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should be in [0.0, 1.0] but is {strength}")

        # 2. Preprocess image
        init_image = self.image_processor.preprocess(image)
        init_image = init_image.to(dtype=torch.float32)

        # Get dimensions from the preprocessed image if not specified
        if height is None:
            height = init_image.shape[-2]
        if width is None:
            width = init_image.shape[-1]

        # Dimensions must be divisible by the packed latent granularity (VAE scale * 2).
        vae_scale = self.vae_scale_factor * 2
        if height % vae_scale != 0:
            raise ValueError(
                f"Height must be divisible by {vae_scale} (got {height}). "
                f"Please adjust the height to a multiple of {vae_scale}."
            )
        if width % vae_scale != 0:
            raise ValueError(
                f"Width must be divisible by {vae_scale} (got {width}). "
                f"Please adjust the width to a multiple of {vae_scale}."
            )

        device = self._execution_device

        self._guidance_scale = guidance_scale
        self._joint_attention_kwargs = joint_attention_kwargs
        self._interrupt = False
        self._cfg_normalization = cfg_normalization
        self._cfg_truncation = cfg_truncation

        # 3. Define call parameters
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = len(prompt_embeds)

        # If prompt_embeds is provided and prompt is None, skip encoding
        if prompt_embeds is not None and prompt is None:
            if self.do_classifier_free_guidance and negative_prompt_embeds is None:
                raise ValueError(
                    "When `prompt_embeds` is provided without `prompt`, "
                    "`negative_prompt_embeds` must also be provided for classifier-free guidance."
                )
        else:
            (
                prompt_embeds,
                negative_prompt_embeds,
            ) = self.encode_prompt(
                prompt=prompt,
                negative_prompt=negative_prompt,
                do_classifier_free_guidance=self.do_classifier_free_guidance,
                prompt_embeds=prompt_embeds,
                negative_prompt_embeds=negative_prompt_embeds,
                device=device,
                max_sequence_length=max_sequence_length,
            )

        # 4. Prepare latent variables
        num_channels_latents = self.transformer.in_channels

        # Repeat prompt_embeds for num_images_per_prompt (embeddings are per-sample lists)
        if num_images_per_prompt > 1:
            prompt_embeds = [pe for pe in prompt_embeds for _ in range(num_images_per_prompt)]
            if self.do_classifier_free_guidance and negative_prompt_embeds:
                negative_prompt_embeds = [npe for npe in negative_prompt_embeds for _ in range(num_images_per_prompt)]

        actual_batch_size = batch_size * num_images_per_prompt

        # Calculate latent dimensions for image_seq_len
        latent_height = 2 * (int(height) // (self.vae_scale_factor * 2))
        latent_width = 2 * (int(width) // (self.vae_scale_factor * 2))
        image_seq_len = (latent_height // 2) * (latent_width // 2)

        # 5. Prepare timesteps; `mu` shifts the sigma schedule based on resolution.
        mu = calculate_shift(
            image_seq_len,
            self.scheduler.config.get("base_image_seq_len", 256),
            self.scheduler.config.get("max_image_seq_len", 4096),
            self.scheduler.config.get("base_shift", 0.5),
            self.scheduler.config.get("max_shift", 1.15),
        )
        self.scheduler.sigma_min = 0.0
        scheduler_kwargs = {"mu": mu}
        timesteps, num_inference_steps = retrieve_timesteps(
            self.scheduler,
            num_inference_steps,
            device,
            sigmas=sigmas,
            **scheduler_kwargs,
        )

        # 6. Adjust timesteps based on strength (img2img: skip the noisiest steps)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
        if num_inference_steps < 1:
            raise ValueError(
                f"After adjusting the num_inference_steps by strength parameter: {strength}, the number of pipeline "
                f"steps is {num_inference_steps} which is < 1 and not appropriate for this pipeline."
            )
        latent_timestep = timesteps[:1].repeat(actual_batch_size)

        # 7. Prepare latents from image (noised to the first kept timestep)
        latents, noise, original_image_latents, latent_image_ids = self.prepare_latents(
            init_image,
            latent_timestep,
            actual_batch_size,
            num_channels_latents,
            height,
            width,
            prompt_embeds[0].dtype,
            device,
            generator,
            latents,
        )

        resize_mode = "default"
        crops_coords = None

        # start diff diff preparation
        # Differential diffusion: the (non-binary) mask value decides at which step a
        # pixel stops being re-injected from the original image.
        original_mask = self.mask_processor.preprocess(
            mask_image, height=height, width=width, resize_mode=resize_mode, crops_coords=crops_coords
        )
        masked_image = init_image * original_mask
        original_mask, _ = self.prepare_mask_latents(
            original_mask,
            masked_image,
            batch_size,
            num_images_per_prompt,
            height,
            width,
            prompt_embeds[0].dtype,
            device,
            generator,
        )
        # One boolean mask per inference step: mask value > i / num_steps keeps the original.
        mask_thresholds = torch.arange(num_inference_steps, dtype=original_mask.dtype) / num_inference_steps
        mask_thresholds = mask_thresholds.reshape(-1, 1, 1, 1).to(device)
        masks = original_mask > mask_thresholds
        # end diff diff preparation

        num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
        self._num_timesteps = len(timesteps)

        # 8. Denoising loop
        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                if self.interrupt:
                    continue

                # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
                timestep = t.expand(latents.shape[0])
                # The transformer expects time flowing 0 -> 1 while the scheduler counts
                # 1000 -> 0, hence the inversion.
                timestep = (1000 - timestep) / 1000

                # Normalized time for time-aware config (0 at start, 1 at end)
                t_norm = timestep[0].item()

                # Handle cfg truncation: disable guidance late in sampling when
                # normalized time exceeds the truncation threshold.
                current_guidance_scale = self.guidance_scale
                if (
                    self.do_classifier_free_guidance
                    and self._cfg_truncation is not None
                    and float(self._cfg_truncation) <= 1
                ):
                    if t_norm > self._cfg_truncation:
                        current_guidance_scale = 0.0

                # Run CFG only if configured AND scale is non-zero
                apply_cfg = self.do_classifier_free_guidance and current_guidance_scale > 0

                if apply_cfg:
                    # Duplicate inputs: first half conditional, second half unconditional.
                    latents_typed = latents.to(self.transformer.dtype)
                    latent_model_input = latents_typed.repeat(2, 1, 1, 1)
                    prompt_embeds_model_input = prompt_embeds + negative_prompt_embeds
                    timestep_model_input = timestep.repeat(2)
                else:
                    latent_model_input = latents.to(self.transformer.dtype)
                    prompt_embeds_model_input = prompt_embeds
                    timestep_model_input = timestep

                # The transformer consumes a list of per-sample tensors with an extra frame dim.
                latent_model_input = latent_model_input.unsqueeze(2)
                latent_model_input_list = list(latent_model_input.unbind(dim=0))

                model_out_list = self.transformer(
                    latent_model_input_list,
                    timestep_model_input,
                    prompt_embeds_model_input,
                )[0]

                if apply_cfg:
                    # Perform CFG per sample: pred = pos + w * (pos - neg)
                    pos_out = model_out_list[:actual_batch_size]
                    neg_out = model_out_list[actual_batch_size:]

                    noise_pred = []
                    for j in range(actual_batch_size):
                        pos = pos_out[j].float()
                        neg = neg_out[j].float()
                        pred = pos + current_guidance_scale * (pos - neg)

                        # Renormalization: cap the guided prediction's norm relative
                        # to the conditional prediction's norm.
                        if self._cfg_normalization and float(self._cfg_normalization) > 0.0:
                            ori_pos_norm = torch.linalg.vector_norm(pos)
                            new_pos_norm = torch.linalg.vector_norm(pred)
                            max_new_norm = ori_pos_norm * float(self._cfg_normalization)
                            if new_pos_norm > max_new_norm:
                                pred = pred * (max_new_norm / new_pos_norm)

                        noise_pred.append(pred)
                    noise_pred = torch.stack(noise_pred, dim=0)
                else:
                    noise_pred = torch.stack([t.float() for t in model_out_list], dim=0)

                noise_pred = noise_pred.squeeze(2)
                # Model predicts the negative velocity; flip the sign for the scheduler.
                noise_pred = -noise_pred

                # compute the previous noisy sample x_t -> x_t-1
                latents = self.scheduler.step(noise_pred.to(torch.float32), t, latents, return_dict=False)[0]
                assert latents.dtype == torch.float32

                # start diff diff
                # Re-noise the original image latents to the next timestep and blend them
                # back in wherever this step's boolean mask is set.
                image_latent = original_image_latents
                latents_dtype = latents.dtype
                if i < len(timesteps) - 1:
                    noise_timestep = timesteps[i + 1]
                    image_latent = self.scheduler.scale_noise(
                        original_image_latents, torch.tensor([noise_timestep]), noise
                    )

                mask = masks[i].to(latents_dtype)
                latents = image_latent * mask + latents * (1 - mask)
                # end diff diff

                if latents.dtype != latents_dtype:
                    if torch.backends.mps.is_available():
                        # some platforms (eg. apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272
                        latents = latents.to(latents_dtype)

                if callback_on_step_end is not None:
                    callback_kwargs = {}
                    for k in callback_on_step_end_tensor_inputs:
                        callback_kwargs[k] = locals()[k]
                    callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)

                    latents = callback_outputs.pop("latents", latents)
                    prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
                    negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)

                # call the callback, if provided
                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                    progress_bar.update()

        if output_type == "latent":
            image = latents
        else:
            latents = latents.to(self.vae.dtype)
            # Undo the encode-time scaling before decoding.
            latents = (latents / self.vae.config.scaling_factor) + self.vae.config.shift_factor
            image = self.vae.decode(latents, return_dict=False)[0]
            image = self.image_processor.postprocess(image, output_type=output_type)

        # Offload all models
        self.maybe_free_model_hooks()

        if not return_dict:
            return (image,)

        return ZImagePipelineOutput(images=image)
{ "repo_id": "huggingface/diffusers", "file_path": "examples/community/pipeline_z_image_differential_img2img.py", "license": "Apache License 2.0", "lines": 730, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:src/diffusers/pipelines/z_image/pipeline_z_image_omni.py
# Copyright 2025 Alibaba Z-Image Team and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect from typing import Any, Callable import PIL import torch from transformers import AutoTokenizer, PreTrainedModel, Siglip2ImageProcessorFast, Siglip2VisionModel from ...loaders import FromSingleFileMixin, ZImageLoraLoaderMixin from ...models.autoencoders import AutoencoderKL from ...models.transformers import ZImageTransformer2DModel from ...pipelines.pipeline_utils import DiffusionPipeline from ...schedulers import FlowMatchEulerDiscreteScheduler from ...utils import logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ..flux2.image_processor import Flux2ImageProcessor from .pipeline_output import ZImagePipelineOutput logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py >>> import torch >>> from diffusers import ZImageOmniPipeline >>> pipe = ZImageOmniPipeline.from_pretrained("Z-a-o/Z-Image-Turbo", torch_dtype=torch.bfloat16) >>> pipe.to("cuda") >>> # Optionally, set the attention backend to flash-attn 2 or 3, default is SDPA in PyTorch. >>> # (1) Use flash attention 2 >>> # pipe.transformer.set_attention_backend("flash") >>> # (2) Use flash attention 3 >>> # pipe.transformer.set_attention_backend("_flash_3") >>> prompt = "一幅为名为“造相「Z-IMAGE-TURBO」”的项目设计的创意海报。画面巧妙地将文字概念视觉化:一辆复古蒸汽小火车化身为巨大的拉链头,正拉开厚厚的冬日积雪,展露出一个生机盎然的春天。" >>> image = pipe( ... 
# Copied from diffusers.pipelines.flux.pipeline_flux.calculate_shift
def calculate_shift(
    image_seq_len,
    base_seq_len: int = 256,
    max_seq_len: int = 4096,
    base_shift: float = 0.5,
    max_shift: float = 1.15,
):
    """
    Linearly interpolate the timestep-shift parameter `mu` from the image token count.

    Maps `base_seq_len -> base_shift` and `max_seq_len -> max_shift`; values outside
    the range are extrapolated along the same line.
    """
    slope = (max_shift - base_shift) / (max_seq_len - base_seq_len)
    intercept = base_shift - slope * base_seq_len
    return image_seq_len * slope + intercept
""" if timesteps is not None and sigmas is not None: raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") if timesteps is not None: accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" timestep schedules. Please check whether you are using the correct scheduler." ) scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif sigmas is not None: accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accept_sigmas: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" sigmas schedules. Please check whether you are using the correct scheduler." ) scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return timesteps, num_inference_steps class ZImageOmniPipeline(DiffusionPipeline, ZImageLoraLoaderMixin, FromSingleFileMixin): model_cpu_offload_seq = "text_encoder->transformer->vae" _optional_components = [] _callback_tensor_inputs = ["latents", "prompt_embeds"] def __init__( self, scheduler: FlowMatchEulerDiscreteScheduler, vae: AutoencoderKL, text_encoder: PreTrainedModel, tokenizer: AutoTokenizer, transformer: ZImageTransformer2DModel, siglip: Siglip2VisionModel, siglip_processor: Siglip2ImageProcessorFast, ): super().__init__() self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, scheduler=scheduler, transformer=transformer, siglip=siglip, siglip_processor=siglip_processor, ) self.vae_scale_factor = ( 2 ** 
(len(self.vae.config.block_out_channels) - 1) if hasattr(self, "vae") and self.vae is not None else 8 ) # self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor * 2) self.image_processor = Flux2ImageProcessor(vae_scale_factor=self.vae_scale_factor * 2) def encode_prompt( self, prompt: str | list[str], device: torch.device | None = None, do_classifier_free_guidance: bool = True, negative_prompt: str | list[str] | None = None, prompt_embeds: list[torch.FloatTensor] | None = None, negative_prompt_embeds: torch.FloatTensor | None = None, max_sequence_length: int = 512, num_condition_images: int = 0, ): prompt = [prompt] if isinstance(prompt, str) else prompt prompt_embeds = self._encode_prompt( prompt=prompt, device=device, prompt_embeds=prompt_embeds, max_sequence_length=max_sequence_length, num_condition_images=num_condition_images, ) if do_classifier_free_guidance: if negative_prompt is None: negative_prompt = ["" for _ in prompt] else: negative_prompt = [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt assert len(prompt) == len(negative_prompt) negative_prompt_embeds = self._encode_prompt( prompt=negative_prompt, device=device, prompt_embeds=negative_prompt_embeds, max_sequence_length=max_sequence_length, num_condition_images=num_condition_images, ) else: negative_prompt_embeds = [] return prompt_embeds, negative_prompt_embeds def _encode_prompt( self, prompt: str | list[str], device: torch.device | None = None, prompt_embeds: list[torch.FloatTensor] | None = None, max_sequence_length: int = 512, num_condition_images: int = 0, ) -> list[torch.FloatTensor]: device = device or self._execution_device if prompt_embeds is not None: return prompt_embeds if isinstance(prompt, str): prompt = [prompt] for i, prompt_item in enumerate(prompt): if num_condition_images == 0: prompt[i] = ["<|im_start|>user\n" + prompt_item + "<|im_end|>\n<|im_start|>assistant\n"] elif num_condition_images > 0: prompt_list = 
["<|im_start|>user\n<|vision_start|>"] prompt_list += ["<|vision_end|><|vision_start|>"] * (num_condition_images - 1) prompt_list += ["<|vision_end|>" + prompt_item + "<|im_end|>\n<|im_start|>assistant\n<|vision_start|>"] prompt_list += ["<|vision_end|><|im_end|>"] prompt[i] = prompt_list flattened_prompt = [] prompt_list_lengths = [] for i in range(len(prompt)): prompt_list_lengths.append(len(prompt[i])) flattened_prompt.extend(prompt[i]) text_inputs = self.tokenizer( flattened_prompt, padding="max_length", max_length=max_sequence_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids.to(device) prompt_masks = text_inputs.attention_mask.to(device).bool() prompt_embeds = self.text_encoder( input_ids=text_input_ids, attention_mask=prompt_masks, output_hidden_states=True, ).hidden_states[-2] embeddings_list = [] start_idx = 0 for i in range(len(prompt_list_lengths)): batch_embeddings = [] end_idx = start_idx + prompt_list_lengths[i] for j in range(start_idx, end_idx): batch_embeddings.append(prompt_embeds[j][prompt_masks[j]]) embeddings_list.append(batch_embeddings) start_idx = end_idx return embeddings_list def prepare_latents( self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None, ): height = 2 * (int(height) // (self.vae_scale_factor * 2)) width = 2 * (int(width) // (self.vae_scale_factor * 2)) shape = (batch_size, num_channels_latents, height, width) if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: if latents.shape != shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") latents = latents.to(device) return latents def prepare_image_latents( self, images: list[torch.Tensor], batch_size, device, dtype, ): image_latents = [] for image in images: image = image.to(device=device, dtype=dtype) image_latent = ( self.vae.encode(image.bfloat16()).latent_dist.mode()[0] - self.vae.config.shift_factor ) * 
self.vae.config.scaling_factor image_latent = image_latent.unsqueeze(1).to(dtype) image_latents.append(image_latent) # (16, 128, 128) # image_latents = [image_latents] * batch_size image_latents = [image_latents.copy() for _ in range(batch_size)] return image_latents def prepare_siglip_embeds( self, images: list[torch.Tensor], batch_size, device, dtype, ): siglip_embeds = [] for image in images: siglip_inputs = self.siglip_processor(images=[image], return_tensors="pt").to(device) shape = siglip_inputs.spatial_shapes[0] hidden_state = self.siglip(**siglip_inputs).last_hidden_state B, N, C = hidden_state.shape hidden_state = hidden_state[:, : shape[0] * shape[1]] hidden_state = hidden_state.view(shape[0], shape[1], C) siglip_embeds.append(hidden_state.to(dtype)) # siglip_embeds = [siglip_embeds] * batch_size siglip_embeds = [siglip_embeds.copy() for _ in range(batch_size)] return siglip_embeds @property def guidance_scale(self): return self._guidance_scale @property def do_classifier_free_guidance(self): return self._guidance_scale > 1 @property def joint_attention_kwargs(self): return self._joint_attention_kwargs @property def num_timesteps(self): return self._num_timesteps @property def interrupt(self): return self._interrupt @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, image: list[PIL.Image.Image, PIL.Image.Image] | None = None, prompt: str | list[str] = None, height: int | None = None, width: int | None = None, num_inference_steps: int = 50, sigmas: list[float] | None = None, guidance_scale: float = 5.0, cfg_normalization: bool = False, cfg_truncation: float = 1.0, negative_prompt: str | list[str] | None = None, num_images_per_prompt: int | None = 1, generator: torch.Generator | list[torch.Generator] | None = None, latents: torch.FloatTensor | None = None, prompt_embeds: list[torch.FloatTensor] | None = None, negative_prompt_embeds: list[torch.FloatTensor] | None = None, output_type: str | None = "pil", return_dict: bool = 
True, joint_attention_kwargs: dict[str, Any] | None = None, callback_on_step_end: Callable[[int, int], None] | None = None, callback_on_step_end_tensor_inputs: list[str] = ["latents"], max_sequence_length: int = 512, ): r""" Function invoked when calling the pipeline for generation. Args: image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `list[torch.Tensor]`, `list[PIL.Image.Image]`, or `list[np.ndarray]`): `Image`, numpy array or tensor representing an image batch to be used as the starting point. For both numpy array and pytorch tensor, the expected value range is between `[0, 1]` If it's a tensor or a list or tensors, the expected shape should be `(B, C, H, W)` or `(C, H, W)`. If it is a numpy array or a list of arrays, the expected shape should be `(B, H, W, C)` or `(H, W, C)` It can also accept image latents as `image`, but if passing latents directly it is not encoded again. prompt (`str` or `list[str]`, *optional*): The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. instead. height (`int`, *optional*, defaults to 1024): The height in pixels of the generated image. width (`int`, *optional*, defaults to 1024): The width in pixels of the generated image. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. sigmas (`list[float]`, *optional*): Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed will be used. guidance_scale (`float`, *optional*, defaults to 5.0): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > 1`. 
Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. cfg_normalization (`bool`, *optional*, defaults to False): Whether to apply configuration normalization. cfg_truncation (`float`, *optional*, defaults to 1.0): The truncation value for configuration. negative_prompt (`str` or `list[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. generator (`torch.Generator` or `list[torch.Generator]`, *optional*): One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`list[torch.FloatTensor]`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`list[torch.FloatTensor]`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. 
return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.ZImagePipelineOutput`] instead of a plain tuple. joint_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). callback_on_step_end (`Callable`, *optional*): A function that calls at the end of each denoising steps during the inference. The function is called with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by `callback_on_step_end_tensor_inputs`. callback_on_step_end_tensor_inputs (`List`, *optional*): The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the `._callback_tensor_inputs` attribute of your pipeline class. max_sequence_length (`int`, *optional*, defaults to 512): Maximum sequence length to use with the `prompt`. Examples: Returns: [`~pipelines.z_image.ZImagePipelineOutput`] or `tuple`: [`~pipelines.z_image.ZImagePipelineOutput`] if `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is a list with the generated images. """ if image is not None and not isinstance(image, list): image = [image] num_condition_images = len(image) if image is not None else 0 device = self._execution_device self._guidance_scale = guidance_scale self._joint_attention_kwargs = joint_attention_kwargs self._interrupt = False self._cfg_normalization = cfg_normalization self._cfg_truncation = cfg_truncation # 2. 
Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = len(prompt_embeds) # If prompt_embeds is provided and prompt is None, skip encoding if prompt_embeds is not None and prompt is None: if self.do_classifier_free_guidance and negative_prompt_embeds is None: raise ValueError( "When `prompt_embeds` is provided without `prompt`, " "`negative_prompt_embeds` must also be provided for classifier-free guidance." ) else: ( prompt_embeds, negative_prompt_embeds, ) = self.encode_prompt( prompt=prompt, negative_prompt=negative_prompt, do_classifier_free_guidance=self.do_classifier_free_guidance, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, device=device, max_sequence_length=max_sequence_length, num_condition_images=num_condition_images, ) # 3. Process condition images. Copied from diffusers.pipelines.flux2.pipeline_flux2 condition_images = [] resized_images = [] if image is not None: for img in image: self.image_processor.check_image_input(img) for img in image: image_width, image_height = img.size if image_width * image_height > 1024 * 1024: if height is not None and width is not None: img = self.image_processor._resize_to_target_area(img, height * width) else: img = self.image_processor._resize_to_target_area(img, 1024 * 1024) image_width, image_height = img.size resized_images.append(img) multiple_of = self.vae_scale_factor * 2 image_width = (image_width // multiple_of) * multiple_of image_height = (image_height // multiple_of) * multiple_of img = self.image_processor.preprocess(img, height=image_height, width=image_width, resize_mode="crop") condition_images.append(img) if len(condition_images) > 0: height = height or image_height width = width or image_width else: height = height or 1024 width = width or 1024 vae_scale = self.vae_scale_factor * 2 if height % vae_scale != 0: raise ValueError( f"Height must be 
divisible by {vae_scale} (got {height}). " f"Please adjust the height to a multiple of {vae_scale}." ) if width % vae_scale != 0: raise ValueError( f"Width must be divisible by {vae_scale} (got {width}). " f"Please adjust the width to a multiple of {vae_scale}." ) # 4. Prepare latent variables num_channels_latents = self.transformer.in_channels latents = self.prepare_latents( batch_size * num_images_per_prompt, num_channels_latents, height, width, torch.float32, device, generator, latents, ) condition_latents = self.prepare_image_latents( images=condition_images, batch_size=batch_size * num_images_per_prompt, device=device, dtype=torch.float32, ) condition_latents = [[lat.to(self.transformer.dtype) for lat in lats] for lats in condition_latents] if self.do_classifier_free_guidance: negative_condition_latents = [[lat.clone() for lat in batch] for batch in condition_latents] condition_siglip_embeds = self.prepare_siglip_embeds( images=resized_images, batch_size=batch_size * num_images_per_prompt, device=device, dtype=torch.float32, ) condition_siglip_embeds = [[se.to(self.transformer.dtype) for se in sels] for sels in condition_siglip_embeds] if self.do_classifier_free_guidance: negative_condition_siglip_embeds = [[se.clone() for se in batch] for batch in condition_siglip_embeds] # Repeat prompt_embeds for num_images_per_prompt if num_images_per_prompt > 1: prompt_embeds = [pe for pe in prompt_embeds for _ in range(num_images_per_prompt)] if self.do_classifier_free_guidance and negative_prompt_embeds: negative_prompt_embeds = [npe for npe in negative_prompt_embeds for _ in range(num_images_per_prompt)] condition_siglip_embeds = [None if sels == [] else sels + [None] for sels in condition_siglip_embeds] negative_condition_siglip_embeds = [ None if sels == [] else sels + [None] for sels in negative_condition_siglip_embeds ] actual_batch_size = batch_size * num_images_per_prompt image_seq_len = (latents.shape[2] // 2) * (latents.shape[3] // 2) # 5. 
Prepare timesteps mu = calculate_shift( image_seq_len, self.scheduler.config.get("base_image_seq_len", 256), self.scheduler.config.get("max_image_seq_len", 4096), self.scheduler.config.get("base_shift", 0.5), self.scheduler.config.get("max_shift", 1.15), ) self.scheduler.sigma_min = 0.0 scheduler_kwargs = {"mu": mu} timesteps, num_inference_steps = retrieve_timesteps( self.scheduler, num_inference_steps, device, sigmas=sigmas, **scheduler_kwargs, ) num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) self._num_timesteps = len(timesteps) # 6. Denoising loop with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): if self.interrupt: continue # broadcast to batch dimension in a way that's compatible with ONNX/Core ML timestep = t.expand(latents.shape[0]) timestep = (1000 - timestep) / 1000 # Normalized time for time-aware config (0 at start, 1 at end) t_norm = timestep[0].item() # Handle cfg truncation current_guidance_scale = self.guidance_scale if ( self.do_classifier_free_guidance and self._cfg_truncation is not None and float(self._cfg_truncation) <= 1 ): if t_norm > self._cfg_truncation: current_guidance_scale = 0.0 # Run CFG only if configured AND scale is non-zero apply_cfg = self.do_classifier_free_guidance and current_guidance_scale > 0 if apply_cfg: latents_typed = latents.to(self.transformer.dtype) latent_model_input = latents_typed.repeat(2, 1, 1, 1) prompt_embeds_model_input = prompt_embeds + negative_prompt_embeds condition_latents_model_input = condition_latents + negative_condition_latents condition_siglip_embeds_model_input = condition_siglip_embeds + negative_condition_siglip_embeds timestep_model_input = timestep.repeat(2) else: latent_model_input = latents.to(self.transformer.dtype) prompt_embeds_model_input = prompt_embeds condition_latents_model_input = condition_latents condition_siglip_embeds_model_input = condition_siglip_embeds timestep_model_input = timestep 
latent_model_input = latent_model_input.unsqueeze(2) latent_model_input_list = list(latent_model_input.unbind(dim=0)) # Combine condition latents with target latent current_batch_size = len(latent_model_input_list) x_combined = [ condition_latents_model_input[i] + [latent_model_input_list[i]] for i in range(current_batch_size) ] # Create noise mask: 0 for condition images (clean), 1 for target image (noisy) image_noise_mask = [ [0] * len(condition_latents_model_input[i]) + [1] for i in range(current_batch_size) ] model_out_list = self.transformer( x=x_combined, t=timestep_model_input, cap_feats=prompt_embeds_model_input, siglip_feats=condition_siglip_embeds_model_input, image_noise_mask=image_noise_mask, return_dict=False, )[0] if apply_cfg: # Perform CFG pos_out = model_out_list[:actual_batch_size] neg_out = model_out_list[actual_batch_size:] noise_pred = [] for j in range(actual_batch_size): pos = pos_out[j].float() neg = neg_out[j].float() pred = pos + current_guidance_scale * (pos - neg) # Renormalization if self._cfg_normalization and float(self._cfg_normalization) > 0.0: ori_pos_norm = torch.linalg.vector_norm(pos) new_pos_norm = torch.linalg.vector_norm(pred) max_new_norm = ori_pos_norm * float(self._cfg_normalization) if new_pos_norm > max_new_norm: pred = pred * (max_new_norm / new_pos_norm) noise_pred.append(pred) noise_pred = torch.stack(noise_pred, dim=0) else: noise_pred = torch.stack([t.float() for t in model_out_list], dim=0) noise_pred = noise_pred.squeeze(2) noise_pred = -noise_pred # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred.to(torch.float32), t, latents, return_dict=False)[0] assert latents.dtype == torch.float32 if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop("latents", latents) prompt_embeds = 
callback_outputs.pop("prompt_embeds", prompt_embeds) negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if output_type == "latent": image = latents else: latents = latents.to(self.vae.dtype) latents = (latents / self.vae.config.scaling_factor) + self.vae.config.shift_factor image = self.vae.decode(latents, return_dict=False)[0] image = self.image_processor.postprocess(image, output_type=output_type) # Offload all models self.maybe_free_model_hooks() if not return_dict: return (image,) return ZImagePipelineOutput(images=image)
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/pipelines/z_image/pipeline_z_image_omni.py", "license": "Apache License 2.0", "lines": 649, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:src/diffusers/guiders/magnitude_aware_guidance.py
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import math
from typing import TYPE_CHECKING

import torch

from ..configuration_utils import register_to_config
from .guider_utils import BaseGuidance, GuiderOutput, rescale_noise_cfg


if TYPE_CHECKING:
    from ..modular_pipelines.modular_pipeline import BlockState


class MagnitudeAwareGuidance(BaseGuidance):
    """
    Magnitude-Aware Mitigation for Boosted Guidance (MAMBO-G): https://huggingface.co/papers/2508.03442

    Classifier-free guidance whose effective scale is damped exponentially as the magnitude of the guidance
    update grows relative to the unconditional prediction.

    Args:
        guidance_scale (`float`, defaults to `10.0`):
            The scale parameter for classifier-free guidance. Higher values result in stronger conditioning on the
            text prompt, while lower values allow for more freedom in generation. Higher values may lead to
            saturation and deterioration of image quality.
        alpha (`float`, defaults to `8.0`):
            The alpha parameter for the magnitude-aware guidance. Higher values cause more aggressive suppression
            of the guidance scale when the magnitude of the guidance update is large.
        guidance_rescale (`float`, defaults to `0.0`):
            The rescale factor applied to the noise predictions. This is used to improve image quality and fix
            overexposure. Based on Section 3.4 from [Common Diffusion Noise Schedules and Sample Steps are
            Flawed](https://huggingface.co/papers/2305.08891).
        use_original_formulation (`bool`, defaults to `False`):
            Whether to use the original formulation of classifier-free guidance as proposed in the paper. By
            default, we use the diffusers-native implementation that has been in the codebase for a long time. See
            [~guiders.classifier_free_guidance.ClassifierFreeGuidance] for more details.
        start (`float`, defaults to `0.0`):
            The fraction of the total number of denoising steps after which guidance starts.
        stop (`float`, defaults to `1.0`):
            The fraction of the total number of denoising steps after which guidance stops.
    """

    _input_predictions = ["pred_cond", "pred_uncond"]

    @register_to_config
    def __init__(
        self,
        guidance_scale: float = 10.0,
        alpha: float = 8.0,
        guidance_rescale: float = 0.0,
        use_original_formulation: bool = False,
        start: float = 0.0,
        stop: float = 1.0,
        enabled: bool = True,
    ):
        super().__init__(start, stop, enabled)

        self.guidance_scale = guidance_scale
        self.alpha = alpha
        self.guidance_rescale = guidance_rescale
        self.use_original_formulation = use_original_formulation

    def prepare_inputs(self, data: dict[str, tuple[torch.Tensor, torch.Tensor]]) -> list["BlockState"]:
        """Build one batch per required prediction: the conditional one, plus the unconditional one when active."""
        indices = [0] if self.num_conditions == 1 else [0, 1]
        return [
            self._prepare_batch(data, idx, prediction)
            for idx, prediction in zip(indices, self._input_predictions)
        ]

    def prepare_inputs_from_block_state(
        self, data: "BlockState", input_fields: dict[str, str | tuple[str, str]]
    ) -> list["BlockState"]:
        """Same as `prepare_inputs`, but sourcing tensors from a modular-pipeline `BlockState`."""
        indices = [0] if self.num_conditions == 1 else [0, 1]
        return [
            self._prepare_batch_from_block_state(input_fields, data, idx, prediction)
            for idx, prediction in zip(indices, self._input_predictions)
        ]

    def forward(self, pred_cond: torch.Tensor, pred_uncond: torch.Tensor | None = None) -> GuiderOutput:
        """Combine conditional/unconditional predictions with a magnitude-damped guidance scale."""
        if self._is_mambo_g_enabled():
            pred = mambo_guidance(
                pred_cond,
                pred_uncond,
                self.guidance_scale,
                self.alpha,
                self.use_original_formulation,
            )
        else:
            # Guidance inactive for this step: pass the conditional prediction through unchanged.
            pred = pred_cond

        if self.guidance_rescale > 0.0:
            pred = rescale_noise_cfg(pred, pred_cond, self.guidance_rescale)

        return GuiderOutput(pred=pred, pred_cond=pred_cond, pred_uncond=pred_uncond)

    @property
    def is_conditional(self) -> bool:
        # The first prepared batch is always the conditional one.
        return self._count_prepared == 1

    @property
    def num_conditions(self) -> int:
        # Two model passes (cond + uncond) while guidance is active, one otherwise.
        return 2 if self._is_mambo_g_enabled() else 1

    def _is_mambo_g_enabled(self) -> bool:
        """Return whether MAMBO-G should run at the current step."""
        if not self._enabled:
            return False

        within_range = True
        if self._num_inference_steps is not None:
            first_step = int(self._start * self._num_inference_steps)
            last_step = int(self._stop * self._num_inference_steps)
            within_range = first_step <= self._step < last_step

        # Guidance degenerates to a no-op at scale 0 (original formulation) or scale 1 (diffusers formulation).
        noop_scale = 0.0 if self.use_original_formulation else 1.0
        return within_range and not math.isclose(self.guidance_scale, noop_scale)


def mambo_guidance(
    pred_cond: torch.Tensor,
    pred_uncond: torch.Tensor,
    guidance_scale: float,
    alpha: float = 8.0,
    use_original_formulation: bool = False,
):
    """
    Apply magnitude-aware classifier-free guidance.

    The per-sample ratio `||pred_cond - pred_uncond|| / ||pred_uncond||` (norms taken over all non-batch
    dimensions) damps the guidance scale via `exp(-alpha * ratio)`, so large guidance updates are suppressed.
    """
    reduce_dims = list(range(1, pred_cond.ndim))
    delta = pred_cond - pred_uncond
    magnitude_ratio = torch.norm(delta, dim=reduce_dims, keepdim=True) / torch.norm(
        pred_uncond, dim=reduce_dims, keepdim=True
    )
    damping = torch.exp(-alpha * magnitude_ratio)

    if use_original_formulation:
        effective_scale = guidance_scale * damping
        baseline = pred_cond
    else:
        effective_scale = 1.0 + (guidance_scale - 1.0) * damping
        baseline = pred_uncond

    return baseline + effective_scale * delta
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/guiders/magnitude_aware_guidance.py", "license": "Apache License 2.0", "lines": 133, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:src/diffusers/pipelines/cosmos/pipeline_cosmos2_5_predict.py
# Copyright 2025 The NVIDIA Team and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Callable import numpy as np import torch import torchvision import torchvision.transforms import torchvision.transforms.functional from transformers import AutoTokenizer, Qwen2_5_VLForConditionalGeneration from ...callbacks import MultiPipelineCallbacks, PipelineCallback from ...image_processor import PipelineImageInput from ...models import AutoencoderKLWan, CosmosTransformer3DModel from ...schedulers import UniPCMultistepScheduler from ...utils import is_cosmos_guardrail_available, is_torch_xla_available, logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ...video_processor import VideoProcessor from ..pipeline_utils import DiffusionPipeline from .pipeline_output import CosmosPipelineOutput if is_cosmos_guardrail_available(): from cosmos_guardrail import CosmosSafetyChecker else: class CosmosSafetyChecker: def __init__(self, *args, **kwargs): raise ImportError( "`cosmos_guardrail` is not installed. Please install it to use the safety checker for Cosmos: `pip install cosmos_guardrail`." 
) if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) # pylint: disable=invalid-name DEFAULT_NEGATIVE_PROMPT = ( "The video captures a series of frames showing ugly scenes, static with no motion, motion blur, " "over-saturation, shaky footage, low resolution, grainy texture, pixelated images, poorly lit areas, " "underexposed and overexposed scenes, poor color balance, washed out colors, choppy sequences, " "jerky movements, low frame rate, artifacting, color banding, unnatural transitions, outdated special effects, " "fake elements, unconvincing visuals, poorly edited content, jump cuts, visual noise, and flickering. " "Overall, the video is of poor quality." ) # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents def retrieve_latents( encoder_output: torch.Tensor, generator: torch.Generator | None = None, sample_mode: str = "sample" ): if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": return encoder_output.latent_dist.sample(generator) elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": return encoder_output.latent_dist.mode() elif hasattr(encoder_output, "latents"): return encoder_output.latents else: raise AttributeError("Could not access latents of provided encoder_output") EXAMPLE_DOC_STRING = """ Examples: ```python >>> import torch >>> from diffusers import Cosmos2_5_PredictBasePipeline >>> from diffusers.utils import export_to_video, load_image, load_video >>> model_id = "nvidia/Cosmos-Predict2.5-2B" >>> pipe = Cosmos2_5_PredictBasePipeline.from_pretrained( ... model_id, revision="diffusers/base/post-trained", torch_dtype=torch.bfloat16 ... ) >>> pipe = pipe.to("cuda") >>> # Common negative prompt reused across modes. >>> negative_prompt = ( ... "The video captures a series of frames showing ugly scenes, static with no motion, motion blur, " ... 
"over-saturation, shaky footage, low resolution, grainy texture, pixelated images, poorly lit areas, " ... "underexposed and overexposed scenes, poor color balance, washed out colors, choppy sequences, jerky " ... "movements, low frame rate, artifacting, color banding, unnatural transitions, outdated special effects, " ... "fake elements, unconvincing visuals, poorly edited content, jump cuts, visual noise, and flickering. " ... "Overall, the video is of poor quality." ... ) >>> # Text2World: generate a 93-frame world video from text only. >>> prompt = ( ... "As the red light shifts to green, the red bus at the intersection begins to move forward, its headlights " ... "cutting through the falling snow. The snowy tire tracks deepen as the vehicle inches ahead, casting fresh " ... "lines onto the slushy road. Around it, streetlights glow warmer, illuminating the drifting flakes and wet " ... "reflections on the asphalt. Other cars behind start to edge forward, their beams joining the scene. " ... "The stillness of the urban street transitions into motion as the quiet snowfall is punctuated by the slow " ... "advance of traffic through the frosty city corridor." ... ) >>> video = pipe( ... image=None, ... video=None, ... prompt=prompt, ... negative_prompt=negative_prompt, ... num_frames=93, ... generator=torch.Generator().manual_seed(1), ... ).frames[0] >>> export_to_video(video, "text2world.mp4", fps=16) >>> # Image2World: condition on a single image and generate a 93-frame world video. >>> prompt = ( ... "A high-definition video captures the precision of robotic welding in an industrial setting. " ... "The first frame showcases a robotic arm, equipped with a welding torch, positioned over a large metal structure. " ... "The welding process is in full swing, with bright sparks and intense light illuminating the scene, creating a vivid " ... "display of blue and white hues. A significant amount of smoke billows around the welding area, partially obscuring " ... 
"the view but emphasizing the heat and activity. The background reveals parts of the workshop environment, including a " ... "ventilation system and various pieces of machinery, indicating a busy and functional industrial workspace. As the video " ... "progresses, the robotic arm maintains its steady position, continuing the welding process and moving to its left. " ... "The welding torch consistently emits sparks and light, and the smoke continues to rise, diffusing slightly as it moves upward. " ... "The metal surface beneath the torch shows ongoing signs of heating and melting. The scene retains its industrial ambiance, with " ... "the welding sparks and smoke dominating the visual field, underscoring the ongoing nature of the welding operation." ... ) >>> image = load_image( ... "https://media.githubusercontent.com/media/nvidia-cosmos/cosmos-predict2.5/refs/heads/main/assets/base/robot_welding.jpg" ... ) >>> video = pipe( ... image=image, ... video=None, ... prompt=prompt, ... negative_prompt=negative_prompt, ... num_frames=93, ... generator=torch.Generator().manual_seed(1), ... ).frames[0] >>> export_to_video(video, "image2world.mp4", fps=16) >>> # Video2World: condition on an input clip and predict a 93-frame world video. >>> prompt = ( ... "The video opens with an aerial view of a large-scale sand mining construction operation, showcasing extensive piles " ... "of brown sand meticulously arranged in parallel rows. A central water channel, fed by a water pipe, flows through the " ... "middle of these sand heaps, creating ripples and movement as it cascades down. The surrounding area features dense green " ... "vegetation on the left, contrasting with the sandy terrain, while a body of water is visible in the background on the right. " ... "As the video progresses, a piece of heavy machinery, likely a bulldozer, enters the frame from the right, moving slowly along " ... "the edge of the sand piles. 
This machinery's presence indicates ongoing construction work in the operation. The final frame " ... "captures the same scene, with the water continuing its flow and the bulldozer still in motion, maintaining the dynamic yet " ... "steady pace of the construction activity." ... ) >>> input_video = load_video( ... "https://github.com/nvidia-cosmos/cosmos-predict2.5/raw/refs/heads/main/assets/base/sand_mining.mp4" ... ) >>> video = pipe( ... image=None, ... video=input_video, ... prompt=prompt, ... negative_prompt=negative_prompt, ... num_frames=93, ... generator=torch.Generator().manual_seed(1), ... ).frames[0] >>> export_to_video(video, "video2world.mp4", fps=16) >>> # To produce an image instead of a world (video) clip, set num_frames=1 and >>> # save the first frame: pipe(..., num_frames=1).frames[0][0]. ``` """ class Cosmos2_5_PredictBasePipeline(DiffusionPipeline): r""" Pipeline for [Cosmos Predict2.5](https://github.com/nvidia-cosmos/cosmos-predict2.5) base model. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). Args: text_encoder ([`Qwen2_5_VLForConditionalGeneration`]): Frozen text-encoder. Cosmos Predict2.5 uses the [Qwen2.5 VL](https://huggingface.co/Qwen/Qwen2.5-VL-7B-Instruct) encoder. tokenizer (`AutoTokenizer`): Tokenizer associated with the Qwen2.5 VL encoder. transformer ([`CosmosTransformer3DModel`]): Conditional Transformer to denoise the encoded image latents. scheduler ([`UniPCMultistepScheduler`]): A scheduler to be used in combination with `transformer` to denoise the encoded image latents. vae ([`AutoencoderKLWan`]): Variational Auto-Encoder (VAE) Model to encode and decode videos to and from latent representations. 
""" model_cpu_offload_seq = "text_encoder->transformer->vae" _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"] # We mark safety_checker as optional here to get around some test failures, but it is not really optional _optional_components = ["safety_checker"] _exclude_from_cpu_offload = ["safety_checker"] def __init__( self, text_encoder: Qwen2_5_VLForConditionalGeneration, tokenizer: AutoTokenizer, transformer: CosmosTransformer3DModel, vae: AutoencoderKLWan, scheduler: UniPCMultistepScheduler, safety_checker: CosmosSafetyChecker = None, ): super().__init__() if safety_checker is None: safety_checker = CosmosSafetyChecker() self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, transformer=transformer, scheduler=scheduler, safety_checker=safety_checker, ) self.vae_scale_factor_temporal = 2 ** sum(self.vae.temperal_downsample) if getattr(self, "vae", None) else 4 self.vae_scale_factor_spatial = 2 ** len(self.vae.temperal_downsample) if getattr(self, "vae", None) else 8 self.video_processor = VideoProcessor(vae_scale_factor=self.vae_scale_factor_spatial) latents_mean = ( torch.tensor(self.vae.config.latents_mean).view(1, self.vae.config.z_dim, 1, 1, 1).float() if getattr(self.vae.config, "latents_mean", None) is not None else None ) latents_std = ( torch.tensor(self.vae.config.latents_std).view(1, self.vae.config.z_dim, 1, 1, 1).float() if getattr(self.vae.config, "latents_std", None) is not None else None ) self.latents_mean = latents_mean self.latents_std = latents_std if self.latents_mean is None or self.latents_std is None: raise ValueError("VAE configuration must define both `latents_mean` and `latents_std`.") def _get_prompt_embeds( self, prompt: str | list[str] = None, max_sequence_length: int = 512, device: torch.device | None = None, dtype: torch.dtype | None = None, ): device = device or self._execution_device dtype = dtype or self.text_encoder.dtype prompt = [prompt] if isinstance(prompt, str) else 
prompt input_ids_batch = [] for sample_idx in range(len(prompt)): conversations = [ { "role": "system", "content": [ { "type": "text", "text": "You are a helpful assistant who will provide prompts to an image generator.", } ], }, { "role": "user", "content": [ { "type": "text", "text": prompt[sample_idx], } ], }, ] input_ids = self.tokenizer.apply_chat_template( conversations, tokenize=True, add_generation_prompt=False, add_vision_id=False, max_length=max_sequence_length, truncation=True, padding="max_length", ) input_ids = ( input_ids["input_ids"] if not isinstance(input_ids, list) and "input_ids" in input_ids else input_ids ) input_ids = torch.LongTensor(input_ids) input_ids_batch.append(input_ids) input_ids_batch = torch.stack(input_ids_batch, dim=0) outputs = self.text_encoder( input_ids_batch.to(device), output_hidden_states=True, ) hidden_states = outputs.hidden_states normalized_hidden_states = [] for layer_idx in range(1, len(hidden_states)): normalized_state = (hidden_states[layer_idx] - hidden_states[layer_idx].mean(dim=-1, keepdim=True)) / ( hidden_states[layer_idx].std(dim=-1, keepdim=True) + 1e-8 ) normalized_hidden_states.append(normalized_state) prompt_embeds = torch.cat(normalized_hidden_states, dim=-1) prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) return prompt_embeds # Modified from diffusers.pipelines.cosmos.pipeline_cosmos_text2world.CosmosTextToWorldPipeline.encode_prompt def encode_prompt( self, prompt: str | list[str], negative_prompt: str | list[str] | None = None, do_classifier_free_guidance: bool = True, num_videos_per_prompt: int = 1, prompt_embeds: torch.Tensor | None = None, negative_prompt_embeds: torch.Tensor | None = None, max_sequence_length: int = 512, device: torch.device | None = None, dtype: torch.dtype | None = None, ): r""" Encodes the prompt into text encoder hidden states. 
        Args:
            prompt (`str` or `list[str]`, *optional*):
                prompt to be encoded
            negative_prompt (`str` or `list[str]`, *optional*):
                The prompt or prompts not to guide the image generation. If not defined, one has to pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale`
                is less than `1`).
            do_classifier_free_guidance (`bool`, *optional*, defaults to `True`):
                Whether to use classifier free guidance or not.
            num_videos_per_prompt (`int`, *optional*, defaults to 1):
                Number of videos that should be generated per prompt.
            prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If
                not provided, text embeddings will be generated from `prompt` input argument.
            negative_prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
                argument.
            device: (`torch.device`, *optional*):
                torch device
            dtype: (`torch.dtype`, *optional*):
                torch dtype
        """
        device = device or self._execution_device

        prompt = [prompt] if isinstance(prompt, str) else prompt
        if prompt is not None:
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        if prompt_embeds is None:
            prompt_embeds = self._get_prompt_embeds(
                prompt=prompt, max_sequence_length=max_sequence_length, device=device, dtype=dtype
            )

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        _, seq_len, _ = prompt_embeds.shape
        prompt_embeds = prompt_embeds.repeat(1, num_videos_per_prompt, 1)
        prompt_embeds = prompt_embeds.view(batch_size * num_videos_per_prompt, seq_len, -1)

        if do_classifier_free_guidance and negative_prompt_embeds is None:
            # Fall back to the module-level default negative prompt, then broadcast a single string
            # to the whole batch.
            negative_prompt = negative_prompt if negative_prompt is not None else DEFAULT_NEGATIVE_PROMPT
            negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt

            if prompt is not None and type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )

            negative_prompt_embeds = self._get_prompt_embeds(
                prompt=negative_prompt, max_sequence_length=max_sequence_length, device=device, dtype=dtype
            )

            # duplicate text embeddings for each generation per prompt, using mps friendly method
            _, seq_len, _ = negative_prompt_embeds.shape
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_videos_per_prompt, 1)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_videos_per_prompt, seq_len, -1)

        return prompt_embeds, negative_prompt_embeds

    # Modified from diffusers.pipelines.cosmos.pipeline_cosmos2_video2world.Cosmos2VideoToWorldPipeline.prepare_latents and
    # diffusers.pipelines.cosmos.pipeline_cosmos2_video2world.Cosmos2TextToImagePipeline.prepare_latents
    def prepare_latents(
        self,
        video: torch.Tensor | None,
        batch_size: int,
        num_channels_latents: int = 16,
        height: int = 704,
        width: int = 1280,
        num_frames_in: int = 93,
        num_frames_out: int = 93,
        do_classifier_free_guidance: bool = True,
        dtype: torch.dtype | None = None,
        device: torch.device | None = None,
        generator: torch.Generator | list[torch.Generator] | None = None,
        latents: torch.Tensor | None = None,
    ) -> torch.Tensor:
        """
        Build initial noise latents plus the conditioning latents/masks for world generation.

        When `num_frames_in == 0` (pure Text2World) the conditioning latents and masks are all
        zeros; otherwise `video` is VAE-encoded and normalized with the pipeline's latent
        statistics, and the first `(num_frames_in - 1) // vae_scale_factor_temporal + 1` latent
        frames are marked as conditioning.

        Returns:
            Tuple `(latents, cond_latents, cond_mask, cond_indicator)` where `latents` has shape
            `[B, C, T, H_lat, W_lat]`, `cond_mask` is `[B, 1, T, H_lat, W_lat]` and
            `cond_indicator` is broadcastable over the frame axis.
        """
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        # Latent grid dimensions derived from the VAE scale factors.
        B = batch_size
        C = num_channels_latents
        T = (num_frames_out - 1) // self.vae_scale_factor_temporal + 1
        H = height // self.vae_scale_factor_spatial
        W = width // self.vae_scale_factor_spatial
        shape = (B, C, T, H, W)

        if num_frames_in == 0:
            # Text2World: no conditioning frames — zero masks and zero conditioning latents.
            if latents is None:
                latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
            cond_mask = torch.zeros((B, 1, T, H, W), dtype=latents.dtype, device=latents.device)
            cond_indicator = torch.zeros((B, 1, T, 1, 1), dtype=latents.dtype, device=latents.device)
            cond_latents = torch.zeros_like(latents)
            return (
                latents,
                cond_latents,
                cond_mask,
                cond_indicator,
            )
        else:
            if video is None:
                raise ValueError("`video` must be provided when `num_frames_in` is greater than 0.")

            # Skip preprocessing when the caller already supplies a [B, 3, T, H, W] tensor.
            needs_preprocessing = not (isinstance(video, torch.Tensor) and video.ndim == 5 and video.shape[1] == 3)
            if needs_preprocessing:
                video = self.video_processor.preprocess_video(video, height, width)
            video = video.to(device=device, dtype=self.vae.dtype)

            # Encode each batch element separately so a per-sample generator list stays deterministic.
            if isinstance(generator, list):
                cond_latents = [
                    retrieve_latents(self.vae.encode(video[i].unsqueeze(0)), generator=generator[i])
                    for i in range(batch_size)
                ]
            else:
                cond_latents = [retrieve_latents(self.vae.encode(vid.unsqueeze(0)), generator) for vid in video]

            cond_latents = torch.cat(cond_latents, dim=0).to(dtype)

            # Normalize conditioning latents with the VAE channel statistics.
            latents_mean = self.latents_mean.to(device=device, dtype=dtype)
            latents_std = self.latents_std.to(device=device, dtype=dtype)
            cond_latents = (cond_latents - latents_mean) / latents_std

            if latents is None:
                latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
            else:
                latents = latents.to(device=device, dtype=dtype)

            padding_shape = (B, 1, T, H, W)
            ones_padding = latents.new_ones(padding_shape)
            zeros_padding = latents.new_zeros(padding_shape)

            # Mark the leading latent frames (derived from the number of pixel conditioning frames)
            # as conditioning; cond_mask expands the per-frame indicator over the spatial grid.
            num_cond_latent_frames = (num_frames_in - 1) // self.vae_scale_factor_temporal + 1
            cond_indicator = latents.new_zeros(1, 1, latents.size(2), 1, 1)
            cond_indicator[:, :, 0:num_cond_latent_frames] = 1.0
            cond_mask = cond_indicator * ones_padding + (1 - cond_indicator) * zeros_padding

            return (
                latents,
                cond_latents,
                cond_mask,
                cond_indicator,
            )

    # Copied from diffusers.pipelines.cosmos.pipeline_cosmos_text2world.CosmosTextToWorldPipeline.check_inputs
    def check_inputs(
        self,
        prompt,
        height,
        width,
        prompt_embeds=None,
        callback_on_step_end_tensor_inputs=None,
    ):
        """Validate resolution, callback tensor names, and the prompt/prompt_embeds contract."""
        if height % 16 != 0 or width % 16 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 16 but are {height} and {width}.")

        if callback_on_step_end_tensor_inputs is not None and not all(
            k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
        ):
            raise ValueError(
                f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
            )

        if prompt is not None and prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
                " only forward one of the two."
            )
        elif prompt is None and prompt_embeds is None:
            raise ValueError(
                "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
            )
        elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

    @property
    def guidance_scale(self):
        return self._guidance_scale

    @property
    def do_classifier_free_guidance(self):
        # CFG is active whenever the requested scale exceeds 1.
        return self._guidance_scale > 1.0

    @property
    def num_timesteps(self):
        return self._num_timesteps

    @property
    def current_timestep(self):
        return self._current_timestep

    @property
    def interrupt(self):
        return self._interrupt

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image: PipelineImageInput | None = None,
        video: list[PipelineImageInput] | None = None,
        prompt: str | list[str] | None = None,
        negative_prompt: str | list[str] | None = None,
        height: int = 704,
        width: int = 1280,
        num_frames: int = 93,
        num_inference_steps: int = 36,
        guidance_scale: float = 7.0,
        num_videos_per_prompt: int | None = 1,
        generator: torch.Generator | list[torch.Generator] | None = None,
        latents: torch.Tensor | None = None,
        prompt_embeds: torch.Tensor | None = None,
        negative_prompt_embeds: torch.Tensor | None = None,
        output_type: str | None = "pil",
        return_dict: bool = True,
        callback_on_step_end: Callable[[int, int, None], PipelineCallback | MultiPipelineCallbacks] | None = None,
        callback_on_step_end_tensor_inputs: list[str] = ["latents"],
        max_sequence_length: int = 512,
        conditional_frame_timestep: float = 0.1,
        num_latent_conditional_frames: int = 2,
    ):
        r"""
        The call function to the pipeline for generation.

        Supports three modes:

        - **Text2World**: `image=None`, `video=None`, `prompt` provided. Generates a world clip.
        - **Image2World**: `image` provided, `video=None`, `prompt` provided. Conditions on a single frame.
        - **Video2World**: `video` provided, `image=None`, `prompt` provided. Conditions on an input clip.

        Set `num_frames=93` (default) to produce a world video, or `num_frames=1` to produce a single image frame
        (the above in "*2Image mode").
        Outputs follow `output_type` (e.g., `"pil"` returns a list of `num_frames` PIL images per prompt).

        Args:
            image (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, *optional*):
                Optional single image for Image2World conditioning. Must be `None` when `video` is provided.
            video (`list[PIL.Image.Image]`, `np.ndarray`, `torch.Tensor`, *optional*):
                Optional input video for Video2World conditioning. Must be `None` when `image` is provided.
            prompt (`str` or `list[str]`, *optional*):
                The prompt or prompts to guide generation. Required unless `prompt_embeds` is supplied.
            height (`int`, defaults to `704`):
                The height in pixels of the generated image.
            width (`int`, defaults to `1280`):
                The width in pixels of the generated image.
            num_frames (`int`, defaults to `93`):
                Number of output frames. Use `93` for world (video) generation; set to `1` to return a single frame.
            num_inference_steps (`int`, defaults to `36`):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            guidance_scale (`float`, defaults to `7.0`):
                Guidance scale as defined in [Classifier-Free Diffusion
                Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2.
                of [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting
                `guidance_scale > 1`.
            num_videos_per_prompt (`int`, *optional*, defaults to 1):
                The number of videos to generate per prompt.
            generator (`torch.Generator` or `list[torch.Generator]`, *optional*):
                A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
                generation deterministic.
            latents (`torch.Tensor`, *optional*):
                Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a
                latents tensor is generated by sampling using the supplied random `generator`.
            prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If
                not provided, text embeddings will be generated from `prompt` input argument.
            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
                argument.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between `PIL.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`CosmosPipelineOutput`] instead of a plain tuple.
            callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*):
                A function or a subclass of `PipelineCallback` or `MultiPipelineCallbacks` that is called at the end of
                each denoising step during the inference. with the following arguments: `callback_on_step_end(self:
                DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a
                list of all tensors as specified by `callback_on_step_end_tensor_inputs`.
            callback_on_step_end_tensor_inputs (`List`, *optional*):
                The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
                will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
                `._callback_tensor_inputs` attribute of your pipeline class.
            max_sequence_length (`int`, defaults to `512`):
                The maximum number of tokens in the prompt. If the prompt exceeds this length, it will be truncated.
                If the prompt is shorter than this length, it will be padded.
            conditional_frame_timestep (`float`, defaults to `0.1`):
                Timestep value assigned to the conditioning (clean) latent frames during denoising, while generated
                frames follow the scheduler's sigma at each step.
            num_latent_conditional_frames (`int`, defaults to `2`):
                Number of latent conditional frames to use for Video2World conditioning. The number of pixel frames
                extracted from the input video is calculated as `4 * (num_latent_conditional_frames - 1) + 1`.
                Set to 1 for Image2World-like behavior (single frame conditioning).

        Examples:

        Returns:
            [`~CosmosPipelineOutput`] or `tuple`:
                If `return_dict` is `True`, [`CosmosPipelineOutput`] is returned, otherwise a one-element `tuple` is
                returned containing the generated frames.
        """

        if self.safety_checker is None:
            raise ValueError(
                f"You have disabled the safety checker for {self.__class__}. This is in violation of the "
                "[NVIDIA Open Model License Agreement](https://www.nvidia.com/en-us/agreements/enterprise-software/nvidia-open-model-license). "
                f"Please ensure that you are compliant with the license agreement."
            )

        if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)):
            callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs

        # Check inputs. Raise error if not correct
        self.check_inputs(prompt, height, width, prompt_embeds, callback_on_step_end_tensor_inputs)

        self._guidance_scale = guidance_scale
        self._current_timestep = None
        self._interrupt = False

        device = self._execution_device

        # Run the guardrail on every text prompt before any compute is spent.
        if self.safety_checker is not None:
            self.safety_checker.to(device)
            if prompt is not None:
                prompt_list = [prompt] if isinstance(prompt, str) else prompt
                for p in prompt_list:
                    if not self.safety_checker.check_text_safety(p):
                        raise ValueError(
                            f"Cosmos Guardrail detected unsafe text in the prompt: {p}. Please ensure that the "
                            f"prompt abides by the NVIDIA Open Model License Agreement."
                        )

        # Define call parameters
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        # Encode input prompt
        (
            prompt_embeds,
            negative_prompt_embeds,
        ) = self.encode_prompt(
            prompt=prompt,
            negative_prompt=negative_prompt,
            do_classifier_free_guidance=self.do_classifier_free_guidance,
            num_videos_per_prompt=num_videos_per_prompt,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            device=device,
            max_sequence_length=max_sequence_length,
        )

        vae_dtype = self.vae.dtype
        transformer_dtype = self.transformer.dtype

        # Normalize all three modes (text/image/video) into a single `video` tensor plus the
        # number of real conditioning pixel frames (`num_frames_in`).
        num_frames_in = None
        if image is not None:
            if batch_size != 1:
                raise ValueError(f"batch_size must be 1 for image input (given {batch_size})")
            # Image2World: first frame is the image, remaining frames are zeros placeholders.
            image = torchvision.transforms.functional.to_tensor(image).unsqueeze(0)
            video = torch.cat([image, torch.zeros_like(image).repeat(num_frames - 1, 1, 1, 1)], dim=0)
            video = video.unsqueeze(0)
            num_frames_in = 1
        elif video is None:
            # Text2World: all-zero dummy video, no conditioning frames.
            video = torch.zeros(batch_size, num_frames, 3, height, width, dtype=torch.uint8)
            num_frames_in = 0
        else:
            if batch_size != 1:
                raise ValueError(f"batch_size must be 1 for video input (given {batch_size})")
            if num_latent_conditional_frames not in [1, 2]:
                raise ValueError(
                    f"num_latent_conditional_frames must be 1 or 2, but got {num_latent_conditional_frames}"
                )
            frames_to_extract = 4 * (num_latent_conditional_frames - 1) + 1
            total_input_frames = len(video)
            if total_input_frames < frames_to_extract:
                raise ValueError(
                    f"Input video has only {total_input_frames} frames but Video2World requires at least "
                    f"{frames_to_extract} frames for conditioning."
                )
            num_frames_in = frames_to_extract

        assert video is not None
        video = self.video_processor.preprocess_video(video, height, width)

        # For Video2World: extract last frames_to_extract frames from input, then pad
        if image is None and num_frames_in > 0 and num_frames_in < video.shape[2]:
            video = video[:, :, -num_frames_in:, :, :]

        # Pad to the requested output length by repeating the final frame.
        num_frames_out = num_frames
        if video.shape[2] < num_frames_out:
            n_pad_frames = num_frames_out - video.shape[2]
            last_frame = video[:, :, -1:, :, :]  # [B, C, T==1, H, W]
            pad_frames = last_frame.repeat(1, 1, n_pad_frames, 1, 1)  # [B, C, T, H, W]
            video = torch.cat((video, pad_frames), dim=2)

        assert num_frames_in <= num_frames_out, f"expected ({num_frames_in=}) <= ({num_frames_out=})"
        video = video.to(device=device, dtype=vae_dtype)

        # One transformer input channel is reserved for the condition mask.
        num_channels_latents = self.transformer.config.in_channels - 1
        latents, cond_latent, cond_mask, cond_indicator = self.prepare_latents(
            video=video,
            batch_size=batch_size * num_videos_per_prompt,
            num_channels_latents=num_channels_latents,
            height=height,
            width=width,
            num_frames_in=num_frames_in,
            num_frames_out=num_frames,
            do_classifier_free_guidance=self.do_classifier_free_guidance,
            dtype=torch.float32,
            device=device,
            generator=generator,
            latents=latents,
        )
        # Conditioning frames are fed to the transformer with a fixed small timestep.
        cond_timestep = torch.ones_like(cond_indicator) * conditional_frame_timestep
        cond_mask = cond_mask.to(transformer_dtype)

        padding_mask = latents.new_zeros(1, 1, height, width, dtype=transformer_dtype)

        # Denoising loop
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps
        self._num_timesteps = len(timesteps)
        num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order

        # Ground-truth velocity on the conditioning region; it replaces the model prediction there.
        gt_velocity = (latents - cond_latent) * cond_mask

        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                if self.interrupt:
                    continue

                self._current_timestep = t.cpu().item()
                # NOTE: assumes sigma(t) \in [0, 1]
                sigma_t = (
                    torch.tensor(self.scheduler.sigmas[i].item())
                    .unsqueeze(0)
                    .to(device=device, dtype=transformer_dtype)
                )

                # Clean conditioning latents where cond_mask==1, current noisy latents elsewhere;
                # per-frame timestep mirrors the same split.
                in_latents = cond_mask * cond_latent + (1 - cond_mask) * latents
                in_latents = in_latents.to(transformer_dtype)
                in_timestep = cond_indicator * cond_timestep + (1 - cond_indicator) * sigma_t

                noise_pred = self.transformer(
                    hidden_states=in_latents,
                    condition_mask=cond_mask,
                    timestep=in_timestep,
                    encoder_hidden_states=prompt_embeds,
                    padding_mask=padding_mask,
                    return_dict=False,
                )[0]
                # NOTE: replace velocity (noise_pred) with gt_velocity for conditioning inputs only
                noise_pred = gt_velocity + noise_pred * (1 - cond_mask)

                if self.do_classifier_free_guidance:
                    noise_pred_neg = self.transformer(
                        hidden_states=in_latents,
                        condition_mask=cond_mask,
                        timestep=in_timestep,
                        encoder_hidden_states=negative_prompt_embeds,
                        padding_mask=padding_mask,
                        return_dict=False,
                    )[0]
                    # NOTE: replace velocity (noise_pred_neg) with gt_velocity for conditioning inputs only
                    noise_pred_neg = gt_velocity + noise_pred_neg * (1 - cond_mask)
                    # NOTE(review): this combines as pos + scale * (pos - neg), not the common
                    # neg + scale * (pos - neg) — presumably intentional for this model; confirm upstream.
                    noise_pred = noise_pred + self.guidance_scale * (noise_pred - noise_pred_neg)

                latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0]

                if callback_on_step_end is not None:
                    callback_kwargs = {}
                    for k in callback_on_step_end_tensor_inputs:
                        callback_kwargs[k] = locals()[k]
                    callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)

                    latents = callback_outputs.pop("latents", latents)
                    prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
                    negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)

                # call the callback, if provided
                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                    progress_bar.update()

                if XLA_AVAILABLE:
                    xm.mark_step()

        self._current_timestep = None

        if not output_type == "latent":
            # Denormalize with the VAE statistics before decoding.
            latents_mean = self.latents_mean.to(latents.device, latents.dtype)
            latents_std = self.latents_std.to(latents.device, latents.dtype)
            latents = latents * latents_std + latents_mean
            video = self.vae.decode(latents.to(self.vae.dtype), return_dict=False)[0]
            video = self._match_num_frames(video, num_frames)

            # Guardrail pass on the decoded frames (uint8 round-trip), then final postprocessing.
            assert self.safety_checker is not None
            self.safety_checker.to(device)
            video = self.video_processor.postprocess_video(video, output_type="np")
            video = (video * 255).astype(np.uint8)
            video_batch = []
            for vid in video:
                vid = self.safety_checker.check_video_safety(vid)
                video_batch.append(vid)
            video = np.stack(video_batch).astype(np.float32) / 255.0 * 2 - 1
            video = torch.from_numpy(video).permute(0, 4, 1, 2, 3)
            video = self.video_processor.postprocess_video(video, output_type=output_type)
        else:
            video = latents

        # Offload all models
        self.maybe_free_model_hooks()

        if not return_dict:
            return (video,)

        return CosmosPipelineOutput(frames=video)

    def _match_num_frames(self, video: torch.Tensor, target_num_frames: int) -> torch.Tensor:
        """
        Adjust a decoded video `[B, C, T, H, W]` to exactly `target_num_frames` frames.

        Frames are first repeated `vae_scale_factor_temporal` times along the time axis, then the
        result is right-padded with the last frame or truncated as needed. Returns the input
        unchanged when `target_num_frames <= 0` or the length already matches.
        """
        if target_num_frames <= 0 or video.shape[2] == target_num_frames:
            return video

        frames_per_latent = max(self.vae_scale_factor_temporal, 1)
        video = torch.repeat_interleave(video, repeats=frames_per_latent, dim=2)

        current_frames = video.shape[2]
        if current_frames < target_num_frames:
            pad = video[:, :, -1:, :, :].repeat(1, 1, target_num_frames - current_frames, 1, 1)
            video = torch.cat([video, pad], dim=2)
        elif current_frames > target_num_frames:
            video = video[:, :, :target_num_frames]

        return video
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/pipelines/cosmos/pipeline_cosmos2_5_predict.py", "license": "Apache License 2.0", "lines": 760, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:tests/pipelines/cosmos/test_cosmos2_5_predict.py
# Copyright 2025 The HuggingFace Team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import json import os import tempfile import unittest import numpy as np import torch from transformers import Qwen2_5_VLConfig, Qwen2_5_VLForConditionalGeneration, Qwen2Tokenizer from diffusers import ( AutoencoderKLWan, Cosmos2_5_PredictBasePipeline, CosmosTransformer3DModel, UniPCMultistepScheduler, ) from ...testing_utils import enable_full_determinism, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin, to_np from .cosmos_guardrail import DummyCosmosSafetyChecker enable_full_determinism() class Cosmos2_5_PredictBaseWrapper(Cosmos2_5_PredictBasePipeline): @staticmethod def from_pretrained(*args, **kwargs): if "safety_checker" not in kwargs or kwargs["safety_checker"] is None: safety_checker = DummyCosmosSafetyChecker() device_map = kwargs.get("device_map", "cpu") torch_dtype = kwargs.get("torch_dtype") if device_map is not None or torch_dtype is not None: safety_checker = safety_checker.to(device_map, dtype=torch_dtype) kwargs["safety_checker"] = safety_checker return Cosmos2_5_PredictBasePipeline.from_pretrained(*args, **kwargs) class Cosmos2_5_PredictPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = Cosmos2_5_PredictBaseWrapper params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs"} batch_params = TEXT_TO_IMAGE_BATCH_PARAMS 
    # NOTE(review): this is the interior of a fast-test class for the Cosmos 2.5
    # predict pipeline; the class header and its mixin base classes (which provide
    # `pipeline_class`, `_test_inference_batch_single_identical`, etc.) are outside
    # this chunk.

    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback_on_step_end",
            "callback_on_step_end_tensor_inputs",
        ]
    )
    supports_dduf = False
    test_xformers_attention = False
    test_layerwise_casting = True
    test_group_offloading = True

    def get_dummy_components(self):
        """Build tiny, randomly initialized pipeline components so tests run quickly on CPU."""
        # Re-seed before each component so every component's init is deterministic
        # regardless of how many parameters the previous one consumed.
        torch.manual_seed(0)
        transformer = CosmosTransformer3DModel(
            in_channels=16 + 1,
            out_channels=16,
            num_attention_heads=2,
            attention_head_dim=16,
            num_layers=2,
            mlp_ratio=2,
            text_embed_dim=32,
            adaln_lora_dim=4,
            max_size=(4, 32, 32),
            patch_size=(1, 2, 2),
            rope_scale=(2.0, 1.0, 1.0),
            concat_padding_mask=True,
            extra_pos_embed_type="learnable",
        )

        torch.manual_seed(0)
        vae = AutoencoderKLWan(
            base_dim=3,
            z_dim=16,
            dim_mult=[1, 1, 1, 1],
            num_res_blocks=1,
            temperal_downsample=[False, True, True],
        )

        torch.manual_seed(0)
        scheduler = UniPCMultistepScheduler()

        torch.manual_seed(0)
        config = Qwen2_5_VLConfig(
            text_config={
                "hidden_size": 16,
                "intermediate_size": 16,
                "num_hidden_layers": 2,
                "num_attention_heads": 2,
                "num_key_value_heads": 2,
                "rope_scaling": {
                    "mrope_section": [1, 1, 2],
                    "rope_type": "default",
                    "type": "default",
                },
                "rope_theta": 1000000.0,
            },
            vision_config={
                "depth": 2,
                "hidden_size": 16,
                "intermediate_size": 16,
                "num_heads": 2,
                "out_hidden_size": 16,
            },
            hidden_size=16,
            vocab_size=152064,
            vision_end_token_id=151653,
            vision_start_token_id=151652,
            vision_token_id=151654,
        )
        text_encoder = Qwen2_5_VLForConditionalGeneration(config)
        tokenizer = Qwen2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-Qwen2VLForConditionalGeneration")

        components = {
            "transformer": transformer,
            "vae": vae,
            "scheduler": scheduler,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            # Dummy checker: the real Cosmos Guardrail is too large/slow for CI.
            "safety_checker": DummyCosmosSafetyChecker(),
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Return minimal call kwargs for the pipeline, with a device-appropriate seeded generator."""
        if str(device).startswith("mps"):
            # MPS does not support device-bound generators; fall back to the global one.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "prompt": "dance monkey",
            "negative_prompt": "bad quality",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 3.0,
            "height": 32,
            "width": 32,
            "num_frames": 3,
            "max_sequence_length": 16,
            "output_type": "pt",
        }
        return inputs

    def test_components_function(self):
        """`pipe.components` must expose exactly the components the pipeline was built with."""
        init_components = self.get_dummy_components()
        # Drop plain-value entries; `components` only tracks module-like objects.
        init_components = {k: v for k, v in init_components.items() if not isinstance(v, (str, int, float))}

        pipe = self.pipeline_class(**init_components)

        self.assertTrue(hasattr(pipe, "components"))
        self.assertTrue(set(pipe.components.keys()) == set(init_components.keys()))

    def test_inference(self):
        """Smoke test: a full CPU run returns a finite video of the expected shape."""
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        video = pipe(**inputs).frames
        generated_video = video[0]

        # (num_frames, channels, height, width) — matches the 32x32, 3-frame dummy inputs.
        self.assertEqual(generated_video.shape, (3, 3, 32, 32))
        self.assertTrue(torch.isfinite(generated_video).all())

    def test_callback_inputs(self):
        """Verify the step-end callback contract: allowed tensor names, and tensor mutation taking effect."""
        sig = inspect.signature(self.pipeline_class.__call__)
        has_callback_tensor_inputs = "callback_on_step_end_tensor_inputs" in sig.parameters
        has_callback_step_end = "callback_on_step_end" in sig.parameters

        if not (has_callback_tensor_inputs and has_callback_step_end):
            return

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        self.assertTrue(
            hasattr(pipe, "_callback_tensor_inputs"),
            f" {self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs",
        )

        def callback_inputs_subset(pipe, i, t, callback_kwargs):
            # Every tensor handed to the callback must be declared in `_callback_tensor_inputs`.
            for tensor_name in callback_kwargs.keys():
                assert tensor_name in pipe._callback_tensor_inputs
            return callback_kwargs

        def callback_inputs_all(pipe, i, t, callback_kwargs):
            # When all tensor inputs are requested, the two sets must match exactly.
            for tensor_name in pipe._callback_tensor_inputs:
                assert tensor_name in callback_kwargs
            for tensor_name in callback_kwargs.keys():
                assert tensor_name in pipe._callback_tensor_inputs
            return callback_kwargs

        inputs = self.get_dummy_inputs(torch_device)

        inputs["callback_on_step_end"] = callback_inputs_subset
        inputs["callback_on_step_end_tensor_inputs"] = ["latents"]
        _ = pipe(**inputs)[0]

        inputs["callback_on_step_end"] = callback_inputs_all
        inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs
        _ = pipe(**inputs)[0]

        def callback_inputs_change_tensor(pipe, i, t, callback_kwargs):
            # Zero the latents on the last step; the pipeline must pick up the mutation.
            is_last = i == (pipe.num_timesteps - 1)
            if is_last:
                callback_kwargs["latents"] = torch.zeros_like(callback_kwargs["latents"])
            return callback_kwargs

        inputs["callback_on_step_end"] = callback_inputs_change_tensor
        inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs
        output = pipe(**inputs)[0]
        # Loose sanity bound — only checks the run did not blow up after zeroing latents.
        assert output.abs().sum() < 1e10

    def test_inference_batch_single_identical(self):
        # Delegates to the shared mixin helper with a looser tolerance for this pipeline.
        self._test_inference_batch_single_identical(batch_size=2, expected_max_diff=1e-2)

    def test_attention_slicing_forward_pass(
        self, test_max_difference=True, test_mean_pixel_difference=True, expected_max_diff=1e-3
    ):
        """Attention slicing (any slice size) must not change the output beyond `expected_max_diff`."""
        if not getattr(self, "test_attention_slicing", True):
            return

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        for component in pipe.components.values():
            if hasattr(component, "set_default_attn_processor"):
                component.set_default_attn_processor()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        # Generators on CPU so the same seed yields identical noise across runs.
        generator_device = "cpu"
        inputs = self.get_dummy_inputs(generator_device)
        output_without_slicing = pipe(**inputs)[0]

        pipe.enable_attention_slicing(slice_size=1)
        inputs = self.get_dummy_inputs(generator_device)
        output_with_slicing1 = pipe(**inputs)[0]

        pipe.enable_attention_slicing(slice_size=2)
        inputs = self.get_dummy_inputs(generator_device)
        output_with_slicing2 = pipe(**inputs)[0]

        if test_max_difference:
            max_diff1 = np.abs(to_np(output_with_slicing1) - to_np(output_without_slicing)).max()
            max_diff2 = np.abs(to_np(output_with_slicing2) - to_np(output_without_slicing)).max()
            self.assertLess(
                max(max_diff1, max_diff2),
                expected_max_diff,
                "Attention slicing should not affect the inference results",
            )

    def test_save_load_optional_components(self, expected_max_difference=1e-4):
        # Temporarily treat the safety checker as required so the base test does not
        # drop it (dropping it would trigger loading the real, heavyweight guardrail).
        # NOTE(review): mutates the shared class attribute; restored below, but not
        # exception-safe if the super() call raises.
        self.pipeline_class._optional_components.remove("safety_checker")
        super().test_save_load_optional_components(expected_max_difference=expected_max_difference)
        self.pipeline_class._optional_components.append("safety_checker")

    def test_serialization_with_variants(self):
        """Saving with a variant must produce variant-suffixed weight files for every model component."""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        model_components = [
            component_name
            for component_name, component in pipe.components.items()
            if isinstance(component, torch.nn.Module)
        ]
        # The dummy safety checker is not serialized as a regular model subfolder.
        model_components.remove("safety_checker")
        variant = "fp16"

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir, variant=variant, safe_serialization=False)

            with open(f"{tmpdir}/model_index.json", "r") as f:
                config = json.load(f)

            for subfolder in os.listdir(tmpdir):
                if not os.path.isfile(subfolder) and subfolder in model_components:
                    folder_path = os.path.join(tmpdir, subfolder)
                    is_folder = os.path.isdir(folder_path) and subfolder in config
                    # e.g. "diffusion_pytorch_model.fp16.bin" — the variant sits after the first dot.
                    assert is_folder and any(p.split(".")[1].startswith(variant) for p in os.listdir(folder_path))

    def test_torch_dtype_dict(self):
        """`torch_dtype` passed as a dict must apply per-component dtypes with a `default` fallback."""
        components = self.get_dummy_components()
        if not components:
            self.skipTest("No dummy components defined.")

        pipe = self.pipeline_class(**components)
        specified_key = next(iter(components.keys()))

        with tempfile.TemporaryDirectory(ignore_cleanup_errors=True) as tmpdirname:
            pipe.save_pretrained(tmpdirname, safe_serialization=False)
            torch_dtype_dict = {specified_key: torch.bfloat16, "default": torch.float16}
            loaded_pipe = self.pipeline_class.from_pretrained(
                tmpdirname, safety_checker=DummyCosmosSafetyChecker(), torch_dtype=torch_dtype_dict
            )

            for name, component in loaded_pipe.components.items():
                # The dummy safety checker is injected, not loaded, so its dtype is exempt.
                if name == "safety_checker":
                    continue
                if isinstance(component, torch.nn.Module) and hasattr(component, "dtype"):
                    expected_dtype = torch_dtype_dict.get(name, torch_dtype_dict.get("default", torch.float32))
                    self.assertEqual(
                        component.dtype,
                        expected_dtype,
                        f"Component '{name}' has dtype {component.dtype} but expected {expected_dtype}",
                    )

    @unittest.skip(
        "The pipeline should not be runnable without a safety checker. The test creates a pipeline without passing in "
        "a safety checker, which makes the pipeline default to the actual Cosmos Guardrail. The Cosmos Guardrail is "
        "too large and slow to run on CI."
    )
    def test_encode_prompt_works_in_isolation(self):
        pass
{ "repo_id": "huggingface/diffusers", "file_path": "tests/pipelines/cosmos/test_cosmos2_5_predict.py", "license": "Apache License 2.0", "lines": 287, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/diffusers:src/diffusers/models/controlnets/controlnet_z_image.py
# Copyright 2025 Alibaba Z-Image Team and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# ControlNet for the Z-Image transformer. The ControlNet shares most of its
# embedding/refiner modules with the base transformer (see `from_transformer`)
# and returns per-layer residual "hints" that the base model adds to its
# hidden states.

import math
from typing import Literal

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pad_sequence

from ...configuration_utils import ConfigMixin, register_to_config
from ...loaders import PeftAdapterMixin
from ...loaders.single_file_model import FromOriginalModelMixin
from ...models.attention_processor import Attention
from ...models.normalization import RMSNorm
from ...utils.torch_utils import maybe_allow_in_graph
from ..attention_dispatch import dispatch_attention_fn
from ..controlnets.controlnet import zero_module
from ..modeling_utils import ModelMixin


# Width of the AdaLN conditioning embedding fed to per-block modulation MLPs.
ADALN_EMBED_DIM = 256
# All token sequences are padded to a multiple of this length.
SEQ_MULTI_OF = 32


# Copied from diffusers.models.transformers.transformer_z_image.TimestepEmbedder
class TimestepEmbedder(nn.Module):
    """Embed scalar diffusion timesteps via sinusoidal features followed by a 2-layer MLP."""

    def __init__(self, out_size, mid_size=None, frequency_embedding_size=256):
        super().__init__()
        if mid_size is None:
            mid_size = out_size
        self.mlp = nn.Sequential(
            nn.Linear(frequency_embedding_size, mid_size, bias=True),
            nn.SiLU(),
            nn.Linear(mid_size, out_size, bias=True),
        )
        self.frequency_embedding_size = frequency_embedding_size

    @staticmethod
    def timestep_embedding(t, dim, max_period=10000):
        # Classic sinusoidal embedding, computed in fp32 with autocast disabled
        # so low-precision autocast contexts do not degrade the frequencies.
        with torch.amp.autocast("cuda", enabled=False):
            half = dim // 2
            freqs = torch.exp(
                -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32, device=t.device) / half
            )
            args = t[:, None].float() * freqs[None]
            embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
            if dim % 2:
                # Odd dim: pad with one zero column to reach exactly `dim`.
                embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)
            return embedding

    def forward(self, t):
        t_freq = self.timestep_embedding(t, self.frequency_embedding_size)
        # Cast the fp32 sinusoids to the MLP's weight dtype (or its `compute_dtype`
        # when the weights are non-floating, e.g. quantized) before the matmul.
        weight_dtype = self.mlp[0].weight.dtype
        compute_dtype = getattr(self.mlp[0], "compute_dtype", None)
        if weight_dtype.is_floating_point:
            t_freq = t_freq.to(weight_dtype)
        elif compute_dtype is not None:
            t_freq = t_freq.to(compute_dtype)
        t_emb = self.mlp(t_freq)
        return t_emb


# Copied from diffusers.models.transformers.transformer_z_image.ZSingleStreamAttnProcessor
class ZSingleStreamAttnProcessor:
    """
    Processor for Z-Image single stream attention that adapts the existing Attention class to match the behavior of
    the original Z-ImageAttention module.
    """

    _attention_backend = None
    _parallel_config = None

    def __init__(self):
        if not hasattr(F, "scaled_dot_product_attention"):
            raise ImportError(
                "ZSingleStreamAttnProcessor requires PyTorch 2.0. To use it, please upgrade PyTorch to version 2.0 or higher."
            )

    def __call__(
        self,
        attn: Attention,
        hidden_states: torch.Tensor,
        encoder_hidden_states: torch.Tensor | None = None,
        attention_mask: torch.Tensor | None = None,
        freqs_cis: torch.Tensor | None = None,
    ) -> torch.Tensor:
        # Self-attention only: q, k, v are all projected from `hidden_states`
        # (`encoder_hidden_states` is accepted for interface parity but unused).
        query = attn.to_q(hidden_states)
        key = attn.to_k(hidden_states)
        value = attn.to_v(hidden_states)

        # (batch, seq, dim) -> (batch, seq, heads, head_dim)
        query = query.unflatten(-1, (attn.heads, -1))
        key = key.unflatten(-1, (attn.heads, -1))
        value = value.unflatten(-1, (attn.heads, -1))

        # Apply Norms
        if attn.norm_q is not None:
            query = attn.norm_q(query)
        if attn.norm_k is not None:
            key = attn.norm_k(key)

        # Apply RoPE
        def apply_rotary_emb(x_in: torch.Tensor, freqs_cis: torch.Tensor) -> torch.Tensor:
            # Rotate (q or k) by the complex frequencies; done in fp32 with
            # autocast disabled for numerical stability, then cast back.
            with torch.amp.autocast("cuda", enabled=False):
                x = torch.view_as_complex(x_in.float().reshape(*x_in.shape[:-1], -1, 2))
                freqs_cis = freqs_cis.unsqueeze(2)
                x_out = torch.view_as_real(x * freqs_cis).flatten(3)
                return x_out.type_as(x_in)  # todo

        if freqs_cis is not None:
            query = apply_rotary_emb(query, freqs_cis)
            key = apply_rotary_emb(key, freqs_cis)

        # Cast to correct dtype
        dtype = query.dtype
        query, key = query.to(dtype), key.to(dtype)

        # From [batch, seq_len] to [batch, 1, 1, seq_len] -> broadcast to [batch, heads, seq_len, seq_len]
        if attention_mask is not None and attention_mask.ndim == 2:
            attention_mask = attention_mask[:, None, None, :]

        # Compute joint attention
        hidden_states = dispatch_attention_fn(
            query,
            key,
            value,
            attn_mask=attention_mask,
            dropout_p=0.0,
            is_causal=False,
            backend=self._attention_backend,
            parallel_config=self._parallel_config,
        )

        # Reshape back: (batch, seq, heads, head_dim) -> (batch, seq, dim)
        hidden_states = hidden_states.flatten(2, 3)
        hidden_states = hidden_states.to(dtype)

        output = attn.to_out[0](hidden_states)
        if len(attn.to_out) > 1:  # dropout
            output = attn.to_out[1](output)

        return output


# Copied from diffusers.models.transformers.transformer_z_image.FeedForward
class FeedForward(nn.Module):
    """SwiGLU feed-forward block: w2(silu(w1(x)) * w3(x))."""

    def __init__(self, dim: int, hidden_dim: int):
        super().__init__()
        self.w1 = nn.Linear(dim, hidden_dim, bias=False)
        self.w2 = nn.Linear(hidden_dim, dim, bias=False)
        self.w3 = nn.Linear(dim, hidden_dim, bias=False)

    def _forward_silu_gating(self, x1, x3):
        return F.silu(x1) * x3

    def forward(self, x):
        return self.w2(self._forward_silu_gating(self.w1(x), self.w3(x)))


# Copied from diffusers.models.transformers.transformer_z_image.select_per_token
def select_per_token(
    value_noisy: torch.Tensor,
    value_clean: torch.Tensor,
    noise_mask: torch.Tensor,
    seq_len: int,
) -> torch.Tensor:
    """Per-token select: take `value_noisy` where noise_mask == 1, else `value_clean`.

    Both values are (batch, dim) and are broadcast to every position of the
    (batch, seq_len) mask before selection.
    """
    noise_mask_expanded = noise_mask.unsqueeze(-1)  # (batch, seq_len, 1)
    return torch.where(
        noise_mask_expanded == 1,
        value_noisy.unsqueeze(1).expand(-1, seq_len, -1),
        value_clean.unsqueeze(1).expand(-1, seq_len, -1),
    )


@maybe_allow_in_graph
# Copied from diffusers.models.transformers.transformer_z_image.ZImageTransformerBlock
class ZImageTransformerBlock(nn.Module):
    """Standard Z-Image transformer block: RoPE self-attention + SwiGLU FFN, with
    optional AdaLN (scale/gate) modulation from a timestep embedding."""

    def __init__(
        self,
        layer_id: int,
        dim: int,
        n_heads: int,
        n_kv_heads: int,
        norm_eps: float,
        qk_norm: bool,
        modulation=True,
    ):
        super().__init__()
        self.dim = dim
        self.head_dim = dim // n_heads

        # Refactored to use diffusers Attention with custom processor
        # Original Z-Image params: dim, n_heads, n_kv_heads, qk_norm
        self.attention = Attention(
            query_dim=dim,
            cross_attention_dim=None,
            dim_head=dim // n_heads,
            heads=n_heads,
            qk_norm="rms_norm" if qk_norm else None,
            eps=1e-5,
            bias=False,
            out_bias=False,
            processor=ZSingleStreamAttnProcessor(),
        )

        self.feed_forward = FeedForward(dim=dim, hidden_dim=int(dim / 3 * 8))
        self.layer_id = layer_id

        self.attention_norm1 = RMSNorm(dim, eps=norm_eps)
        self.ffn_norm1 = RMSNorm(dim, eps=norm_eps)
        self.attention_norm2 = RMSNorm(dim, eps=norm_eps)
        self.ffn_norm2 = RMSNorm(dim, eps=norm_eps)

        self.modulation = modulation
        if modulation:
            self.adaLN_modulation = nn.Sequential(nn.Linear(min(dim, ADALN_EMBED_DIM), 4 * dim, bias=True))

    def forward(
        self,
        x: torch.Tensor,
        attn_mask: torch.Tensor,
        freqs_cis: torch.Tensor,
        adaln_input: torch.Tensor | None = None,
        noise_mask: torch.Tensor | None = None,
        adaln_noisy: torch.Tensor | None = None,
        adaln_clean: torch.Tensor | None = None,
    ):
        if self.modulation:
            seq_len = x.shape[1]

            if noise_mask is not None:
                # Per-token modulation: different modulation for noisy/clean tokens
                mod_noisy = self.adaLN_modulation(adaln_noisy)
                mod_clean = self.adaLN_modulation(adaln_clean)
                scale_msa_noisy, gate_msa_noisy, scale_mlp_noisy, gate_mlp_noisy = mod_noisy.chunk(4, dim=1)
                scale_msa_clean, gate_msa_clean, scale_mlp_clean, gate_mlp_clean = mod_clean.chunk(4, dim=1)

                gate_msa_noisy, gate_mlp_noisy = gate_msa_noisy.tanh(), gate_mlp_noisy.tanh()
                gate_msa_clean, gate_mlp_clean = gate_msa_clean.tanh(), gate_mlp_clean.tanh()
                scale_msa_noisy, scale_mlp_noisy = 1.0 + scale_msa_noisy, 1.0 + scale_mlp_noisy
                scale_msa_clean, scale_mlp_clean = 1.0 + scale_msa_clean, 1.0 + scale_mlp_clean

                scale_msa = select_per_token(scale_msa_noisy, scale_msa_clean, noise_mask, seq_len)
                scale_mlp = select_per_token(scale_mlp_noisy, scale_mlp_clean, noise_mask, seq_len)
                gate_msa = select_per_token(gate_msa_noisy, gate_msa_clean, noise_mask, seq_len)
                gate_mlp = select_per_token(gate_mlp_noisy, gate_mlp_clean, noise_mask, seq_len)
            else:
                # Global modulation: same modulation for all tokens (avoid double select)
                mod = self.adaLN_modulation(adaln_input)
                scale_msa, gate_msa, scale_mlp, gate_mlp = mod.unsqueeze(1).chunk(4, dim=2)
                gate_msa, gate_mlp = gate_msa.tanh(), gate_mlp.tanh()
                scale_msa, scale_mlp = 1.0 + scale_msa, 1.0 + scale_mlp

            # Attention block
            attn_out = self.attention(
                self.attention_norm1(x) * scale_msa, attention_mask=attn_mask, freqs_cis=freqs_cis
            )
            x = x + gate_msa * self.attention_norm2(attn_out)

            # FFN block
            x = x + gate_mlp * self.ffn_norm2(self.feed_forward(self.ffn_norm1(x) * scale_mlp))
        else:
            # Attention block
            attn_out = self.attention(self.attention_norm1(x), attention_mask=attn_mask, freqs_cis=freqs_cis)
            x = x + self.attention_norm2(attn_out)

            # FFN block
            x = x + self.ffn_norm2(self.feed_forward(self.ffn_norm1(x)))

        return x


# Copied from diffusers.models.transformers.transformer_z_image.RopeEmbedder
class RopeEmbedder:
    """Multi-axis RoPE lookup: precomputes complex frequencies per axis and gathers
    them by integer position ids. Not an nn.Module — tables are cached lazily."""

    def __init__(
        self,
        theta: float = 256.0,
        axes_dims: list[int] = (16, 56, 56),
        axes_lens: list[int] = (64, 128, 128),
    ):
        self.theta = theta
        self.axes_dims = axes_dims
        self.axes_lens = axes_lens
        assert len(axes_dims) == len(axes_lens), "axes_dims and axes_lens must have the same length"
        self.freqs_cis = None

    @staticmethod
    def precompute_freqs_cis(dim: list[int], end: list[int], theta: float = 256.0):
        # Tables are built on CPU (in float64 for precision) and moved to the
        # target device on first use in __call__.
        with torch.device("cpu"):
            freqs_cis = []
            for i, (d, e) in enumerate(zip(dim, end)):
                freqs = 1.0 / (theta ** (torch.arange(0, d, 2, dtype=torch.float64, device="cpu") / d))
                timestep = torch.arange(e, device=freqs.device, dtype=torch.float64)
                freqs = torch.outer(timestep, freqs).float()
                freqs_cis_i = torch.polar(torch.ones_like(freqs), freqs).to(torch.complex64)  # complex64
                freqs_cis.append(freqs_cis_i)
            return freqs_cis

    def __call__(self, ids: torch.Tensor):
        # ids: (num_tokens, num_axes) integer positions, one column per RoPE axis.
        assert ids.ndim == 2
        assert ids.shape[-1] == len(self.axes_dims)
        device = ids.device

        if self.freqs_cis is None:
            self.freqs_cis = self.precompute_freqs_cis(self.axes_dims, self.axes_lens, theta=self.theta)
            self.freqs_cis = [freqs_cis.to(device) for freqs_cis in self.freqs_cis]
        else:
            # Ensure freqs_cis are on the same device as ids
            if self.freqs_cis[0].device != device:
                self.freqs_cis = [freqs_cis.to(device) for freqs_cis in self.freqs_cis]

        result = []
        for i in range(len(self.axes_dims)):
            index = ids[:, i]
            result.append(self.freqs_cis[i][index])
        return torch.cat(result, dim=-1)


@maybe_allow_in_graph
class ZImageControlTransformerBlock(nn.Module):
    """Control variant of `ZImageTransformerBlock`.

    Processes the control stream `c` (conditioned on the main stream `x` at the
    first block via a zero-initialized projection) and accumulates a
    zero-initialized `after_proj` skip output per block; the stacked outputs are
    consumed by `ZImageControlNetModel.forward` as per-layer hints.
    """

    def __init__(
        self,
        layer_id: int,
        dim: int,
        n_heads: int,
        n_kv_heads: int,
        norm_eps: float,
        qk_norm: bool,
        modulation=True,
        block_id=0,
    ):
        super().__init__()
        self.dim = dim
        self.head_dim = dim // n_heads

        # Refactored to use diffusers Attention with custom processor
        # Original Z-Image params: dim, n_heads, n_kv_heads, qk_norm
        self.attention = Attention(
            query_dim=dim,
            cross_attention_dim=None,
            dim_head=dim // n_heads,
            heads=n_heads,
            qk_norm="rms_norm" if qk_norm else None,
            eps=1e-5,
            bias=False,
            out_bias=False,
            processor=ZSingleStreamAttnProcessor(),
        )

        self.feed_forward = FeedForward(dim=dim, hidden_dim=int(dim / 3 * 8))
        self.layer_id = layer_id

        self.attention_norm1 = RMSNorm(dim, eps=norm_eps)
        self.ffn_norm1 = RMSNorm(dim, eps=norm_eps)
        self.attention_norm2 = RMSNorm(dim, eps=norm_eps)
        self.ffn_norm2 = RMSNorm(dim, eps=norm_eps)

        self.modulation = modulation
        if modulation:
            self.adaLN_modulation = nn.Sequential(nn.Linear(min(dim, ADALN_EMBED_DIM), 4 * dim, bias=True))

        # Control variant start
        self.block_id = block_id
        if block_id == 0:
            # Zero-init so the control branch is a no-op at initialization.
            self.before_proj = zero_module(nn.Linear(self.dim, self.dim))
        self.after_proj = zero_module(nn.Linear(self.dim, self.dim))

    def forward(
        self,
        c: torch.Tensor,
        x: torch.Tensor,
        attn_mask: torch.Tensor,
        freqs_cis: torch.Tensor,
        adaln_input: torch.Tensor | None = None,
    ):
        # Control
        if self.block_id == 0:
            # First control block: inject the main stream into the control stream.
            c = self.before_proj(c) + x
            all_c = []
        else:
            # Later blocks receive a stack of [hint_0..hint_{k-1}, running c];
            # pop the running control state, keep earlier hints.
            all_c = list(torch.unbind(c))
            c = all_c.pop(-1)

        # Compared to `ZImageTransformerBlock` x -> c
        if self.modulation:
            assert adaln_input is not None
            scale_msa, gate_msa, scale_mlp, gate_mlp = self.adaLN_modulation(adaln_input).unsqueeze(1).chunk(4, dim=2)
            gate_msa, gate_mlp = gate_msa.tanh(), gate_mlp.tanh()
            scale_msa, scale_mlp = 1.0 + scale_msa, 1.0 + scale_mlp

            # Attention block
            attn_out = self.attention(
                self.attention_norm1(c) * scale_msa, attention_mask=attn_mask, freqs_cis=freqs_cis
            )
            c = c + gate_msa * self.attention_norm2(attn_out)

            # FFN block
            c = c + gate_mlp * self.ffn_norm2(self.feed_forward(self.ffn_norm1(c) * scale_mlp))
        else:
            # Attention block
            attn_out = self.attention(self.attention_norm1(c), attention_mask=attn_mask, freqs_cis=freqs_cis)
            c = c + self.attention_norm2(attn_out)

            # FFN block
            c = c + self.ffn_norm2(self.feed_forward(self.ffn_norm1(c)))

        # Control
        c_skip = self.after_proj(c)
        all_c += [c_skip, c]
        c = torch.stack(all_c)
        return c


class ZImageControlNetModel(ModelMixin, ConfigMixin, PeftAdapterMixin, FromOriginalModelMixin):
    """ControlNet for Z-Image.

    Several submodules (`t_embedder`, `all_x_embedder`, `cap_embedder`,
    `rope_embedder`, `noise_refiner`, `context_refiner`, pad tokens) are NOT
    owned by this model — they must be shared from the base transformer via
    `from_transformer` before calling `forward`.
    """

    _supports_gradient_checkpointing = True

    @register_to_config
    def __init__(
        self,
        control_layers_places: list[int] = None,
        control_refiner_layers_places: list[int] = None,
        control_in_dim=None,
        add_control_noise_refiner: Literal["control_layers", "control_noise_refiner"] | None = None,
        all_patch_size=(2,),
        all_f_patch_size=(1,),
        dim=3840,
        n_refiner_layers=2,
        n_heads=30,
        n_kv_heads=30,
        norm_eps=1e-5,
        qk_norm=True,
    ):
        super().__init__()
        self.control_layers_places = control_layers_places
        self.control_in_dim = control_in_dim
        self.control_refiner_layers_places = control_refiner_layers_places
        self.add_control_noise_refiner = add_control_noise_refiner
        # Block 0 holds the zero-initialized `before_proj` injection; it must exist.
        assert 0 in self.control_layers_places

        # control blocks
        self.control_layers = nn.ModuleList(
            [
                ZImageControlTransformerBlock(i, dim, n_heads, n_kv_heads, norm_eps, qk_norm, block_id=i)
                for i in self.control_layers_places
            ]
        )

        # control patch embeddings — one linear per (patch_size, f_patch_size) combination.
        all_x_embedder = {}
        for patch_idx, (patch_size, f_patch_size) in enumerate(zip(all_patch_size, all_f_patch_size)):
            x_embedder = nn.Linear(f_patch_size * patch_size * patch_size * self.control_in_dim, dim, bias=True)
            all_x_embedder[f"{patch_size}-{f_patch_size}"] = x_embedder
        self.control_all_x_embedder = nn.ModuleDict(all_x_embedder)

        # Three refiner configurations:
        #   "control_layers"        -> reuse `self.control_layers` as the refiner (no extra module)
        #   "control_noise_refiner" -> dedicated control-style refiner blocks
        #   None                    -> dedicated plain transformer refiner blocks
        if self.add_control_noise_refiner == "control_layers":
            self.control_noise_refiner = None
        elif self.add_control_noise_refiner == "control_noise_refiner":
            self.control_noise_refiner = nn.ModuleList(
                [
                    ZImageControlTransformerBlock(
                        1000 + layer_id,
                        dim,
                        n_heads,
                        n_kv_heads,
                        norm_eps,
                        qk_norm,
                        modulation=True,
                        block_id=layer_id,
                    )
                    for layer_id in range(n_refiner_layers)
                ]
            )
        else:
            self.control_noise_refiner = nn.ModuleList(
                [
                    ZImageTransformerBlock(
                        1000 + layer_id,
                        dim,
                        n_heads,
                        n_kv_heads,
                        norm_eps,
                        qk_norm,
                        modulation=True,
                    )
                    for layer_id in range(n_refiner_layers)
                ]
            )

        # Shared-from-transformer modules; populated by `from_transformer`.
        self.t_scale: float | None = None
        self.t_embedder: TimestepEmbedder | None = None
        self.all_x_embedder: nn.ModuleDict | None = None
        self.cap_embedder: nn.Sequential | None = None
        self.rope_embedder: RopeEmbedder | None = None
        self.noise_refiner: nn.ModuleList | None = None
        self.context_refiner: nn.ModuleList | None = None
        self.x_pad_token: nn.Parameter | None = None
        self.cap_pad_token: nn.Parameter | None = None

    @classmethod
    def from_transformer(cls, controlnet, transformer):
        """Share the base transformer's embedding/refiner modules into `controlnet` (no copies)."""
        controlnet.t_scale = transformer.t_scale
        controlnet.t_embedder = transformer.t_embedder
        controlnet.all_x_embedder = transformer.all_x_embedder
        controlnet.cap_embedder = transformer.cap_embedder
        controlnet.rope_embedder = transformer.rope_embedder
        controlnet.noise_refiner = transformer.noise_refiner
        controlnet.context_refiner = transformer.context_refiner
        controlnet.x_pad_token = transformer.x_pad_token
        controlnet.cap_pad_token = transformer.cap_pad_token
        return controlnet

    @staticmethod
    # Copied from diffusers.models.transformers.transformer_z_image.ZImageTransformer2DModel.create_coordinate_grid
    def create_coordinate_grid(size, start=None, device=None):
        """Return an integer coordinate grid of shape (*size, len(size))."""
        if start is None:
            start = (0 for _ in size)
        axes = [torch.arange(x0, x0 + span, dtype=torch.int32, device=device) for x0, span in zip(start, size)]
        grids = torch.meshgrid(axes, indexing="ij")
        return torch.stack(grids, dim=-1)

    # Copied from diffusers.models.transformers.transformer_z_image.ZImageTransformer2DModel._patchify_image
    def _patchify_image(self, image: torch.Tensor, patch_size: int, f_patch_size: int):
        """Patchify a single image tensor: (C, F, H, W) -> (num_patches, patch_dim)."""
        pH, pW, pF = patch_size, patch_size, f_patch_size
        C, F, H, W = image.size()
        F_tokens, H_tokens, W_tokens = F // pF, H // pH, W // pW
        image = image.view(C, F_tokens, pF, H_tokens, pH, W_tokens, pW)
        # "c f pf h ph w pw -> (f h w) (pf ph pw c)"
        image = image.permute(1, 3, 5, 2, 4, 6, 0).reshape(F_tokens * H_tokens * W_tokens, pF * pH * pW * C)
        return image, (F, H, W), (F_tokens, H_tokens, W_tokens)

    # Copied from diffusers.models.transformers.transformer_z_image.ZImageTransformer2DModel._pad_with_ids
    def _pad_with_ids(
        self,
        feat: torch.Tensor,
        pos_grid_size: tuple,
        pos_start: tuple,
        device: torch.device,
        noise_mask_val: int | None = None,
    ):
        """Pad feature to SEQ_MULTI_OF, create position IDs and pad mask."""
        ori_len = len(feat)
        pad_len = (-ori_len) % SEQ_MULTI_OF
        total_len = ori_len + pad_len

        # Pos IDs
        ori_pos_ids = self.create_coordinate_grid(size=pos_grid_size, start=pos_start, device=device).flatten(0, 2)
        if pad_len > 0:
            # Padding tokens all sit at position (0, 0, 0); the padded feature
            # simply repeats the last real token (its values are masked out later).
            pad_pos_ids = (
                self.create_coordinate_grid(size=(1, 1, 1), start=(0, 0, 0), device=device)
                .flatten(0, 2)
                .repeat(pad_len, 1)
            )
            pos_ids = torch.cat([ori_pos_ids, pad_pos_ids], dim=0)
            padded_feat = torch.cat([feat, feat[-1:].repeat(pad_len, 1)], dim=0)
            pad_mask = torch.cat(
                [
                    torch.zeros(ori_len, dtype=torch.bool, device=device),
                    torch.ones(pad_len, dtype=torch.bool, device=device),
                ]
            )
        else:
            pos_ids = ori_pos_ids
            padded_feat = feat
            pad_mask = torch.zeros(ori_len, dtype=torch.bool, device=device)

        noise_mask = [noise_mask_val] * total_len if noise_mask_val is not None else None  # token level

        return padded_feat, pos_ids, pad_mask, total_len, noise_mask

    # Copied from diffusers.models.transformers.transformer_z_image.ZImageTransformer2DModel.patchify_and_embed
    def patchify_and_embed(
        self, all_image: list[torch.Tensor], all_cap_feats: list[torch.Tensor], patch_size: int, f_patch_size: int
    ):
        """Patchify for basic mode: single image per batch item."""
        device = all_image[0].device

        all_img_out, all_img_size, all_img_pos_ids, all_img_pad_mask = [], [], [], []
        all_cap_out, all_cap_pos_ids, all_cap_pad_mask = [], [], []

        for image, cap_feat in zip(all_image, all_cap_feats):
            # Caption — placed on the first RoPE axis starting at 1.
            cap_out, cap_pos_ids, cap_pad_mask, cap_len, _ = self._pad_with_ids(
                cap_feat, (len(cap_feat) + (-len(cap_feat)) % SEQ_MULTI_OF, 1, 1), (1, 0, 0), device
            )
            all_cap_out.append(cap_out)
            all_cap_pos_ids.append(cap_pos_ids)
            all_cap_pad_mask.append(cap_pad_mask)

            # Image — first-axis positions start after the caption span (cap_len + 1).
            img_patches, size, (F_t, H_t, W_t) = self._patchify_image(image, patch_size, f_patch_size)
            img_out, img_pos_ids, img_pad_mask, _, _ = self._pad_with_ids(
                img_patches, (F_t, H_t, W_t), (cap_len + 1, 0, 0), device
            )
            all_img_out.append(img_out)
            all_img_size.append(size)
            all_img_pos_ids.append(img_pos_ids)
            all_img_pad_mask.append(img_pad_mask)

        return (
            all_img_out,
            all_cap_out,
            all_img_size,
            all_img_pos_ids,
            all_cap_pos_ids,
            all_img_pad_mask,
            all_cap_pad_mask,
        )

    def patchify(
        self,
        all_image: list[torch.Tensor],
        patch_size: int,
        f_patch_size: int,
    ):
        """Patchify control images only (no position ids/masks) and pad to SEQ_MULTI_OF.

        The padding layout mirrors `patchify_and_embed` for the main stream, so the
        main stream's pad masks/seqlens can be reused for the control stream.
        """
        pH = pW = patch_size
        pF = f_patch_size

        all_image_out = []

        for i, image in enumerate(all_image):
            ### Process Image
            C, F, H, W = image.size()
            F_tokens, H_tokens, W_tokens = F // pF, H // pH, W // pW
            image = image.view(C, F_tokens, pF, H_tokens, pH, W_tokens, pW)
            # "c f pf h ph w pw -> (f h w) (pf ph pw c)"
            image = image.permute(1, 3, 5, 2, 4, 6, 0).reshape(F_tokens * H_tokens * W_tokens, pF * pH * pW * C)

            image_ori_len = len(image)
            image_padding_len = (-image_ori_len) % SEQ_MULTI_OF

            # padded feature
            image_padded_feat = torch.cat([image, image[-1:].repeat(image_padding_len, 1)], dim=0)
            all_image_out.append(image_padded_feat)

        return all_image_out

    def forward(
        self,
        x: list[torch.Tensor],
        t,
        cap_feats: list[torch.Tensor],
        control_context: list[torch.Tensor],
        conditioning_scale: float = 1.0,
        patch_size=2,
        f_patch_size=1,
    ):
        """Run the ControlNet and return `{layer_idx: hint_tensor}` residuals.

        `x`, `cap_feats`, `control_context` are per-sample lists (ragged batch).
        The returned dict maps entries of `control_layers_places` to hints scaled
        by `conditioning_scale`, to be added inside the base transformer.
        """
        if (
            self.t_scale is None
            or self.t_embedder is None
            or self.all_x_embedder is None
            or self.cap_embedder is None
            or self.rope_embedder is None
            or self.noise_refiner is None
            or self.context_refiner is None
            or self.x_pad_token is None
            or self.cap_pad_token is None
        ):
            raise ValueError(
                "Required modules are `None`, use `from_transformer` to share required modules from `transformer`."
            )

        assert patch_size in self.config.all_patch_size
        assert f_patch_size in self.config.all_f_patch_size

        bsz = len(x)
        device = x[0].device
        t = t * self.t_scale
        t = self.t_embedder(t)

        (
            x,
            cap_feats,
            x_size,
            x_pos_ids,
            cap_pos_ids,
            x_inner_pad_mask,
            cap_inner_pad_mask,
        ) = self.patchify_and_embed(x, cap_feats, patch_size, f_patch_size)
        x_item_seqlens = [len(_) for _ in x]
        assert all(_ % SEQ_MULTI_OF == 0 for _ in x_item_seqlens)
        x_max_item_seqlen = max(x_item_seqlens)

        # Control stream: patchify + embed, replace pad positions with the shared
        # pad token, then pad to a rectangular (bsz, max_seqlen, dim) batch.
        # Reuses the main stream's pad masks/seqlens (layouts match by construction).
        control_context = self.patchify(control_context, patch_size, f_patch_size)
        control_context = torch.cat(control_context, dim=0)
        control_context = self.control_all_x_embedder[f"{patch_size}-{f_patch_size}"](control_context)
        control_context[torch.cat(x_inner_pad_mask)] = self.x_pad_token
        control_context = list(control_context.split(x_item_seqlens, dim=0))
        control_context = pad_sequence(control_context, batch_first=True, padding_value=0.0)

        # x embed & refine
        x = torch.cat(x, dim=0)
        x = self.all_x_embedder[f"{patch_size}-{f_patch_size}"](x)
        # Match t_embedder output dtype to x for layerwise casting compatibility
        adaln_input = t.type_as(x)
        x[torch.cat(x_inner_pad_mask)] = self.x_pad_token
        x = list(x.split(x_item_seqlens, dim=0))
        x_freqs_cis = list(self.rope_embedder(torch.cat(x_pos_ids, dim=0)).split([len(_) for _ in x_pos_ids], dim=0))
        x = pad_sequence(x, batch_first=True, padding_value=0.0)
        x_freqs_cis = pad_sequence(x_freqs_cis, batch_first=True, padding_value=0.0)
        # Clarify the length matches to satisfy Dynamo due to "Symbolic Shape Inference" to avoid compilation errors
        x_freqs_cis = x_freqs_cis[:, : x.shape[1]]
        x_attn_mask = torch.zeros((bsz, x_max_item_seqlen), dtype=torch.bool, device=device)
        for i, seq_len in enumerate(x_item_seqlens):
            x_attn_mask[i, :seq_len] = 1

        if self.add_control_noise_refiner is not None:
            # Run a control-style refiner over (control_context, x) and extract
            # per-layer hints to be injected during the main noise refiner below.
            if self.add_control_noise_refiner == "control_layers":
                layers = self.control_layers
            elif self.add_control_noise_refiner == "control_noise_refiner":
                layers = self.control_noise_refiner
            else:
                raise ValueError(f"Unsupported `add_control_noise_refiner` type: {self.add_control_noise_refiner}.")
            for layer in layers:
                if torch.is_grad_enabled() and self.gradient_checkpointing:
                    control_context = self._gradient_checkpointing_func(
                        layer, control_context, x, x_attn_mask, x_freqs_cis, adaln_input
                    )
                else:
                    control_context = layer(control_context, x, x_attn_mask, x_freqs_cis, adaln_input)
            # Stacked output: [hint_0, ..., hint_{n-1}, final control state].
            hints = torch.unbind(control_context)[:-1]
            control_context = torch.unbind(control_context)[-1]
            noise_refiner_block_samples = {
                layer_idx: hints[idx] * conditioning_scale
                for idx, layer_idx in enumerate(self.control_refiner_layers_places)
            }
        else:
            noise_refiner_block_samples = None

        if torch.is_grad_enabled() and self.gradient_checkpointing:
            for layer_idx, layer in enumerate(self.noise_refiner):
                x = self._gradient_checkpointing_func(layer, x, x_attn_mask, x_freqs_cis, adaln_input)
                if noise_refiner_block_samples is not None:
                    if layer_idx in noise_refiner_block_samples:
                        x = x + noise_refiner_block_samples[layer_idx]
        else:
            for layer_idx, layer in enumerate(self.noise_refiner):
                x = layer(x, x_attn_mask, x_freqs_cis, adaln_input)
                if noise_refiner_block_samples is not None:
                    if layer_idx in noise_refiner_block_samples:
                        x = x + noise_refiner_block_samples[layer_idx]

        # cap embed & refine
        cap_item_seqlens = [len(_) for _ in cap_feats]
        cap_max_item_seqlen = max(cap_item_seqlens)
        cap_feats = torch.cat(cap_feats, dim=0)
        cap_feats = self.cap_embedder(cap_feats)
        cap_feats[torch.cat(cap_inner_pad_mask)] = self.cap_pad_token
        cap_feats = list(cap_feats.split(cap_item_seqlens, dim=0))
        cap_freqs_cis = list(
            self.rope_embedder(torch.cat(cap_pos_ids, dim=0)).split([len(_) for _ in cap_pos_ids], dim=0)
        )
        cap_feats = pad_sequence(cap_feats, batch_first=True, padding_value=0.0)
        cap_freqs_cis = pad_sequence(cap_freqs_cis, batch_first=True, padding_value=0.0)
        # Clarify the length matches to satisfy Dynamo due to "Symbolic Shape Inference" to avoid compilation errors
        cap_freqs_cis = cap_freqs_cis[:, : cap_feats.shape[1]]
        cap_attn_mask = torch.zeros((bsz, cap_max_item_seqlen), dtype=torch.bool, device=device)
        for i, seq_len in enumerate(cap_item_seqlens):
            cap_attn_mask[i, :seq_len] = 1

        if torch.is_grad_enabled() and self.gradient_checkpointing:
            for layer in self.context_refiner:
                cap_feats = self._gradient_checkpointing_func(layer, cap_feats, cap_attn_mask, cap_freqs_cis)
        else:
            for layer in self.context_refiner:
                cap_feats = layer(cap_feats, cap_attn_mask, cap_freqs_cis)

        # unified — concatenate each sample's (unpadded) image tokens and caption tokens.
        unified = []
        unified_freqs_cis = []
        for i in range(bsz):
            x_len = x_item_seqlens[i]
            cap_len = cap_item_seqlens[i]
            unified.append(torch.cat([x[i][:x_len], cap_feats[i][:cap_len]]))
            unified_freqs_cis.append(torch.cat([x_freqs_cis[i][:x_len], cap_freqs_cis[i][:cap_len]]))
        unified_item_seqlens = [a + b for a, b in zip(cap_item_seqlens, x_item_seqlens)]
        assert unified_item_seqlens == [len(_) for _ in unified]
        unified_max_item_seqlen = max(unified_item_seqlens)

        unified = pad_sequence(unified, batch_first=True, padding_value=0.0)
        unified_freqs_cis = pad_sequence(unified_freqs_cis, batch_first=True, padding_value=0.0)
        unified_attn_mask = torch.zeros((bsz, unified_max_item_seqlen), dtype=torch.bool, device=device)
        for i, seq_len in enumerate(unified_item_seqlens):
            unified_attn_mask[i, :seq_len] = 1

        ## ControlNet start
        if not self.add_control_noise_refiner:
            # Plain-transformer refiner path: refine the control stream alone.
            if torch.is_grad_enabled() and self.gradient_checkpointing:
                for layer in self.control_noise_refiner:
                    control_context = self._gradient_checkpointing_func(
                        layer, control_context, x_attn_mask, x_freqs_cis, adaln_input
                    )
            else:
                for layer in self.control_noise_refiner:
                    control_context = layer(control_context, x_attn_mask, x_freqs_cis, adaln_input)

        # unified control stream: control image tokens + refined caption tokens.
        control_context_unified = []
        for i in range(bsz):
            x_len = x_item_seqlens[i]
            cap_len = cap_item_seqlens[i]
            control_context_unified.append(torch.cat([control_context[i][:x_len], cap_feats[i][:cap_len]]))
        control_context_unified = pad_sequence(control_context_unified, batch_first=True, padding_value=0.0)

        for layer in self.control_layers:
            if torch.is_grad_enabled() and self.gradient_checkpointing:
                control_context_unified = self._gradient_checkpointing_func(
                    layer, control_context_unified, unified, unified_attn_mask, unified_freqs_cis, adaln_input
                )
            else:
                control_context_unified = layer(
                    control_context_unified, unified, unified_attn_mask, unified_freqs_cis, adaln_input
                )

        # Drop the final control state; keep the per-layer `after_proj` hints.
        hints = torch.unbind(control_context_unified)[:-1]
        controlnet_block_samples = {
            layer_idx: hints[idx] * conditioning_scale
            for idx, layer_idx in enumerate(self.control_layers_places)
        }
        return controlnet_block_samples
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/models/controlnets/controlnet_z_image.py", "license": "Apache License 2.0", "lines": 726, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:src/diffusers/pipelines/z_image/pipeline_z_image_controlnet.py
# Copyright 2025 Alibaba Z-Image Team and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect from typing import Any, Callable import torch from transformers import AutoTokenizer, PreTrainedModel from ...image_processor import PipelineImageInput, VaeImageProcessor from ...loaders import FromSingleFileMixin from ...models.autoencoders import AutoencoderKL from ...models.controlnets import ZImageControlNetModel from ...models.transformers import ZImageTransformer2DModel from ...pipelines.pipeline_utils import DiffusionPipeline from ...schedulers import FlowMatchEulerDiscreteScheduler from ...utils import logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from .pipeline_output import ZImagePipelineOutput logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py >>> import torch >>> from diffusers import ZImageControlNetPipeline >>> from diffusers import ZImageControlNetModel >>> from diffusers.utils import load_image >>> from huggingface_hub import hf_hub_download >>> controlnet = ZImageControlNetModel.from_single_file( ... hf_hub_download( ... "alibaba-pai/Z-Image-Turbo-Fun-Controlnet-Union", ... filename="Z-Image-Turbo-Fun-Controlnet-Union.safetensors", ... ), ... torch_dtype=torch.bfloat16, ... 
) >>> # 2.1 >>> # controlnet = ZImageControlNetModel.from_single_file( >>> # hf_hub_download( >>> # "alibaba-pai/Z-Image-Turbo-Fun-Controlnet-Union-2.0", >>> # filename="Z-Image-Turbo-Fun-Controlnet-Union-2.1.safetensors", >>> # ), >>> # torch_dtype=torch.bfloat16, >>> # ) >>> # 2.0 >>> # controlnet = ZImageControlNetModel.from_single_file( >>> # hf_hub_download( >>> # "alibaba-pai/Z-Image-Turbo-Fun-Controlnet-Union-2.0", >>> # filename="Z-Image-Turbo-Fun-Controlnet-Union-2.0.safetensors", >>> # ), >>> # torch_dtype=torch.bfloat16, >>> # ) >>> pipe = ZImageControlNetPipeline.from_pretrained( ... "Tongyi-MAI/Z-Image-Turbo", controlnet=controlnet, torch_dtype=torch.bfloat16 ... ) >>> pipe.to("cuda") >>> # Optionally, set the attention backend to flash-attn 2 or 3, default is SDPA in PyTorch. >>> # (1) Use flash attention 2 >>> # pipe.transformer.set_attention_backend("flash") >>> # (2) Use flash attention 3 >>> # pipe.transformer.set_attention_backend("_flash_3") >>> control_image = load_image( ... "https://huggingface.co/alibaba-pai/Z-Image-Turbo-Fun-Controlnet-Union/resolve/main/asset/pose.jpg?download=true" ... ) >>> prompt = "一位年轻女子站在阳光明媚的海岸线上,白裙在轻拂的海风中微微飘动。她拥有一头鲜艳的紫色长发,在风中轻盈舞动,发间系着一个精致的黑色蝴蝶结,与身后柔和的蔚蓝天空形成鲜明对比。她面容清秀,眉目精致,透着一股甜美的青春气息;神情柔和,略带羞涩,目光静静地凝望着远方的地平线,双手自然交叠于身前,仿佛沉浸在思绪之中。在她身后,是辽阔无垠、波光粼粼的大海,阳光洒在海面上,映出温暖的金色光晕。" >>> image = pipe( ... prompt, ... control_image=control_image, ... controlnet_conditioning_scale=0.75, ... height=1728, ... width=992, ... num_inference_steps=9, ... guidance_scale=0.0, ... generator=torch.Generator("cuda").manual_seed(43), ... 
).images[0] >>> image.save("zimage.png") ``` """ # Copied from diffusers.pipelines.flux.pipeline_flux.calculate_shift def calculate_shift( image_seq_len, base_seq_len: int = 256, max_seq_len: int = 4096, base_shift: float = 0.5, max_shift: float = 1.15, ): m = (max_shift - base_shift) / (max_seq_len - base_seq_len) b = base_shift - m * base_seq_len mu = image_seq_len * m + b return mu # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents def retrieve_latents( encoder_output: torch.Tensor, generator: torch.Generator | None = None, sample_mode: str = "sample" ): if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": return encoder_output.latent_dist.sample(generator) elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": return encoder_output.latent_dist.mode() elif hasattr(encoder_output, "latents"): return encoder_output.latents else: raise AttributeError("Could not access latents of provided encoder_output") # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps def retrieve_timesteps( scheduler, num_inference_steps: int | None = None, device: str | torch.device | None = None, timesteps: list[int] | None = None, sigmas: list[float] | None = None, **kwargs, ): r""" Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. Args: scheduler (`SchedulerMixin`): The scheduler to get timesteps from. num_inference_steps (`int`): The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` must be `None`. device (`str` or `torch.device`, *optional*): The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. timesteps (`list[int]`, *optional*): Custom timesteps used to override the timestep spacing strategy of the scheduler. 
If `timesteps` is passed, `num_inference_steps` and `sigmas` must be `None`. sigmas (`list[float]`, *optional*): Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, `num_inference_steps` and `timesteps` must be `None`. Returns: `tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the second element is the number of inference steps. """ if timesteps is not None and sigmas is not None: raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") if timesteps is not None: accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" timestep schedules. Please check whether you are using the correct scheduler." ) scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif sigmas is not None: accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accept_sigmas: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" sigmas schedules. Please check whether you are using the correct scheduler." 
) scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return timesteps, num_inference_steps class ZImageControlNetPipeline(DiffusionPipeline, FromSingleFileMixin): model_cpu_offload_seq = "text_encoder->transformer->vae" _optional_components = [] _callback_tensor_inputs = ["latents", "prompt_embeds"] def __init__( self, scheduler: FlowMatchEulerDiscreteScheduler, vae: AutoencoderKL, text_encoder: PreTrainedModel, tokenizer: AutoTokenizer, transformer: ZImageTransformer2DModel, controlnet: ZImageControlNetModel, ): super().__init__() controlnet = ZImageControlNetModel.from_transformer(controlnet, transformer) self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, scheduler=scheduler, transformer=transformer, controlnet=controlnet, ) self.vae_scale_factor = ( 2 ** (len(self.vae.config.block_out_channels) - 1) if hasattr(self, "vae") and self.vae is not None else 8 ) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor * 2) def encode_prompt( self, prompt: str | list[str], device: torch.device | None = None, do_classifier_free_guidance: bool = True, negative_prompt: str | list[str] | None = None, prompt_embeds: list[torch.FloatTensor] | None = None, negative_prompt_embeds: torch.FloatTensor | None = None, max_sequence_length: int = 512, ): prompt = [prompt] if isinstance(prompt, str) else prompt prompt_embeds = self._encode_prompt( prompt=prompt, device=device, prompt_embeds=prompt_embeds, max_sequence_length=max_sequence_length, ) if do_classifier_free_guidance: if negative_prompt is None: negative_prompt = ["" for _ in prompt] else: negative_prompt = [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt assert len(prompt) == len(negative_prompt) negative_prompt_embeds = self._encode_prompt( 
prompt=negative_prompt, device=device, prompt_embeds=negative_prompt_embeds, max_sequence_length=max_sequence_length, ) else: negative_prompt_embeds = [] return prompt_embeds, negative_prompt_embeds def _encode_prompt( self, prompt: str | list[str], device: torch.device | None = None, prompt_embeds: list[torch.FloatTensor] | None = None, max_sequence_length: int = 512, ) -> list[torch.FloatTensor]: device = device or self._execution_device if prompt_embeds is not None: return prompt_embeds if isinstance(prompt, str): prompt = [prompt] for i, prompt_item in enumerate(prompt): messages = [ {"role": "user", "content": prompt_item}, ] prompt_item = self.tokenizer.apply_chat_template( messages, tokenize=False, add_generation_prompt=True, enable_thinking=True, ) prompt[i] = prompt_item text_inputs = self.tokenizer( prompt, padding="max_length", max_length=max_sequence_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids.to(device) prompt_masks = text_inputs.attention_mask.to(device).bool() prompt_embeds = self.text_encoder( input_ids=text_input_ids, attention_mask=prompt_masks, output_hidden_states=True, ).hidden_states[-2] embeddings_list = [] for i in range(len(prompt_embeds)): embeddings_list.append(prompt_embeds[i][prompt_masks[i]]) return embeddings_list def prepare_latents( self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None, ): height = 2 * (int(height) // (self.vae_scale_factor * 2)) width = 2 * (int(width) // (self.vae_scale_factor * 2)) shape = (batch_size, num_channels_latents, height, width) if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: if latents.shape != shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") latents = latents.to(device) return latents # Copied from diffusers.pipelines.controlnet_sd3.pipeline_stable_diffusion_3_controlnet.StableDiffusion3ControlNetPipeline.prepare_image 
    def prepare_image(
        self,
        image,
        width,
        height,
        batch_size,
        num_images_per_prompt,
        device,
        dtype,
        do_classifier_free_guidance=False,
        guess_mode=False,
    ):
        if isinstance(image, torch.Tensor):
            pass
        else:
            image = self.image_processor.preprocess(image, height=height, width=width)

        image_batch_size = image.shape[0]

        if image_batch_size == 1:
            repeat_by = batch_size
        else:
            # image batch size is the same as prompt batch size
            repeat_by = num_images_per_prompt

        image = image.repeat_interleave(repeat_by, dim=0)

        image = image.to(device=device, dtype=dtype)

        if do_classifier_free_guidance and not guess_mode:
            image = torch.cat([image] * 2)

        return image

    @property
    def guidance_scale(self):
        # Set per-call in `__call__`; read by `do_classifier_free_guidance`.
        return self._guidance_scale

    @property
    def do_classifier_free_guidance(self):
        # CFG is considered enabled only for guidance_scale > 1 (a scale of 0/1 runs a single,
        # conditional-only forward pass per step).
        return self._guidance_scale > 1

    @property
    def joint_attention_kwargs(self):
        return self._joint_attention_kwargs

    @property
    def num_timesteps(self):
        return self._num_timesteps

    @property
    def interrupt(self):
        return self._interrupt

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        prompt: str | list[str] = None,
        height: int | None = None,
        width: int | None = None,
        num_inference_steps: int = 50,
        sigmas: list[float] | None = None,
        guidance_scale: float = 5.0,
        control_image: PipelineImageInput = None,
        controlnet_conditioning_scale: float | list[float] = 0.75,
        cfg_normalization: bool = False,
        cfg_truncation: float = 1.0,
        negative_prompt: str | list[str] | None = None,
        num_images_per_prompt: int | None = 1,
        generator: torch.Generator | list[torch.Generator] | None = None,
        latents: torch.FloatTensor | None = None,
        prompt_embeds: list[torch.FloatTensor] | None = None,
        negative_prompt_embeds: list[torch.FloatTensor] | None = None,
        output_type: str | None = "pil",
        return_dict: bool = True,
        joint_attention_kwargs: dict[str, Any] | None = None,
        callback_on_step_end: Callable[[int, int], None] | None = None,
        callback_on_step_end_tensor_inputs: list[str] = ["latents"],  # mutable default is the established diffusers convention; read-only here
        max_sequence_length: int = 512,
    ):
        r"""
        Function invoked when calling the pipeline for generation.

        Args:
            prompt (`str` or `list[str]`, *optional*):
                The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
                instead.
            height (`int`, *optional*, defaults to 1024):
                The height in pixels of the generated image. Must be divisible by `vae_scale_factor * 2`.
            width (`int`, *optional*, defaults to 1024):
                The width in pixels of the generated image. Must be divisible by `vae_scale_factor * 2`.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            sigmas (`list[float]`, *optional*):
                Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in
                their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
                passed will be used.
            guidance_scale (`float`, *optional*, defaults to 5.0):
                Guidance scale as defined in [Classifier-Free Diffusion
                Guidance](https://arxiv.org/abs/2207.12598). `guidance_scale` is defined as `w` of equation 2. of
                [Imagen Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting
                `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to
                the text `prompt`, usually at the expense of lower image quality.
            control_image (`PipelineImageInput`):
                The ControlNet conditioning image. It is resized to `height` x `width`, encoded with the VAE, and
                passed to the ControlNet at every denoising step. For ControlNet model versions whose
                `control_in_dim` exceeds the transformer's latent channel count (version 2.0), the encoded latents
                are zero-padded along the channel dimension.
            controlnet_conditioning_scale (`float` or `list[float]`, *optional*, defaults to 0.75):
                Scale applied to the ControlNet block outputs before they are injected into the transformer.
            cfg_normalization (`bool`, *optional*, defaults to False):
                Whether to renormalize the classifier-free-guided prediction so that its norm does not exceed the
                norm of the conditional (positive) prediction. Note that the value is also cast to `float` and used
                as the norm-cap multiplier, so a numeric value > 0 sets the cap ratio.
            cfg_truncation (`float`, *optional*, defaults to 1.0):
                Normalized-time threshold (0 at the start of sampling, 1 at the end) after which classifier-free
                guidance is disabled. Values <= 1 enable truncation; steps whose normalized time exceeds this value
                run a single conditional forward pass only.
            negative_prompt (`str` or `list[str]`, *optional*):
                The prompt or prompts not to guide the image generation. If not defined, one has to pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale`
                is less than `1`).
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            generator (`torch.Generator` or `list[torch.Generator]`, *optional*):
                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
                to make generation deterministic.
            latents (`torch.FloatTensor`, *optional*):
                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a
                latents tensor will be generated by sampling using the supplied random `generator`.
            prompt_embeds (`list[torch.FloatTensor]`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If
                not provided, text embeddings will be generated from `prompt` input argument.
            negative_prompt_embeds (`list[torch.FloatTensor]`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
                argument.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generate image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.stable_diffusion.ZImagePipelineOutput`] instead of a plain
                tuple.
            joint_attention_kwargs (`dict`, *optional*):
                A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
                `self.processor` in
                [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
            callback_on_step_end (`Callable`, *optional*):
                A function that calls at the end of each denoising steps during the inference. The function is called
                with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep:
                int, callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
                `callback_on_step_end_tensor_inputs`.
            callback_on_step_end_tensor_inputs (`List`, *optional*):
                The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
                will be passed as `callback_kwargs` argument. You will only be able to include variables listed in
                the `._callback_tensor_inputs` attribute of your pipeline class.
            max_sequence_length (`int`, *optional*, defaults to 512):
                Maximum sequence length to use with the `prompt`.

        Examples:

        Returns:
            [`~pipelines.z_image.ZImagePipelineOutput`] or `tuple`: [`~pipelines.z_image.ZImagePipelineOutput`] if
            `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is a list with the
            generated images.
        """
        height = height or 1024
        width = width or 1024

        # 1. Validate spatial dimensions: the VAE downsamples by vae_scale_factor and latents are
        # additionally packed into 2x2 patches, so both sides must divide evenly.
        vae_scale = self.vae_scale_factor * 2
        if height % vae_scale != 0:
            raise ValueError(
                f"Height must be divisible by {vae_scale} (got {height}). "
                f"Please adjust the height to a multiple of {vae_scale}."
            )
        if width % vae_scale != 0:
            raise ValueError(
                f"Width must be divisible by {vae_scale} (got {width}). "
                f"Please adjust the width to a multiple of {vae_scale}."
            )

        device = self._execution_device

        self._guidance_scale = guidance_scale
        self._joint_attention_kwargs = joint_attention_kwargs
        self._interrupt = False
        self._cfg_normalization = cfg_normalization
        self._cfg_truncation = cfg_truncation

        # 2. Define call parameters
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = len(prompt_embeds)

        # If prompt_embeds is provided and prompt is None, skip encoding
        if prompt_embeds is not None and prompt is None:
            if self.do_classifier_free_guidance and negative_prompt_embeds is None:
                raise ValueError(
                    "When `prompt_embeds` is provided without `prompt`, "
                    "`negative_prompt_embeds` must also be provided for classifier-free guidance."
                )
        else:
            (
                prompt_embeds,
                negative_prompt_embeds,
            ) = self.encode_prompt(
                prompt=prompt,
                negative_prompt=negative_prompt,
                do_classifier_free_guidance=self.do_classifier_free_guidance,
                prompt_embeds=prompt_embeds,
                negative_prompt_embeds=negative_prompt_embeds,
                device=device,
                max_sequence_length=max_sequence_length,
            )

        # 4. Prepare latent variables
        num_channels_latents = self.transformer.in_channels
        control_image = self.prepare_image(
            image=control_image,
            width=width,
            height=height,
            batch_size=batch_size * num_images_per_prompt,
            num_images_per_prompt=num_images_per_prompt,
            device=device,
            dtype=self.vae.dtype,
        )
        # The preprocessed control image defines the effective pixel dimensions from here on.
        height, width = control_image.shape[-2:]
        # Deterministic (mode) encoding of the control image, then the standard latent shift/scale.
        control_image = retrieve_latents(self.vae.encode(control_image), generator=generator, sample_mode="argmax")
        control_image = (control_image - self.vae.config.shift_factor) * self.vae.config.scaling_factor
        # Insert a singleton frame axis: the ControlNet consumes 5D (B, C, F, H, W) latents.
        control_image = control_image.unsqueeze(2)

        if num_channels_latents != self.controlnet.config.control_in_dim:
            # For model version 2.0: the ControlNet expects more input channels than the VAE
            # produces, so zero-pad the extra channels.
            control_image = torch.cat(
                [
                    control_image,
                    torch.zeros(
                        control_image.shape[0],
                        self.controlnet.config.control_in_dim - num_channels_latents,
                        *control_image.shape[2:],
                    ).to(device=control_image.device, dtype=control_image.dtype),
                ],
                dim=1,
            )

        latents = self.prepare_latents(
            batch_size * num_images_per_prompt,
            num_channels_latents,
            height,
            width,
            torch.float32,
            device,
            generator,
            latents,
        )

        # Repeat prompt_embeds for num_images_per_prompt (embeddings are per-sample lists of
        # variable-length tensors, so they are duplicated element-wise rather than batched).
        if num_images_per_prompt > 1:
            prompt_embeds = [pe for pe in prompt_embeds for _ in range(num_images_per_prompt)]
            if self.do_classifier_free_guidance and negative_prompt_embeds:
                negative_prompt_embeds = [npe for npe in negative_prompt_embeds for _ in range(num_images_per_prompt)]

        actual_batch_size = batch_size * num_images_per_prompt
        # Token count after 2x2 latent patchification; drives the timestep-shift schedule below.
        image_seq_len = (latents.shape[2] // 2) * (latents.shape[3] // 2)

        # 5. Prepare timesteps
        mu = calculate_shift(
            image_seq_len,
            self.scheduler.config.get("base_image_seq_len", 256),
            self.scheduler.config.get("max_image_seq_len", 4096),
            self.scheduler.config.get("base_shift", 0.5),
            self.scheduler.config.get("max_shift", 1.15),
        )
        self.scheduler.sigma_min = 0.0
        scheduler_kwargs = {"mu": mu}
        timesteps, num_inference_steps = retrieve_timesteps(
            self.scheduler,
            num_inference_steps,
            device,
            sigmas=sigmas,
            **scheduler_kwargs,
        )
        num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
        self._num_timesteps = len(timesteps)

        # 6. Denoising loop
        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                if self.interrupt:
                    continue

                # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
                timestep = t.expand(latents.shape[0])
                # Invert and normalize the scheduler timestep: t ~ 1000 -> 0 at the start of
                # sampling, -> 1 at the end. This is what the transformer/ControlNet consume.
                timestep = (1000 - timestep) / 1000
                # Normalized time for time-aware config (0 at start, 1 at end)
                t_norm = timestep[0].item()

                # Handle cfg truncation: past the `cfg_truncation` point of the schedule,
                # guidance is switched off entirely (scale forced to 0).
                current_guidance_scale = self.guidance_scale
                if (
                    self.do_classifier_free_guidance
                    and self._cfg_truncation is not None
                    and float(self._cfg_truncation) <= 1
                ):
                    if t_norm > self._cfg_truncation:
                        current_guidance_scale = 0.0

                # Run CFG only if configured AND scale is non-zero
                apply_cfg = self.do_classifier_free_guidance and current_guidance_scale > 0

                if apply_cfg:
                    # Batch conditional + unconditional in one pass: first half of the batch is
                    # the positive (conditional) branch, second half the negative branch —
                    # matching the `prompt_embeds + negative_prompt_embeds` ordering below.
                    latents_typed = latents.to(self.transformer.dtype)
                    latent_model_input = latents_typed.repeat(2, 1, 1, 1)
                    prompt_embeds_model_input = prompt_embeds + negative_prompt_embeds
                    timestep_model_input = timestep.repeat(2)
                    control_image_input = control_image.repeat(2, 1, 1, 1, 1)
                else:
                    latent_model_input = latents.to(self.transformer.dtype)
                    prompt_embeds_model_input = prompt_embeds
                    timestep_model_input = timestep
                    control_image_input = control_image

                # Transformer/ControlNet take a per-sample list of 5D latents (frame axis added).
                latent_model_input = latent_model_input.unsqueeze(2)
                latent_model_input_list = list(latent_model_input.unbind(dim=0))

                controlnet_block_samples = self.controlnet(
                    latent_model_input_list,
                    timestep_model_input,
                    prompt_embeds_model_input,
                    control_image_input,
                    conditioning_scale=controlnet_conditioning_scale,
                )

                model_out_list = self.transformer(
                    latent_model_input_list,
                    timestep_model_input,
                    prompt_embeds_model_input,
                    controlnet_block_samples=controlnet_block_samples,
                )[0]

                if apply_cfg:
                    # Perform CFG: split the doubled batch back into positive/negative halves.
                    pos_out = model_out_list[:actual_batch_size]
                    neg_out = model_out_list[actual_batch_size:]
                    noise_pred = []
                    for j in range(actual_batch_size):
                        pos = pos_out[j].float()
                        neg = neg_out[j].float()
                        pred = pos + current_guidance_scale * (pos - neg)

                        # Renormalization: cap the guided prediction's norm at
                        # `cfg_normalization` times the norm of the positive prediction.
                        if self._cfg_normalization and float(self._cfg_normalization) > 0.0:
                            ori_pos_norm = torch.linalg.vector_norm(pos)
                            new_pos_norm = torch.linalg.vector_norm(pred)
                            max_new_norm = ori_pos_norm * float(self._cfg_normalization)
                            if new_pos_norm > max_new_norm:
                                pred = pred * (max_new_norm / new_pos_norm)
                        noise_pred.append(pred)
                    noise_pred = torch.stack(noise_pred, dim=0)
                else:
                    # NOTE: the comprehension variable `t` is scoped to the comprehension and
                    # does not shadow the loop timestep `t` used by `scheduler.step` below.
                    noise_pred = torch.stack([t.float() for t in model_out_list], dim=0)

                # Drop the singleton frame axis added for the model call.
                noise_pred = noise_pred.squeeze(2)

                # Sign flip before the scheduler step — presumably the model predicts the flow in
                # the opposite direction of the FlowMatch scheduler's convention; TODO confirm.
                noise_pred = -noise_pred
                # compute the previous noisy sample x_t -> x_t-1
                latents = self.scheduler.step(noise_pred.to(torch.float32), t, latents, return_dict=False)[0]
                # The scheduler must keep latents in float32 for numerical stability.
                assert latents.dtype == torch.float32

                if callback_on_step_end is not None:
                    callback_kwargs = {}
                    for k in callback_on_step_end_tensor_inputs:
                        # Looked up from the local frame; only names in
                        # `_callback_tensor_inputs` are valid here.
                        callback_kwargs[k] = locals()[k]
                    callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)

                    latents = callback_outputs.pop("latents", latents)
                    prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
                    negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)

                # call the callback, if provided
                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                    progress_bar.update()

        if output_type == "latent":
            image = latents
        else:
            # Undo the latent scale/shift applied at encode time, then decode to pixels.
            latents = latents.to(self.vae.dtype)
            latents = (latents / self.vae.config.scaling_factor) + self.vae.config.shift_factor
            image = self.vae.decode(latents, return_dict=False)[0]
            image = self.image_processor.postprocess(image, output_type=output_type)

        # Offload all models
        self.maybe_free_model_hooks()

        if not return_dict:
            return (image,)

        return ZImagePipelineOutput(images=image)
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/pipelines/z_image/pipeline_z_image_controlnet.py", "license": "Apache License 2.0", "lines": 635, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:src/diffusers/pipelines/qwenimage/pipeline_qwenimage_layered.py
# Copyright 2025 Qwen-Image Team and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import math from typing import Any, Callable import numpy as np import torch from transformers import Qwen2_5_VLForConditionalGeneration, Qwen2Tokenizer, Qwen2VLProcessor from ...image_processor import PipelineImageInput, VaeImageProcessor from ...loaders import QwenImageLoraLoaderMixin from ...models import AutoencoderKLQwenImage, QwenImageTransformer2DModel from ...schedulers import FlowMatchEulerDiscreteScheduler from ...utils import is_torch_xla_available, logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline from .pipeline_output import QwenImagePipelineOutput if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py >>> import torch >>> from PIL import Image >>> from diffusers import QwenImageLayeredPipeline >>> from diffusers.utils import load_image >>> pipe = QwenImageLayeredPipeline.from_pretrained("Qwen/Qwen-Image-Layered", torch_dtype=torch.bfloat16) >>> pipe.to("cuda") >>> image = load_image( ... "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/yarn-art-pikachu.png" ... 
).convert("RGBA") >>> prompt = "" >>> # Depending on the variant being used, the pipeline call will slightly vary. >>> # Refer to the pipeline documentation for more details. >>> images = pipe( ... image, ... prompt, ... num_inference_steps=50, ... true_cfg_scale=4.0, ... layers=4, ... resolution=640, ... cfg_normalize=False, ... use_en_prompt=True, ... ).images[0] >>> for i, image in enumerate(images): ... image.save(f"{i}.out.png") ``` """ # Copied from diffusers.pipelines.qwenimage.pipeline_qwenimage.calculate_shift def calculate_shift( image_seq_len, base_seq_len: int = 256, max_seq_len: int = 4096, base_shift: float = 0.5, max_shift: float = 1.15, ): m = (max_shift - base_shift) / (max_seq_len - base_seq_len) b = base_shift - m * base_seq_len mu = image_seq_len * m + b return mu # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps def retrieve_timesteps( scheduler, num_inference_steps: int | None = None, device: str | torch.device | None = None, timesteps: list[int] | None = None, sigmas: list[float] | None = None, **kwargs, ): r""" Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. Args: scheduler (`SchedulerMixin`): The scheduler to get timesteps from. num_inference_steps (`int`): The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` must be `None`. device (`str` or `torch.device`, *optional*): The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. timesteps (`list[int]`, *optional*): Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, `num_inference_steps` and `sigmas` must be `None`. sigmas (`list[float]`, *optional*): Custom sigmas used to override the timestep spacing strategy of the scheduler. 
If `sigmas` is passed, `num_inference_steps` and `timesteps` must be `None`. Returns: `tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the second element is the number of inference steps. """ if timesteps is not None and sigmas is not None: raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") if timesteps is not None: accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" timestep schedules. Please check whether you are using the correct scheduler." ) scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif sigmas is not None: accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accept_sigmas: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" sigmas schedules. Please check whether you are using the correct scheduler." 
) scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return timesteps, num_inference_steps # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents def retrieve_latents( encoder_output: torch.Tensor, generator: torch.Generator | None = None, sample_mode: str = "sample" ): if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": return encoder_output.latent_dist.sample(generator) elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": return encoder_output.latent_dist.mode() elif hasattr(encoder_output, "latents"): return encoder_output.latents else: raise AttributeError("Could not access latents of provided encoder_output") # Copied from diffusers.pipelines.qwenimage.pipeline_qwenimage_edit_plus.calculate_dimensions def calculate_dimensions(target_area, ratio): width = math.sqrt(target_area * ratio) height = width / ratio width = round(width / 32) * 32 height = round(height / 32) * 32 return width, height class QwenImageLayeredPipeline(DiffusionPipeline, QwenImageLoraLoaderMixin): r""" The Qwen-Image-Layered pipeline for image decomposing. Args: transformer ([`QwenImageTransformer2DModel`]): Conditional Transformer (MMDiT) architecture to denoise the encoded image latents. scheduler ([`FlowMatchEulerDiscreteScheduler`]): A scheduler to be used in combination with `transformer` to denoise the encoded image latents. vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. text_encoder ([`Qwen2.5-VL-7B-Instruct`]): [Qwen2.5-VL-7B-Instruct](https://huggingface.co/Qwen/Qwen2.5-VL-7B-Instruct), specifically the [Qwen2.5-VL-7B-Instruct](https://huggingface.co/Qwen/Qwen2.5-VL-7B-Instruct) variant. 
tokenizer (`QwenTokenizer`): Tokenizer of class [CLIPTokenizer](https://huggingface.co/docs/transformers/en/model_doc/clip#transformers.CLIPTokenizer). """ model_cpu_offload_seq = "text_encoder->transformer->vae" _callback_tensor_inputs = ["latents", "prompt_embeds"] def __init__( self, scheduler: FlowMatchEulerDiscreteScheduler, vae: AutoencoderKLQwenImage, text_encoder: Qwen2_5_VLForConditionalGeneration, tokenizer: Qwen2Tokenizer, processor: Qwen2VLProcessor, transformer: QwenImageTransformer2DModel, ): super().__init__() self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, processor=processor, transformer=transformer, scheduler=scheduler, ) self.vae_scale_factor = 2 ** len(self.vae.temperal_downsample) if getattr(self, "vae", None) else 8 self.latent_channels = self.vae.config.z_dim if getattr(self, "vae", None) else 16 # QwenImage latents are turned into 2x2 patches and packed. This means the latent width and height has to be divisible # by the patch size. So the vae scale factor is multiplied by the patch size to account for this self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor * 2) self.vl_processor = processor self.tokenizer_max_length = 1024 self.prompt_template_encode = "<|im_start|>system\nDescribe the image by detailing the color, shape, size, texture, quantity, text, spatial relationships of the objects and background:<|im_end|>\n<|im_start|>user\n{}<|im_end|>\n<|im_start|>assistant\n" self.prompt_template_encode_start_idx = 34 self.image_caption_prompt_cn = """<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n<|im_start|>user\n# 图像标注器\n你是一个专业的图像标注器。请基于输入图像,撰写图注:\n1. 使用自然、描述性的语言撰写图注,不要使用结构化形式或富文本形式。\n2. 通过加入以下内容,丰富图注细节:\n - 对象的属性:如数量、颜色、形状、大小、位置、材质、状态、动作等\n - 对象间的视觉关系:如空间关系、功能关系、动作关系、从属关系、比较关系、因果关系等\n - 环境细节:例如天气、光照、颜色、纹理、气氛等\n - 文字内容:识别图像中清晰可见的文字,不做翻译和解释,用引号在图注中强调\n3. 
保持真实性与准确性:\n - 不要使用笼统的描述\n - 描述图像中所有可见的信息,但不要加入没有在图像中出现的内容\n<|vision_start|><|image_pad|><|vision_end|><|im_end|>\n<|im_start|>assistant\n""" self.image_caption_prompt_en = """<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n<|im_start|>user\n# Image Annotator\nYou are a professional image annotator. Please write an image caption based on the input image:\n1. Write the caption using natural, descriptive language without structured formats or rich text.\n2. Enrich caption details by including: \n - Object attributes, such as quantity, color, shape, size, material, state, position, actions, and so on\n - Vision Relations between objects, such as spatial relations, functional relations, possessive relations, attachment relations, action relations, comparative relations, causal relations, and so on\n - Environmental details, such as weather, lighting, colors, textures, atmosphere, and so on\n - Identify the text clearly visible in the image, without translation or explanation, and highlight it in the caption with quotation marks\n3. 
Maintain authenticity and accuracy:\n - Avoid generalizations\n - Describe all visible information in the image, while do not add information not explicitly shown in the image\n<|vision_start|><|image_pad|><|vision_end|><|im_end|>\n<|im_start|>assistant\n""" self.default_sample_size = 128 # Copied from diffusers.pipelines.qwenimage.pipeline_qwenimage.QwenImagePipeline._extract_masked_hidden def _extract_masked_hidden(self, hidden_states: torch.Tensor, mask: torch.Tensor): bool_mask = mask.bool() valid_lengths = bool_mask.sum(dim=1) selected = hidden_states[bool_mask] split_result = torch.split(selected, valid_lengths.tolist(), dim=0) return split_result def _get_qwen_prompt_embeds( self, prompt: str | list[str] = None, device: torch.device | None = None, dtype: torch.dtype | None = None, ): device = device or self._execution_device dtype = dtype or self.text_encoder.dtype prompt = [prompt] if isinstance(prompt, str) else prompt template = self.prompt_template_encode drop_idx = self.prompt_template_encode_start_idx txt = [template.format(e) for e in prompt] txt_tokens = self.tokenizer( txt, padding=True, return_tensors="pt", ).to(device) encoder_hidden_states = self.text_encoder( input_ids=txt_tokens.input_ids, attention_mask=txt_tokens.attention_mask, output_hidden_states=True, ) hidden_states = encoder_hidden_states.hidden_states[-1] split_hidden_states = self._extract_masked_hidden(hidden_states, txt_tokens.attention_mask) split_hidden_states = [e[drop_idx:] for e in split_hidden_states] attn_mask_list = [torch.ones(e.size(0), dtype=torch.long, device=e.device) for e in split_hidden_states] max_seq_len = max([e.size(0) for e in split_hidden_states]) prompt_embeds = torch.stack( [torch.cat([u, u.new_zeros(max_seq_len - u.size(0), u.size(1))]) for u in split_hidden_states] ) encoder_attention_mask = torch.stack( [torch.cat([u, u.new_zeros(max_seq_len - u.size(0))]) for u in attn_mask_list] ) prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) return 
prompt_embeds, encoder_attention_mask # Copied from diffusers.pipelines.qwenimage.pipeline_qwenimage.QwenImagePipeline.encode_prompt def encode_prompt( self, prompt: str | list[str], device: torch.device | None = None, num_images_per_prompt: int = 1, prompt_embeds: torch.Tensor | None = None, prompt_embeds_mask: torch.Tensor | None = None, max_sequence_length: int = 1024, ): r""" Args: prompt (`str` or `list[str]`, *optional*): prompt to be encoded device: (`torch.device`): torch device num_images_per_prompt (`int`): number of images that should be generated per prompt prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. """ device = device or self._execution_device prompt = [prompt] if isinstance(prompt, str) else prompt batch_size = len(prompt) if prompt_embeds is None else prompt_embeds.shape[0] if prompt_embeds is None: prompt_embeds, prompt_embeds_mask = self._get_qwen_prompt_embeds(prompt, device) prompt_embeds = prompt_embeds[:, :max_sequence_length] _, seq_len, _ = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) if prompt_embeds_mask is not None: prompt_embeds_mask = prompt_embeds_mask[:, :max_sequence_length] prompt_embeds_mask = prompt_embeds_mask.repeat(1, num_images_per_prompt, 1) prompt_embeds_mask = prompt_embeds_mask.view(batch_size * num_images_per_prompt, seq_len) if prompt_embeds_mask.all(): prompt_embeds_mask = None return prompt_embeds, prompt_embeds_mask def get_image_caption(self, prompt_image, use_en_prompt=True, device=None): if use_en_prompt: prompt = self.image_caption_prompt_en else: prompt = self.image_caption_prompt_cn model_inputs = self.vl_processor( text=prompt, images=prompt_image, padding=True, return_tensors="pt", ).to(device) generated_ids = 
self.text_encoder.generate(**model_inputs, max_new_tokens=512)
        # Strip the prompt tokens so only the newly generated caption remains.
        generated_ids_trimmed = [
            out_ids[len(in_ids) :] for in_ids, out_ids in zip(model_inputs.input_ids, generated_ids)
        ]
        output_text = self.vl_processor.batch_decode(
            generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )[0]
        return output_text.strip()

    def check_inputs(
        self,
        height,
        width,
        negative_prompt=None,
        prompt_embeds=None,
        negative_prompt_embeds=None,
        prompt_embeds_mask=None,
        negative_prompt_embeds_mask=None,
        callback_on_step_end_tensor_inputs=None,
        max_sequence_length=None,
    ):
        # Validate user-facing arguments before any heavy work. Inconsistent argument
        # combinations raise ValueError; fixable dimension issues only warn.
        if height % (self.vae_scale_factor * 2) != 0 or width % (self.vae_scale_factor * 2) != 0:
            logger.warning(
                f"`height` and `width` have to be divisible by {self.vae_scale_factor * 2} but are {height} and {width}. Dimensions will be resized accordingly"
            )

        if callback_on_step_end_tensor_inputs is not None and not all(
            k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
        ):
            raise ValueError(
                f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
            )

        if negative_prompt is not None and negative_prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
                f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
            )

        if prompt_embeds is not None and prompt_embeds_mask is None:
            raise ValueError(
                "If `prompt_embeds` are provided, `prompt_embeds_mask` also have to be passed. Make sure to generate `prompt_embeds_mask` from the same text encoder that was used to generate `prompt_embeds`."
            )
        if negative_prompt_embeds is not None and negative_prompt_embeds_mask is None:
            raise ValueError(
                "If `negative_prompt_embeds` are provided, `negative_prompt_embeds_mask` also have to be passed. Make sure to generate `negative_prompt_embeds_mask` from the same text encoder that was used to generate `negative_prompt_embeds`."
            )

        if max_sequence_length is not None and max_sequence_length > 1024:
            raise ValueError(f"`max_sequence_length` cannot be greater than 1024 but is {max_sequence_length}")

    @staticmethod
    def _pack_latents(latents, batch_size, num_channels_latents, height, width, layers):
        # Turn (B, layers, C, H, W) latents into a flat token sequence of 2x2 spatial
        # patches: (B, layers * H/2 * W/2, C * 4).
        latents = latents.view(batch_size, layers, num_channels_latents, height // 2, 2, width // 2, 2)
        latents = latents.permute(0, 1, 3, 5, 2, 4, 6)
        latents = latents.reshape(batch_size, layers * (height // 2) * (width // 2), num_channels_latents * 4)

        return latents

    @staticmethod
    def _unpack_latents(latents, height, width, layers, vae_scale_factor):
        # Inverse of `_pack_latents`, additionally restoring the extra combined-image
        # frame (hence `layers + 1`); returns (B, C, layers + 1, H, W).
        batch_size, num_patches, channels = latents.shape

        # VAE applies 8x compression on images but we must also account for packing which requires
        # latent height and width to be divisible by 2.
        height = 2 * (int(height) // (vae_scale_factor * 2))
        width = 2 * (int(width) // (vae_scale_factor * 2))

        latents = latents.view(batch_size, layers + 1, height // 2, width // 2, channels // 4, 2, 2)
        latents = latents.permute(0, 1, 4, 2, 5, 3, 6)

        latents = latents.reshape(batch_size, layers + 1, channels // (2 * 2), height, width)
        latents = latents.permute(0, 2, 1, 3, 4)  # (b, c, f, h, w)

        return latents

    # Copied from diffusers.pipelines.qwenimage.pipeline_qwenimage_edit.QwenImageEditPipeline._encode_vae_image
    def _encode_vae_image(self, image: torch.Tensor, generator: torch.Generator):
        # Encode a pixel-space image to normalized VAE latents; "argmax" sampling makes
        # the conditioning latents deterministic.
        if isinstance(generator, list):
            image_latents = [
                retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i], sample_mode="argmax")
                for i in range(image.shape[0])
            ]
            image_latents = torch.cat(image_latents, dim=0)
        else:
            image_latents = retrieve_latents(self.vae.encode(image), generator=generator, sample_mode="argmax")
        latents_mean = (
            torch.tensor(self.vae.config.latents_mean)
            .view(1, self.latent_channels, 1, 1, 1)
            .to(image_latents.device, 
image_latents.dtype) ) latents_std = ( torch.tensor(self.vae.config.latents_std) .view(1, self.latent_channels, 1, 1, 1) .to(image_latents.device, image_latents.dtype) ) image_latents = (image_latents - latents_mean) / latents_std return image_latents def prepare_latents( self, image, batch_size, num_channels_latents, height, width, layers, dtype, device, generator, latents=None, ): # VAE applies 8x compression on images but we must also account for packing which requires # latent height and width to be divisible by 2. height = 2 * (int(height) // (self.vae_scale_factor * 2)) width = 2 * (int(width) // (self.vae_scale_factor * 2)) shape = ( batch_size, layers + 1, num_channels_latents, height, width, ) ### the generated first image is combined image image_latents = None if image is not None: image = image.to(device=device, dtype=dtype) if image.shape[1] != self.latent_channels: image_latents = self._encode_vae_image(image=image, generator=generator) else: image_latents = image if batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] == 0: # expand init_latents for batch_size additional_image_per_prompt = batch_size // image_latents.shape[0] image_latents = torch.cat([image_latents] * additional_image_per_prompt, dim=0) elif batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] != 0: raise ValueError( f"Cannot duplicate `image` of batch size {image_latents.shape[0]} to {batch_size} text prompts." 
) else: image_latents = torch.cat([image_latents], dim=0) image_latent_height, image_latent_width = image_latents.shape[3:] image_latents = image_latents.permute(0, 2, 1, 3, 4) # (b, c, f, h, w) -> (b, f, c, h, w) image_latents = self._pack_latents( image_latents, batch_size, num_channels_latents, image_latent_height, image_latent_width, 1 ) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) latents = self._pack_latents(latents, batch_size, num_channels_latents, height, width, layers + 1) else: latents = latents.to(device=device, dtype=dtype) return latents, image_latents @property def guidance_scale(self): return self._guidance_scale @property def attention_kwargs(self): return self._attention_kwargs @property def num_timesteps(self): return self._num_timesteps @property def current_timestep(self): return self._current_timestep @property def interrupt(self): return self._interrupt @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, image: PipelineImageInput | None = None, prompt: str | list[str] = None, negative_prompt: str | list[str] = None, true_cfg_scale: float = 4.0, layers: int | None = 4, num_inference_steps: int = 50, sigmas: list[float] | None = None, guidance_scale: float | None = None, num_images_per_prompt: int = 1, generator: torch.Generator | list[torch.Generator] | None = None, latents: torch.Tensor | None = None, prompt_embeds: torch.Tensor | None = None, prompt_embeds_mask: torch.Tensor | None = None, negative_prompt_embeds: torch.Tensor | None = None, negative_prompt_embeds_mask: torch.Tensor | None = None, output_type: str | None = "pil", return_dict: bool = True, attention_kwargs: dict[str, Any] 
| None = None,
        callback_on_step_end: Callable[[int, int], None] | None = None,
        callback_on_step_end_tensor_inputs: list[str] = ["latents"],
        max_sequence_length: int = 512,
        resolution: int = 640,
        cfg_normalize: bool = False,
        use_en_prompt: bool = False,
    ):
        r"""
        Function invoked when calling the pipeline for generation.

        Args:
            image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `list[torch.Tensor]`, `list[PIL.Image.Image]`, or
                `list[np.ndarray]`):
                `Image`, numpy array or tensor representing an image batch to be used as the starting point. For both
                numpy array and pytorch tensor, the expected value range is between `[0, 1]` If it's a tensor or a list
                or tensors, the expected shape should be `(B, C, H, W)` or `(C, H, W)`. If it is a numpy array or a
                list of arrays, the expected shape should be `(B, H, W, C)` or `(H, W, C)` It can also accept image
                latents as `image`, but if passing latents directly it is not encoded again.
            prompt (`str` or `list[str]`, *optional*):
                The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
                instead.
            negative_prompt (`str` or `list[str]`, *optional*):
                The prompt or prompts not to guide the image generation. If not defined, one has to pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `true_cfg_scale` is
                not greater than `1`).
            true_cfg_scale (`float`, *optional*, defaults to 4.0):
                Guidance scale as defined in [Classifier-Free Diffusion
                Guidance](https://huggingface.co/papers/2207.12598). `true_cfg_scale` is defined as `w` of equation 2.
                of [Imagen Paper](https://huggingface.co/papers/2205.11487). Classifier-free guidance is enabled by
                setting `true_cfg_scale > 1` and a provided `negative_prompt`. Higher guidance scale encourages to
                generate images that are closely linked to the text `prompt`, usually at the expense of lower image
                quality.
num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. sigmas (`list[float]`, *optional*): Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed will be used. guidance_scale (`float`, *optional*, defaults to None): A guidance scale value for guidance distilled models. Unlike the traditional classifier-free guidance where the guidance scale is applied during inference through noise prediction rescaling, guidance distilled models take the guidance scale directly as an input parameter during forward pass. Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. This parameter in the pipeline is there to support future guidance-distilled models when they come up. It is ignored when not using guidance distilled models. To enable traditional classifier-free guidance, please pass `true_cfg_scale > 1.0` and `negative_prompt` (even an empty negative prompt like " " should enable classifier-free guidance computations). num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. generator (`torch.Generator` or `list[torch.Generator]`, *optional*): One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor will be generated by sampling using the supplied random `generator`. 
prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.qwenimage.QwenImagePipelineOutput`] instead of a plain tuple. attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). callback_on_step_end (`Callable`, *optional*): A function that calls at the end of each denoising steps during the inference. The function is called with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by `callback_on_step_end_tensor_inputs`. callback_on_step_end_tensor_inputs (`List`, *optional*): The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the `._callback_tensor_inputs` attribute of your pipeline class. max_sequence_length (`int` defaults to 512): Maximum sequence length to use with the `prompt`. 
resolution (`int`, *optional*, defaults to 640):
                The resolution bucket, one of (640, 1024), used to determine the condition and output resolution.
            cfg_normalize (`bool`, *optional*, defaults to `False`):
                Whether to enable CFG normalization.
            use_en_prompt (`bool`, *optional*, defaults to `False`):
                Whether the automatically generated caption (used when the user does not provide a prompt) is written
                in English rather than Chinese.

        Examples:

        Returns:
            [`~pipelines.qwenimage.QwenImagePipelineOutput`] or `tuple`:
            [`~pipelines.qwenimage.QwenImagePipelineOutput`] if `return_dict` is True, otherwise a `tuple`. When
            returning a tuple, the first element is a list with the generated images.
        """
        image_size = image[0].size if isinstance(image, list) else image.size
        assert resolution in [640, 1024], f"resolution must be either 640 or 1024, but got {resolution}"
        calculated_width, calculated_height = calculate_dimensions(
            resolution * resolution, image_size[0] / image_size[1]
        )
        height = calculated_height
        width = calculated_width

        multiple_of = self.vae_scale_factor * 2
        width = width // multiple_of * multiple_of
        height = height // multiple_of * multiple_of

        # 1. Check inputs. Raise error if not correct
        self.check_inputs(
            height,
            width,
            negative_prompt=negative_prompt,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            prompt_embeds_mask=prompt_embeds_mask,
            negative_prompt_embeds_mask=negative_prompt_embeds_mask,
            callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs,
            max_sequence_length=max_sequence_length,
        )

        self._guidance_scale = guidance_scale
        self._attention_kwargs = attention_kwargs
        self._current_timestep = None
        self._interrupt = False

        device = self._execution_device

        # 2. 
Preprocess image if image is not None and not (isinstance(image, torch.Tensor) and image.size(1) == self.latent_channels): image = self.image_processor.resize(image, calculated_height, calculated_width) prompt_image = image image = self.image_processor.preprocess(image, calculated_height, calculated_width) image = image.unsqueeze(2) image = image.to(dtype=self.text_encoder.dtype) if prompt is None or prompt == "" or prompt == " ": prompt = self.get_image_caption(prompt_image, use_en_prompt=use_en_prompt, device=device) # 3. Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] has_neg_prompt = negative_prompt is not None or ( negative_prompt_embeds is not None and negative_prompt_embeds_mask is not None ) if true_cfg_scale > 1 and not has_neg_prompt: logger.warning( f"true_cfg_scale is passed as {true_cfg_scale}, but classifier-free guidance is not enabled since no negative_prompt is provided." ) elif true_cfg_scale <= 1 and has_neg_prompt: logger.warning( " negative_prompt is passed but classifier-free guidance is not enabled since true_cfg_scale <= 1" ) do_true_cfg = true_cfg_scale > 1 and has_neg_prompt prompt_embeds, prompt_embeds_mask = self.encode_prompt( prompt=prompt, prompt_embeds=prompt_embeds, prompt_embeds_mask=prompt_embeds_mask, device=device, num_images_per_prompt=num_images_per_prompt, max_sequence_length=max_sequence_length, ) if do_true_cfg: negative_prompt_embeds, negative_prompt_embeds_mask = self.encode_prompt( prompt=negative_prompt, prompt_embeds=negative_prompt_embeds, prompt_embeds_mask=negative_prompt_embeds_mask, device=device, num_images_per_prompt=num_images_per_prompt, max_sequence_length=max_sequence_length, ) # 4. 
Prepare latent variables num_channels_latents = self.transformer.config.in_channels // 4 latents, image_latents = self.prepare_latents( image, batch_size * num_images_per_prompt, num_channels_latents, height, width, layers, prompt_embeds.dtype, device, generator, latents, ) img_shapes = [ [ *[ (1, height // self.vae_scale_factor // 2, width // self.vae_scale_factor // 2) for _ in range(layers + 1) ], (1, calculated_height // self.vae_scale_factor // 2, calculated_width // self.vae_scale_factor // 2), ] ] * batch_size # 5. Prepare timesteps sigmas = np.linspace(1.0, 0, num_inference_steps + 1)[:-1] if sigmas is None else sigmas image_seq_len = latents.shape[1] base_seqlen = 256 * 256 / 16 / 16 mu = (image_latents.shape[1] / base_seqlen) ** 0.5 timesteps, num_inference_steps = retrieve_timesteps( self.scheduler, num_inference_steps, device, sigmas=sigmas, mu=mu, ) num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) self._num_timesteps = len(timesteps) # handle guidance if self.transformer.config.guidance_embeds and guidance_scale is None: raise ValueError("guidance_scale is required for guidance-distilled model.") elif self.transformer.config.guidance_embeds: guidance = torch.full([1], guidance_scale, device=device, dtype=torch.float32) guidance = guidance.expand(latents.shape[0]) elif not self.transformer.config.guidance_embeds and guidance_scale is not None: logger.warning( f"guidance_scale is passed as {guidance_scale}, but ignored since the model is not guidance-distilled." ) guidance = None elif not self.transformer.config.guidance_embeds and guidance_scale is None: guidance = None if self.attention_kwargs is None: self._attention_kwargs = {} is_rgb = torch.tensor([0] * batch_size).to(device=device, dtype=torch.long) # 6. 
Denoising loop self.scheduler.set_begin_index(0) with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): if self.interrupt: continue self._current_timestep = t latent_model_input = latents if image_latents is not None: latent_model_input = torch.cat([latents, image_latents], dim=1) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML timestep = t.expand(latents.shape[0]).to(latents.dtype) with self.transformer.cache_context("cond"): noise_pred = self.transformer( hidden_states=latent_model_input, timestep=timestep / 1000, guidance=guidance, encoder_hidden_states_mask=prompt_embeds_mask, encoder_hidden_states=prompt_embeds, img_shapes=img_shapes, attention_kwargs=self.attention_kwargs, additional_t_cond=is_rgb, return_dict=False, )[0] noise_pred = noise_pred[:, : latents.size(1)] if do_true_cfg: with self.transformer.cache_context("uncond"): neg_noise_pred = self.transformer( hidden_states=latent_model_input, timestep=timestep / 1000, guidance=guidance, encoder_hidden_states_mask=negative_prompt_embeds_mask, encoder_hidden_states=negative_prompt_embeds, img_shapes=img_shapes, attention_kwargs=self.attention_kwargs, additional_t_cond=is_rgb, return_dict=False, )[0] neg_noise_pred = neg_noise_pred[:, : latents.size(1)] comb_pred = neg_noise_pred + true_cfg_scale * (noise_pred - neg_noise_pred) if cfg_normalize: cond_norm = torch.norm(noise_pred, dim=-1, keepdim=True) noise_norm = torch.norm(comb_pred, dim=-1, keepdim=True) noise_pred = comb_pred * (cond_norm / noise_norm) else: noise_pred = comb_pred # compute the previous noisy sample x_t -> x_t-1 latents_dtype = latents.dtype latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0] if latents.dtype != latents_dtype: if torch.backends.mps.is_available(): # some platforms (eg. 
apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272
                    latents = latents.to(latents_dtype)

                if callback_on_step_end is not None:
                    callback_kwargs = {}
                    for k in callback_on_step_end_tensor_inputs:
                        callback_kwargs[k] = locals()[k]
                    callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)

                    latents = callback_outputs.pop("latents", latents)
                    prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)

                # call the callback, if provided
                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                    progress_bar.update()

                if XLA_AVAILABLE:
                    xm.mark_step()

        self._current_timestep = None

        if output_type == "latent":
            image = latents
        else:
            latents = self._unpack_latents(latents, height, width, layers, self.vae_scale_factor)
            latents = latents.to(self.vae.dtype)
            latents_mean = (
                torch.tensor(self.vae.config.latents_mean)
                .view(1, self.vae.config.z_dim, 1, 1, 1)
                .to(latents.device, latents.dtype)
            )
            latents_std = 1.0 / torch.tensor(self.vae.config.latents_std).view(1, self.vae.config.z_dim, 1, 1, 1).to(
                latents.device, latents.dtype
            )
            latents = latents / latents_std + latents_mean
            b, c, f, h, w = latents.shape
            latents = latents[:, :, 1:]  # remove the first frame as it is the original input
            latents = latents.permute(0, 2, 1, 3, 4).reshape(-1, c, 1, h, w)
            image = self.vae.decode(latents, return_dict=False)[0]  # (b f) c 1 h w
            image = image.squeeze(2)
            image = self.image_processor.postprocess(image, output_type=output_type)
            images = []
            # NOTE(review): `f` is the pre-slice frame count, but only `f - 1` frames per
            # sample were decoded after dropping the first frame — the stride-`f` slicing
            # below looks like it mis-groups outputs when b > 1; confirm intended behavior.
            for bidx in range(b):
                images.append(image[bidx * f : (bidx + 1) * f])

        # Offload all models
        self.maybe_free_model_hooks()

        if not return_dict:
            return (images,)

        return QwenImagePipelineOutput(images=images)
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/pipelines/qwenimage/pipeline_qwenimage_layered.py", "license": "Apache License 2.0", "lines": 794, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:src/diffusers/models/transformers/transformer_longcat_image.py
# Copyright 2025 MeiTuan LongCat-Image Team and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import inspect
from typing import Any

import torch
import torch.nn as nn
import torch.nn.functional as F

from ...configuration_utils import ConfigMixin, register_to_config
from ...loaders import FromOriginalModelMixin, PeftAdapterMixin
from ...utils import logging
from ...utils.torch_utils import maybe_allow_in_graph
from ..attention import AttentionMixin, AttentionModuleMixin, FeedForward
from ..attention_dispatch import dispatch_attention_fn
from ..cache_utils import CacheMixin
from ..embeddings import TimestepEmbedding, Timesteps, apply_rotary_emb, get_1d_rotary_pos_embed
from ..modeling_outputs import Transformer2DModelOutput
from ..modeling_utils import ModelMixin
from ..normalization import AdaLayerNormContinuous, AdaLayerNormZero, AdaLayerNormZeroSingle


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def _get_projections(
    attn: "LongCatImageAttention", hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor | None = None
):
    # Project the image stream to Q/K/V; additionally project the text stream through
    # the `add_*_proj` layers when the module carries added KV projections.
    query = attn.to_q(hidden_states)
    key = attn.to_k(hidden_states)
    value = attn.to_v(hidden_states)

    encoder_query = encoder_key = encoder_value = None
    if encoder_hidden_states is not None and attn.added_kv_proj_dim is not None:
        encoder_query = attn.add_q_proj(encoder_hidden_states)
        encoder_key = attn.add_k_proj(encoder_hidden_states)
        encoder_value = attn.add_v_proj(encoder_hidden_states)

    return query, key, value, encoder_query, encoder_key, encoder_value


def _get_fused_projections(
    attn: "LongCatImageAttention", hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor | None = None
):
    # Variant of `_get_projections` for modules whose Q/K/V weights were fused into a
    # single `to_qkv` linear layer.
    query, key, value = attn.to_qkv(hidden_states).chunk(3, dim=-1)

    # NOTE(review): the "no encoder" sentinel here is the tuple `(None,)`, while
    # `_get_projections` uses plain `None` — confirm this asymmetry is intentional.
    encoder_query = encoder_key = encoder_value = (None,)
    if encoder_hidden_states is not None and hasattr(attn, "to_added_qkv"):
        encoder_query, encoder_key, encoder_value = attn.to_added_qkv(encoder_hidden_states).chunk(3, dim=-1)

    return query, key, value, encoder_query, encoder_key, encoder_value


def _get_qkv_projections(
    attn: "LongCatImageAttention", hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor | None = None
):
    # Dispatch between the fused and unfused projection paths.
    if attn.fused_projections:
        return _get_fused_projections(attn, hidden_states, encoder_hidden_states)
    return _get_projections(attn, hidden_states, encoder_hidden_states)


class LongCatImageAttnProcessor:
    # Attention processor for LongCat-Image; relies on PyTorch 2.0 SDPA via
    # `dispatch_attention_fn`.
    _attention_backend = None
    _parallel_config = None

    def __init__(self):
        if not hasattr(F, "scaled_dot_product_attention"):
            raise ImportError(f"{self.__class__.__name__} requires PyTorch 2.0. Please upgrade your pytorch version.")

    def __call__(
        self,
        attn: "LongCatImageAttention",
        hidden_states: torch.Tensor,
        encoder_hidden_states: torch.Tensor | None = None,
        attention_mask: torch.Tensor | None = None,
        image_rotary_emb: torch.Tensor | None = None,
    ) -> torch.Tensor:
        query, key, value, encoder_query, encoder_key, encoder_value = _get_qkv_projections(
            attn, hidden_states, encoder_hidden_states
        )

        # Reshape flat projections to (batch, seq, heads, head_dim) and apply QK norm.
        query = query.unflatten(-1, (attn.heads, -1))
        key = key.unflatten(-1, (attn.heads, -1))
        value = value.unflatten(-1, (attn.heads, -1))

        query = attn.norm_q(query)
        key = attn.norm_k(key)

        if attn.added_kv_proj_dim is not None:
            encoder_query = encoder_query.unflatten(-1, (attn.heads, -1))
            encoder_key = encoder_key.unflatten(-1, (attn.heads, -1))
            encoder_value = encoder_value.unflatten(-1, (attn.heads, -1))

            encoder_query = attn.norm_added_q(encoder_query)
            encoder_key = attn.norm_added_k(encoder_key)

            # Joint attention: text tokens are prepended to the image tokens.
            query = torch.cat([encoder_query, query], dim=1)
            key = torch.cat([encoder_key, key], dim=1)
            value = 
torch.cat([encoder_value, value], dim=1) if image_rotary_emb is not None: query = apply_rotary_emb(query, image_rotary_emb, sequence_dim=1) key = apply_rotary_emb(key, image_rotary_emb, sequence_dim=1) hidden_states = dispatch_attention_fn( query, key, value, attn_mask=attention_mask, backend=self._attention_backend, parallel_config=self._parallel_config, ) hidden_states = hidden_states.flatten(2, 3) hidden_states = hidden_states.to(query.dtype) if encoder_hidden_states is not None: encoder_hidden_states, hidden_states = hidden_states.split_with_sizes( [encoder_hidden_states.shape[1], hidden_states.shape[1] - encoder_hidden_states.shape[1]], dim=1 ) hidden_states = attn.to_out[0](hidden_states) hidden_states = attn.to_out[1](hidden_states) encoder_hidden_states = attn.to_add_out(encoder_hidden_states) return hidden_states, encoder_hidden_states else: return hidden_states class LongCatImageAttention(torch.nn.Module, AttentionModuleMixin): _default_processor_cls = LongCatImageAttnProcessor _available_processors = [ LongCatImageAttnProcessor, ] def __init__( self, query_dim: int, heads: int = 8, dim_head: int = 64, dropout: float = 0.0, bias: bool = False, added_kv_proj_dim: int | None = None, added_proj_bias: bool | None = True, out_bias: bool = True, eps: float = 1e-5, out_dim: int = None, context_pre_only: bool | None = None, pre_only: bool = False, elementwise_affine: bool = True, processor=None, ): super().__init__() self.head_dim = dim_head self.inner_dim = out_dim if out_dim is not None else dim_head * heads self.query_dim = query_dim self.use_bias = bias self.dropout = dropout self.out_dim = out_dim if out_dim is not None else query_dim self.context_pre_only = context_pre_only self.pre_only = pre_only self.heads = out_dim // dim_head if out_dim is not None else heads self.added_kv_proj_dim = added_kv_proj_dim self.added_proj_bias = added_proj_bias self.norm_q = torch.nn.RMSNorm(dim_head, eps=eps, elementwise_affine=elementwise_affine) self.norm_k = 
torch.nn.RMSNorm(dim_head, eps=eps, elementwise_affine=elementwise_affine) self.to_q = torch.nn.Linear(query_dim, self.inner_dim, bias=bias) self.to_k = torch.nn.Linear(query_dim, self.inner_dim, bias=bias) self.to_v = torch.nn.Linear(query_dim, self.inner_dim, bias=bias) if not self.pre_only: self.to_out = torch.nn.ModuleList([]) self.to_out.append(torch.nn.Linear(self.inner_dim, self.out_dim, bias=out_bias)) self.to_out.append(torch.nn.Dropout(dropout)) if added_kv_proj_dim is not None: self.norm_added_q = torch.nn.RMSNorm(dim_head, eps=eps) self.norm_added_k = torch.nn.RMSNorm(dim_head, eps=eps) self.add_q_proj = torch.nn.Linear(added_kv_proj_dim, self.inner_dim, bias=added_proj_bias) self.add_k_proj = torch.nn.Linear(added_kv_proj_dim, self.inner_dim, bias=added_proj_bias) self.add_v_proj = torch.nn.Linear(added_kv_proj_dim, self.inner_dim, bias=added_proj_bias) self.to_add_out = torch.nn.Linear(self.inner_dim, query_dim, bias=out_bias) if processor is None: processor = self._default_processor_cls() self.set_processor(processor) def forward( self, hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor | None = None, attention_mask: torch.Tensor | None = None, image_rotary_emb: torch.Tensor | None = None, **kwargs, ) -> torch.Tensor: attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) quiet_attn_parameters = {"ip_adapter_masks", "ip_hidden_states"} unused_kwargs = [k for k, _ in kwargs.items() if k not in attn_parameters and k not in quiet_attn_parameters] if len(unused_kwargs) > 0: logger.warning( f"joint_attention_kwargs {unused_kwargs} are not expected by {self.processor.__class__.__name__} and will be ignored." 
) kwargs = {k: w for k, w in kwargs.items() if k in attn_parameters} return self.processor(self, hidden_states, encoder_hidden_states, attention_mask, image_rotary_emb, **kwargs) @maybe_allow_in_graph class LongCatImageSingleTransformerBlock(nn.Module): def __init__(self, dim: int, num_attention_heads: int, attention_head_dim: int, mlp_ratio: float = 4.0): super().__init__() self.mlp_hidden_dim = int(dim * mlp_ratio) self.norm = AdaLayerNormZeroSingle(dim) self.proj_mlp = nn.Linear(dim, self.mlp_hidden_dim) self.act_mlp = nn.GELU(approximate="tanh") self.proj_out = nn.Linear(dim + self.mlp_hidden_dim, dim) self.attn = LongCatImageAttention( query_dim=dim, dim_head=attention_head_dim, heads=num_attention_heads, out_dim=dim, bias=True, processor=LongCatImageAttnProcessor(), eps=1e-6, pre_only=True, ) def forward( self, hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor, temb: torch.Tensor, image_rotary_emb: tuple[torch.Tensor, torch.Tensor] | None = None, joint_attention_kwargs: dict[str, Any] | None = None, ) -> tuple[torch.Tensor, torch.Tensor]: text_seq_len = encoder_hidden_states.shape[1] hidden_states = torch.cat([encoder_hidden_states, hidden_states], dim=1) residual = hidden_states norm_hidden_states, gate = self.norm(hidden_states, emb=temb) mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) joint_attention_kwargs = joint_attention_kwargs or {} attn_output = self.attn( hidden_states=norm_hidden_states, image_rotary_emb=image_rotary_emb, **joint_attention_kwargs, ) hidden_states = torch.cat([attn_output, mlp_hidden_states], dim=2) gate = gate.unsqueeze(1) hidden_states = gate * self.proj_out(hidden_states) hidden_states = residual + hidden_states if hidden_states.dtype == torch.float16: hidden_states = hidden_states.clip(-65504, 65504) encoder_hidden_states, hidden_states = hidden_states[:, :text_seq_len], hidden_states[:, text_seq_len:] return encoder_hidden_states, hidden_states @maybe_allow_in_graph class 
LongCatImageTransformerBlock(nn.Module): def __init__( self, dim: int, num_attention_heads: int, attention_head_dim: int, qk_norm: str = "rms_norm", eps: float = 1e-6 ): super().__init__() self.norm1 = AdaLayerNormZero(dim) self.norm1_context = AdaLayerNormZero(dim) self.attn = LongCatImageAttention( query_dim=dim, added_kv_proj_dim=dim, dim_head=attention_head_dim, heads=num_attention_heads, out_dim=dim, context_pre_only=False, bias=True, processor=LongCatImageAttnProcessor(), eps=eps, ) self.norm2 = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-6) self.ff = FeedForward(dim=dim, dim_out=dim, activation_fn="gelu-approximate") self.norm2_context = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-6) self.ff_context = FeedForward(dim=dim, dim_out=dim, activation_fn="gelu-approximate") def forward( self, hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor, temb: torch.Tensor, image_rotary_emb: tuple[torch.Tensor, torch.Tensor] | None = None, joint_attention_kwargs: dict[str, Any] | None = None, ) -> tuple[torch.Tensor, torch.Tensor]: norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( encoder_hidden_states, emb=temb ) joint_attention_kwargs = joint_attention_kwargs or {} # Attention. attention_outputs = self.attn( hidden_states=norm_hidden_states, encoder_hidden_states=norm_encoder_hidden_states, image_rotary_emb=image_rotary_emb, **joint_attention_kwargs, ) if len(attention_outputs) == 2: attn_output, context_attn_output = attention_outputs elif len(attention_outputs) == 3: attn_output, context_attn_output, ip_attn_output = attention_outputs # Process attention outputs for the `hidden_states`. 
attn_output = gate_msa.unsqueeze(1) * attn_output hidden_states = hidden_states + attn_output norm_hidden_states = self.norm2(hidden_states) norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None] ff_output = self.ff(norm_hidden_states) ff_output = gate_mlp.unsqueeze(1) * ff_output hidden_states = hidden_states + ff_output if len(attention_outputs) == 3: hidden_states = hidden_states + ip_attn_output # Process attention outputs for the `encoder_hidden_states`. context_attn_output = c_gate_msa.unsqueeze(1) * context_attn_output encoder_hidden_states = encoder_hidden_states + context_attn_output norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) norm_encoder_hidden_states = norm_encoder_hidden_states * (1 + c_scale_mlp[:, None]) + c_shift_mlp[:, None] context_ff_output = self.ff_context(norm_encoder_hidden_states) encoder_hidden_states = encoder_hidden_states + c_gate_mlp.unsqueeze(1) * context_ff_output if encoder_hidden_states.dtype == torch.float16: encoder_hidden_states = encoder_hidden_states.clip(-65504, 65504) return encoder_hidden_states, hidden_states class LongCatImagePosEmbed(nn.Module): def __init__(self, theta: int, axes_dim: list[int]): super().__init__() self.theta = theta self.axes_dim = axes_dim def forward(self, ids: torch.Tensor) -> torch.Tensor: n_axes = ids.shape[-1] cos_out = [] sin_out = [] pos = ids.float() is_mps = ids.device.type == "mps" is_npu = ids.device.type == "npu" freqs_dtype = torch.float32 if (is_mps or is_npu) else torch.float64 for i in range(n_axes): cos, sin = get_1d_rotary_pos_embed( self.axes_dim[i], pos[:, i], theta=self.theta, repeat_interleave_real=True, use_real=True, freqs_dtype=freqs_dtype, ) cos_out.append(cos) sin_out.append(sin) freqs_cos = torch.cat(cos_out, dim=-1).to(ids.device) freqs_sin = torch.cat(sin_out, dim=-1).to(ids.device) return freqs_cos, freqs_sin class LongCatImageTimestepEmbeddings(nn.Module): def __init__(self, embedding_dim): super().__init__() 
self.time_proj = Timesteps(num_channels=256, flip_sin_to_cos=True, downscale_freq_shift=0) self.timestep_embedder = TimestepEmbedding(in_channels=256, time_embed_dim=embedding_dim) def forward(self, timestep, hidden_dtype): timesteps_proj = self.time_proj(timestep) timesteps_emb = self.timestep_embedder(timesteps_proj.to(dtype=hidden_dtype)) # (N, D) return timesteps_emb class LongCatImageTransformer2DModel( ModelMixin, ConfigMixin, PeftAdapterMixin, FromOriginalModelMixin, CacheMixin, AttentionMixin, ): """ The Transformer model introduced in Longcat-Image. """ _supports_gradient_checkpointing = True _repeated_blocks = ["LongCatImageTransformerBlock", "LongCatImageSingleTransformerBlock"] @register_to_config def __init__( self, patch_size: int = 1, in_channels: int = 64, num_layers: int = 19, num_single_layers: int = 38, attention_head_dim: int = 128, num_attention_heads: int = 24, joint_attention_dim: int = 3584, pooled_projection_dim: int = 3584, axes_dims_rope: list[int] = [16, 56, 56], ): super().__init__() self.out_channels = in_channels self.inner_dim = num_attention_heads * attention_head_dim self.pooled_projection_dim = pooled_projection_dim self.pos_embed = LongCatImagePosEmbed(theta=10000, axes_dim=axes_dims_rope) self.time_embed = LongCatImageTimestepEmbeddings(embedding_dim=self.inner_dim) self.context_embedder = nn.Linear(joint_attention_dim, self.inner_dim) self.x_embedder = torch.nn.Linear(in_channels, self.inner_dim) self.transformer_blocks = nn.ModuleList( [ LongCatImageTransformerBlock( dim=self.inner_dim, num_attention_heads=num_attention_heads, attention_head_dim=attention_head_dim, ) for i in range(num_layers) ] ) self.single_transformer_blocks = nn.ModuleList( [ LongCatImageSingleTransformerBlock( dim=self.inner_dim, num_attention_heads=num_attention_heads, attention_head_dim=attention_head_dim, ) for i in range(num_single_layers) ] ) self.norm_out = AdaLayerNormContinuous(self.inner_dim, self.inner_dim, elementwise_affine=False, eps=1e-6) 
self.proj_out = nn.Linear(self.inner_dim, patch_size * patch_size * self.out_channels, bias=True) self.gradient_checkpointing = False self.use_checkpoint = [True] * num_layers self.use_single_checkpoint = [True] * num_single_layers def forward( self, hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor = None, timestep: torch.LongTensor = None, img_ids: torch.Tensor = None, txt_ids: torch.Tensor = None, guidance: torch.Tensor = None, return_dict: bool = True, ) -> torch.FloatTensor | Transformer2DModelOutput: """ The forward method. Args: hidden_states (`torch.FloatTensor` of shape `(batch size, channel, height, width)`): Input `hidden_states`. encoder_hidden_states (`torch.FloatTensor` of shape `(batch size, sequence_len, embed_dims)`): Conditional embeddings (embeddings computed from the input conditions such as prompts) to use. timestep ( `torch.LongTensor`): Used to indicate denoising step. block_controlnet_hidden_states: (`list` of `torch.Tensor`): A list of tensors that if specified are added to the residuals of transformer blocks. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~models.transformer_2d.Transformer2DModelOutput`] instead of a plain tuple. Returns: If `return_dict` is True, an [`~models.transformer_2d.Transformer2DModelOutput`] is returned, otherwise a `tuple` where the first element is the sample tensor. 
""" hidden_states = self.x_embedder(hidden_states) timestep = timestep.to(hidden_states.dtype) * 1000 temb = self.time_embed(timestep, hidden_states.dtype) encoder_hidden_states = self.context_embedder(encoder_hidden_states) ids = torch.cat((txt_ids, img_ids), dim=0) image_rotary_emb = self.pos_embed(ids) for index_block, block in enumerate(self.transformer_blocks): if torch.is_grad_enabled() and self.gradient_checkpointing and self.use_checkpoint[index_block]: encoder_hidden_states, hidden_states = self._gradient_checkpointing_func( block, hidden_states, encoder_hidden_states, temb, image_rotary_emb, ) else: encoder_hidden_states, hidden_states = block( hidden_states=hidden_states, encoder_hidden_states=encoder_hidden_states, temb=temb, image_rotary_emb=image_rotary_emb, ) for index_block, block in enumerate(self.single_transformer_blocks): if torch.is_grad_enabled() and self.gradient_checkpointing and self.use_single_checkpoint[index_block]: encoder_hidden_states, hidden_states = self._gradient_checkpointing_func( block, hidden_states, encoder_hidden_states, temb, image_rotary_emb, ) else: encoder_hidden_states, hidden_states = block( hidden_states=hidden_states, encoder_hidden_states=encoder_hidden_states, temb=temb, image_rotary_emb=image_rotary_emb, ) hidden_states = self.norm_out(hidden_states, temb) output = self.proj_out(hidden_states) if not return_dict: return (output,) return Transformer2DModelOutput(sample=output)
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/models/transformers/transformer_longcat_image.py", "license": "Apache License 2.0", "lines": 453, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:src/diffusers/pipelines/longcat_image/pipeline_longcat_image_edit.py
# Copyright 2025 MeiTuan LongCat-Image Team and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import math import re from typing import Any import numpy as np import PIL import torch from transformers import Qwen2_5_VLForConditionalGeneration, Qwen2Tokenizer, Qwen2VLProcessor from ...image_processor import VaeImageProcessor from ...loaders import FromSingleFileMixin from ...models.autoencoders import AutoencoderKL from ...models.transformers import LongCatImageTransformer2DModel from ...pipelines.pipeline_utils import DiffusionPipeline from ...schedulers import FlowMatchEulerDiscreteScheduler from ...utils import is_torch_xla_available, logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from .pipeline_output import LongCatImagePipelineOutput if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py >>> from PIL import Image >>> import torch >>> from diffusers import LongCatImageEditPipeline >>> pipe = LongCatImageEditPipeline.from_pretrained( ... "meituan-longcat/LongCat-Image-Edit", torch_dtype=torch.bfloat16 ... ) >>> pipe.to("cuda") >>> prompt = "change the cat to dog." >>> input_image = Image.open("test.jpg").convert("RGB") >>> image = pipe( ... input_image, ... prompt, ... num_inference_steps=50, ... guidance_scale=4.5, ... 
generator=torch.Generator("cpu").manual_seed(43), ... ).images[0] >>> image.save("longcat_image_edit.png") ``` """ # Copied from diffusers.pipelines.longcat_image.pipeline_longcat_image.split_quotation def split_quotation(prompt, quote_pairs=None): """ Implement a regex-based string splitting algorithm that identifies delimiters defined by single or double quote pairs. Examples:: >>> prompt_en = "Please write 'Hello' on the blackboard for me." >>> print(split_quotation(prompt_en)) >>> # output: [('Please write ', False), ("'Hello'", True), (' on the blackboard for me.', False)] """ word_internal_quote_pattern = re.compile(r"[a-zA-Z]+'[a-zA-Z]+") matches_word_internal_quote_pattern = word_internal_quote_pattern.findall(prompt) mapping_word_internal_quote = [] for i, word_src in enumerate(set(matches_word_internal_quote_pattern)): word_tgt = "longcat_$##$_longcat" * (i + 1) prompt = prompt.replace(word_src, word_tgt) mapping_word_internal_quote.append([word_src, word_tgt]) if quote_pairs is None: quote_pairs = [("'", "'"), ('"', '"'), ("‘", "’"), ("“", "”")] pattern = "|".join([re.escape(q1) + r"[^" + re.escape(q1 + q2) + r"]*?" 
+ re.escape(q2) for q1, q2 in quote_pairs]) parts = re.split(f"({pattern})", prompt) result = [] for part in parts: for word_src, word_tgt in mapping_word_internal_quote: part = part.replace(word_tgt, word_src) if re.match(pattern, part): if len(part): result.append((part, True)) else: if len(part): result.append((part, False)) return result # Copied from diffusers.pipelines.longcat_image.pipeline_longcat_image.prepare_pos_ids def prepare_pos_ids(modality_id=0, type="text", start=(0, 0), num_token=None, height=None, width=None): if type == "text": assert num_token if height or width: print('Warning: The parameters of height and width will be ignored in "text" type.') pos_ids = torch.zeros(num_token, 3) pos_ids[..., 0] = modality_id pos_ids[..., 1] = torch.arange(num_token) + start[0] pos_ids[..., 2] = torch.arange(num_token) + start[1] elif type == "image": assert height and width if num_token: print('Warning: The parameter of num_token will be ignored in "image" type.') pos_ids = torch.zeros(height, width, 3) pos_ids[..., 0] = modality_id pos_ids[..., 1] = pos_ids[..., 1] + torch.arange(height)[:, None] + start[0] pos_ids[..., 2] = pos_ids[..., 2] + torch.arange(width)[None, :] + start[1] pos_ids = pos_ids.reshape(height * width, 3) else: raise KeyError(f'Unknow type {type}, only support "text" or "image".') return pos_ids # Copied from diffusers.pipelines.longcat_image.pipeline_longcat_image.calculate_shift def calculate_shift( image_seq_len, base_seq_len: int = 256, max_seq_len: int = 4096, base_shift: float = 0.5, max_shift: float = 1.15, ): m = (max_shift - base_shift) / (max_seq_len - base_seq_len) b = base_shift - m * base_seq_len mu = image_seq_len * m + b return mu # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps def retrieve_timesteps( scheduler, num_inference_steps: int | None = None, device: str | torch.device | None = None, timesteps: list[int] | None = None, sigmas: list[float] | None = None, **kwargs, ): 
r""" Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. Args: scheduler (`SchedulerMixin`): The scheduler to get timesteps from. num_inference_steps (`int`): The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` must be `None`. device (`str` or `torch.device`, *optional*): The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. timesteps (`list[int]`, *optional*): Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, `num_inference_steps` and `sigmas` must be `None`. sigmas (`list[float]`, *optional*): Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, `num_inference_steps` and `timesteps` must be `None`. Returns: `tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the second element is the number of inference steps. """ if timesteps is not None and sigmas is not None: raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") if timesteps is not None: accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" timestep schedules. Please check whether you are using the correct scheduler." 
) scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif sigmas is not None: accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accept_sigmas: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" sigmas schedules. Please check whether you are using the correct scheduler." ) scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return timesteps, num_inference_steps # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents def retrieve_latents( encoder_output: torch.Tensor, generator: torch.Generator | None = None, sample_mode: str = "sample" ): if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": return encoder_output.latent_dist.sample(generator) elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": return encoder_output.latent_dist.mode() elif hasattr(encoder_output, "latents"): return encoder_output.latents else: raise AttributeError("Could not access latents of provided encoder_output") def calculate_dimensions(target_area, ratio): width = math.sqrt(target_area * ratio) height = width / ratio width = width if width % 16 == 0 else (width // 16 + 1) * 16 height = height if height % 16 == 0 else (height // 16 + 1) * 16 width = int(width) height = int(height) return width, height class LongCatImageEditPipeline(DiffusionPipeline, FromSingleFileMixin): r""" The LongCat-Image-Edit pipeline for image editing. 
""" model_cpu_offload_seq = "text_encoder->image_encoder->transformer->vae" _optional_components = [] _callback_tensor_inputs = ["latents", "prompt_embeds"] def __init__( self, scheduler: FlowMatchEulerDiscreteScheduler, vae: AutoencoderKL, text_encoder: Qwen2_5_VLForConditionalGeneration, tokenizer: Qwen2Tokenizer, text_processor: Qwen2VLProcessor, transformer: LongCatImageTransformer2DModel, ): super().__init__() self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, transformer=transformer, scheduler=scheduler, text_processor=text_processor, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8 self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor * 2) self.image_processor_vl = text_processor.image_processor self.image_token = "<|image_pad|>" self.prompt_template_encode_prefix = "<|im_start|>system\nAs an image editing expert, first analyze the content and attributes of the input image(s). 
Then, based on the user's editing instructions, clearly and precisely determine how to modify the given image(s), ensuring that only the specified parts are altered and all other aspects remain consistent with the original(s).<|im_end|>\n<|im_start|>user\n<|vision_start|><|image_pad|><|vision_end|>" self.prompt_template_encode_suffix = "<|im_end|>\n<|im_start|>assistant\n" self.default_sample_size = 128 self.tokenizer_max_length = 512 def _encode_prompt(self, prompt, image): raw_vl_input = self.image_processor_vl(images=image, return_tensors="pt") pixel_values = raw_vl_input["pixel_values"] image_grid_thw = raw_vl_input["image_grid_thw"] all_tokens = [] for clean_prompt_sub, matched in split_quotation(prompt[0]): if matched: for sub_word in clean_prompt_sub: tokens = self.tokenizer(sub_word, add_special_tokens=False)["input_ids"] all_tokens.extend(tokens) else: tokens = self.tokenizer(clean_prompt_sub, add_special_tokens=False)["input_ids"] all_tokens.extend(tokens) if len(all_tokens) > self.tokenizer_max_length: logger.warning( "Your input was truncated because `max_sequence_length` is set to " f" {self.tokenizer_max_length} input token nums : {len(len(all_tokens))}" ) all_tokens = all_tokens[: self.tokenizer_max_length] text_tokens_and_mask = self.tokenizer.pad( {"input_ids": [all_tokens]}, max_length=self.tokenizer_max_length, padding="max_length", return_attention_mask=True, return_tensors="pt", ) text = self.prompt_template_encode_prefix merge_length = self.image_processor_vl.merge_size**2 while self.image_token in text: num_image_tokens = image_grid_thw.prod() // merge_length text = text.replace(self.image_token, "<|placeholder|>" * num_image_tokens, 1) text = text.replace("<|placeholder|>", self.image_token) prefix_tokens = self.tokenizer(text, add_special_tokens=False)["input_ids"] suffix_tokens = self.tokenizer(self.prompt_template_encode_suffix, add_special_tokens=False)["input_ids"] vision_start_token_id = 
self.tokenizer.convert_tokens_to_ids("<|vision_start|>") prefix_len = prefix_tokens.index(vision_start_token_id) suffix_len = len(suffix_tokens) prefix_tokens_mask = torch.tensor([1] * len(prefix_tokens), dtype=text_tokens_and_mask.attention_mask[0].dtype) suffix_tokens_mask = torch.tensor([1] * len(suffix_tokens), dtype=text_tokens_and_mask.attention_mask[0].dtype) prefix_tokens = torch.tensor(prefix_tokens, dtype=text_tokens_and_mask.input_ids.dtype) suffix_tokens = torch.tensor(suffix_tokens, dtype=text_tokens_and_mask.input_ids.dtype) input_ids = torch.cat((prefix_tokens, text_tokens_and_mask.input_ids[0], suffix_tokens), dim=-1) attention_mask = torch.cat( (prefix_tokens_mask, text_tokens_and_mask.attention_mask[0], suffix_tokens_mask), dim=-1 ) input_ids = input_ids.unsqueeze(0).to(self.device) attention_mask = attention_mask.unsqueeze(0).to(self.device) pixel_values = pixel_values.to(self.device) image_grid_thw = image_grid_thw.to(self.device) text_output = self.text_encoder( input_ids=input_ids, attention_mask=attention_mask, pixel_values=pixel_values, image_grid_thw=image_grid_thw, output_hidden_states=True, ) # [max_sequence_length, batch, hidden_size] -> [batch, max_sequence_length, hidden_size] # clone to have a contiguous tensor prompt_embeds = text_output.hidden_states[-1].detach() prompt_embeds = prompt_embeds[:, prefix_len:-suffix_len, :] return prompt_embeds def encode_prompt( self, prompt: list[str] = None, image: torch.Tensor | None = None, num_images_per_prompt: int | None = 1, prompt_embeds: torch.Tensor | None = None, ): prompt = [prompt] if isinstance(prompt, str) else prompt batch_size = len(prompt) # If prompt_embeds is provided and prompt is None, skip encoding if prompt_embeds is None: prompt_embeds = self._encode_prompt(prompt, image) _, seq_len, _ = prompt_embeds.shape # duplicate text embeddings and attention mask for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) 
prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) text_ids = prepare_pos_ids(modality_id=0, type="text", start=(0, 0), num_token=prompt_embeds.shape[1]).to( self.device ) return prompt_embeds, text_ids @staticmethod def _pack_latents(latents, batch_size, num_channels_latents, height, width): latents = latents.view(batch_size, num_channels_latents, height // 2, 2, width // 2, 2) latents = latents.permute(0, 2, 4, 1, 3, 5) latents = latents.reshape(batch_size, (height // 2) * (width // 2), num_channels_latents * 4) return latents @staticmethod def _unpack_latents(latents, height, width, vae_scale_factor): batch_size, num_patches, channels = latents.shape # VAE applies 8x compression on images but we must also account for packing which requires # latent height and width to be divisible by 2. height = 2 * (int(height) // (vae_scale_factor * 2)) width = 2 * (int(width) // (vae_scale_factor * 2)) latents = latents.view(batch_size, height // 2, width // 2, channels // 4, 2, 2) latents = latents.permute(0, 3, 1, 4, 2, 5) latents = latents.reshape(batch_size, channels // (2 * 2), height, width) return latents def _encode_vae_image(self, image: torch.Tensor, generator: torch.Generator): if isinstance(generator, list): image_latents = [ retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i], sample_mode="argmax") for i in range(image.shape[0]) ] image_latents = torch.cat(image_latents, dim=0) else: image_latents = retrieve_latents(self.vae.encode(image), generator=generator, sample_mode="argmax") image_latents = (image_latents - self.vae.config.shift_factor) * self.vae.config.scaling_factor return image_latents @property def do_classifier_free_guidance(self): return self._guidance_scale > 1 def prepare_latents( self, image, batch_size, num_channels_latents, height, width, dtype, prompt_embeds_length, device, generator, latents=None, ): # VAE applies 8x compression on images but we must also account for packing which 
requires # latent height and width to be divisible by 2. height = 2 * (int(height) // (self.vae_scale_factor * 2)) width = 2 * (int(width) // (self.vae_scale_factor * 2)) image_latents, image_latents_ids = None, None if image is not None: image = image.to(device=self.device, dtype=dtype) if image.shape[1] != self.vae.config.latent_channels: image_latents = self._encode_vae_image(image=image, generator=generator) else: image_latents = image if batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] == 0: additional_image_per_prompt = batch_size // image_latents.shape[0] image_latents = torch.cat([image_latents] * additional_image_per_prompt, dim=0) elif batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] != 0: raise ValueError( f"Cannot duplicate `image` of batch size {image_latents.shape[0]} to {batch_size} text prompts." ) else: image_latents = torch.cat([image_latents], dim=0) image_latents = self._pack_latents(image_latents, batch_size, num_channels_latents, height, width) image_latents_ids = prepare_pos_ids( modality_id=2, type="image", start=(prompt_embeds_length, prompt_embeds_length), height=height // 2, width=width // 2, ).to(device, dtype=torch.float64) shape = (batch_size, num_channels_latents, height, width) latents_ids = prepare_pos_ids( modality_id=1, type="image", start=(prompt_embeds_length, prompt_embeds_length), height=height // 2, width=width // 2, ).to(device) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
) if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) latents = self._pack_latents(latents, batch_size, num_channels_latents, height, width) else: latents = latents.to(device=device, dtype=dtype) return latents, image_latents, latents_ids, image_latents_ids @property def guidance_scale(self): return self._guidance_scale @property def joint_attention_kwargs(self): return self._joint_attention_kwargs @property def num_timesteps(self): return self._num_timesteps @property def current_timestep(self): return self._current_timestep @property def interrupt(self): return self._interrupt def check_inputs( self, prompt, height, width, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None ): if height % (self.vae_scale_factor * 2) != 0 or width % (self.vae_scale_factor * 2) != 0: logger.warning( f"`height` and `width` have to be divisible by {self.vae_scale_factor * 2} but are {height} and {width}. Dimensions will be resized accordingly" ) if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." ) elif prompt is not None: if isinstance(prompt, str): pass elif isinstance(prompt, list) and len(prompt) == 1: pass else: raise ValueError( f"`prompt` must be a `str` or a `list` of length 1, but is {prompt} (type: {type(prompt)})" ) if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." 
) @replace_example_docstring(EXAMPLE_DOC_STRING) @torch.no_grad() def __call__( self, image: PIL.Image.Image | None = None, prompt: str | list[str] = None, negative_prompt: str | list[str] = None, num_inference_steps: int = 50, sigmas: list[float] | None = None, guidance_scale: float = 4.5, num_images_per_prompt: int | None = 1, generator: torch.Generator | list[torch.Generator] | None = None, latents: torch.FloatTensor | None = None, prompt_embeds: torch.FloatTensor | None = None, negative_prompt_embeds: torch.FloatTensor | None = None, output_type: str | None = "pil", return_dict: bool = True, joint_attention_kwargs: dict[str, Any] | None = None, ): r""" Function invoked when calling the pipeline for generation. Examples: Returns: [`~pipelines.LongCatImagePipelineOutput`] or `tuple`: [`~pipelines.LongCatImagePipelineOutput`] if `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is a list with the generated images. """ image_size = image[0].size if isinstance(image, list) else image.size calculated_width, calculated_height = calculate_dimensions(1024 * 1024, image_size[0] * 1.0 / image_size[1]) # 1. Check inputs. Raise error if not correct self.check_inputs( prompt, calculated_height, calculated_width, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, ) self._guidance_scale = guidance_scale self._joint_attention_kwargs = joint_attention_kwargs self._current_timestep = None self._interrupt = False # 2. Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device # 3. 
Preprocess image if image is not None and not (isinstance(image, torch.Tensor) and image.size(1) == self.latent_channels): image = self.image_processor.resize(image, calculated_height, calculated_width) prompt_image = self.image_processor.resize(image, calculated_height // 2, calculated_width // 2) image = self.image_processor.preprocess(image, calculated_height, calculated_width) negative_prompt = "" if negative_prompt is None else negative_prompt (prompt_embeds, text_ids) = self.encode_prompt( prompt=prompt, image=prompt_image, prompt_embeds=prompt_embeds, num_images_per_prompt=num_images_per_prompt ) if self.do_classifier_free_guidance: (negative_prompt_embeds, negative_text_ids) = self.encode_prompt( prompt=negative_prompt, image=prompt_image, prompt_embeds=negative_prompt_embeds, num_images_per_prompt=num_images_per_prompt, ) # 4. Prepare latent variables num_channels_latents = 16 latents, image_latents, latents_ids, image_latents_ids = self.prepare_latents( image, batch_size * num_images_per_prompt, num_channels_latents, calculated_height, calculated_width, prompt_embeds.dtype, prompt_embeds.shape[1], device, generator, latents, ) # 5. 
Prepare timesteps sigmas = np.linspace(1.0, 1.0 / num_inference_steps, num_inference_steps) if sigmas is None else sigmas image_seq_len = latents.shape[1] mu = calculate_shift( image_seq_len, self.scheduler.config.get("base_image_seq_len", 256), self.scheduler.config.get("max_image_seq_len", 4096), self.scheduler.config.get("base_shift", 0.5), self.scheduler.config.get("max_shift", 1.15), ) timesteps, num_inference_steps = retrieve_timesteps( self.scheduler, num_inference_steps, device, sigmas=sigmas, mu=mu, ) num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) self._num_timesteps = len(timesteps) # handle guidance guidance = None if self.joint_attention_kwargs is None: self._joint_attention_kwargs = {} if image is not None: latent_image_ids = torch.cat([latents_ids, image_latents_ids], dim=0) else: latent_image_ids = latents_ids # 6. Denoising loop with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): if self.interrupt: continue self._current_timestep = t latent_model_input = latents if image_latents is not None: latent_model_input = torch.cat([latents, image_latents], dim=1) timestep = t.expand(latent_model_input.shape[0]).to(latents.dtype) with self.transformer.cache_context("cond"): noise_pred_text = self.transformer( hidden_states=latent_model_input, timestep=timestep / 1000, guidance=guidance, encoder_hidden_states=prompt_embeds, txt_ids=text_ids, img_ids=latent_image_ids, return_dict=False, )[0] noise_pred_text = noise_pred_text[:, :image_seq_len] if self.do_classifier_free_guidance: with self.transformer.cache_context("uncond"): noise_pred_uncond = self.transformer( hidden_states=latent_model_input, timestep=timestep / 1000, encoder_hidden_states=negative_prompt_embeds, txt_ids=negative_text_ids, img_ids=latent_image_ids, return_dict=False, )[0] noise_pred_uncond = noise_pred_uncond[:, :image_seq_len] noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - 
noise_pred_uncond) else: noise_pred = noise_pred_text # compute the previous noisy sample x_t -> x_t-1 latents_dtype = latents.dtype latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0] if latents.dtype != latents_dtype: if torch.backends.mps.is_available(): # some platforms (eg. apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272 latents = latents.to(latents_dtype) # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if XLA_AVAILABLE: xm.mark_step() self._current_timestep = None if output_type == "latent": image = latents else: latents = self._unpack_latents(latents, calculated_height, calculated_width, self.vae_scale_factor) latents = (latents / self.vae.config.scaling_factor) + self.vae.config.shift_factor if latents.dtype != self.vae.dtype: latents = latents.to(dtype=self.vae.dtype) image = self.vae.decode(latents, return_dict=False)[0] image = self.image_processor.postprocess(image, output_type=output_type) # Offload all models self.maybe_free_model_hooks() if not return_dict: return (image,) return LongCatImagePipelineOutput(images=image)
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/pipelines/longcat_image/pipeline_longcat_image_edit.py", "license": "Apache License 2.0", "lines": 617, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:src/diffusers/pipelines/longcat_image/pipeline_output.py
from dataclasses import dataclass import numpy as np import PIL.Image from diffusers.utils import BaseOutput @dataclass class LongCatImagePipelineOutput(BaseOutput): """ Output class for Stable Diffusion pipelines. Args: images (`list[PIL.Image.Image]` or `np.ndarray`) List of denoised PIL images of length `batch_size` or numpy array of shape `(batch_size, height, width, num_channels)`. PIL images or numpy array present the denoised images of the diffusion pipeline. """ images: list[PIL.Image.Image, np.ndarray]
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/pipelines/longcat_image/pipeline_output.py", "license": "Apache License 2.0", "lines": 14, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
documentation
huggingface/diffusers:examples/research_projects/control_lora/control_lora.py
import cv2 import numpy as np import torch from PIL import Image from diffusers import ( AutoencoderKL, ControlNetModel, StableDiffusionXLControlNetPipeline, UNet2DConditionModel, ) from diffusers.utils import load_image, make_image_grid pipe_id = "stabilityai/stable-diffusion-xl-base-1.0" lora_id = "stabilityai/control-lora" lora_filename = "control-LoRAs-rank128/control-lora-canny-rank128.safetensors" unet = UNet2DConditionModel.from_pretrained(pipe_id, subfolder="unet", torch_dtype=torch.bfloat16).to("cuda") controlnet = ControlNetModel.from_unet(unet).to(device="cuda", dtype=torch.bfloat16) controlnet.load_lora_adapter(lora_id, weight_name=lora_filename, prefix=None, controlnet_config=controlnet.config) prompt = "aerial view, a futuristic research complex in a bright foggy jungle, hard lighting" negative_prompt = "low quality, bad quality, sketches" image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/hf-logo.png" ) controlnet_conditioning_scale = 1.0 # recommended for good generalization vae = AutoencoderKL.from_pretrained("stabilityai/sdxl-vae", torch_dtype=torch.bfloat16) pipe = StableDiffusionXLControlNetPipeline.from_pretrained( pipe_id, unet=unet, controlnet=controlnet, vae=vae, torch_dtype=torch.bfloat16, safety_checker=None, ).to("cuda") image = np.array(image) image = cv2.Canny(image, 100, 200) image = image[:, :, None] image = np.concatenate([image, image, image], axis=2) image = Image.fromarray(image) images = pipe( prompt, negative_prompt=negative_prompt, image=image, controlnet_conditioning_scale=controlnet_conditioning_scale, num_images_per_prompt=4, ).images final_image = [image] + images grid = make_image_grid(final_image, 1, 5) grid.save("hf-logo_canny.png")
{ "repo_id": "huggingface/diffusers", "file_path": "examples/research_projects/control_lora/control_lora.py", "license": "Apache License 2.0", "lines": 47, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
huggingface/diffusers:src/diffusers/modular_pipelines/flux2/before_denoise.py
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import numpy as np import torch from ...models import Flux2Transformer2DModel from ...schedulers import FlowMatchEulerDiscreteScheduler from ...utils import logging from ...utils.torch_utils import randn_tensor from ..modular_pipeline import ModularPipelineBlocks, PipelineState from ..modular_pipeline_utils import ComponentSpec, InputParam, OutputParam from .modular_pipeline import Flux2ModularPipeline logger = logging.get_logger(__name__) # pylint: disable=invalid-name def compute_empirical_mu(image_seq_len: int, num_steps: int) -> float: """Compute empirical mu for Flux2 timestep scheduling.""" a1, b1 = 8.73809524e-05, 1.89833333 a2, b2 = 0.00016927, 0.45666666 if image_seq_len > 4300: mu = a2 * image_seq_len + b2 return float(mu) m_200 = a2 * image_seq_len + b2 m_10 = a1 * image_seq_len + b1 a = (m_200 - m_10) / 190.0 b = m_200 - 200.0 * a mu = a * num_steps + b return float(mu) # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps def retrieve_timesteps( scheduler, num_inference_steps: int | None = None, device: str | torch.device | None = None, timesteps: list[int] | None = None, sigmas: list[float] | None = None, **kwargs, ): r""" Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles custom timesteps. 
Any kwargs will be supplied to `scheduler.set_timesteps`. Args: scheduler (`SchedulerMixin`): The scheduler to get timesteps from. num_inference_steps (`int`): The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` must be `None`. device (`str` or `torch.device`, *optional*): The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. timesteps (`list[int]`, *optional*): Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, `num_inference_steps` and `sigmas` must be `None`. sigmas (`list[float]`, *optional*): Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, `num_inference_steps` and `timesteps` must be `None`. Returns: `tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the second element is the number of inference steps. """ if timesteps is not None and sigmas is not None: raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") if timesteps is not None: accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" timestep schedules. Please check whether you are using the correct scheduler." ) scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif sigmas is not None: accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accept_sigmas: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" sigmas schedules. Please check whether you are using the correct scheduler." 
) scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return timesteps, num_inference_steps class Flux2SetTimestepsStep(ModularPipelineBlocks): model_name = "flux2" @property def expected_components(self) -> list[ComponentSpec]: return [ ComponentSpec("scheduler", FlowMatchEulerDiscreteScheduler), ComponentSpec("transformer", Flux2Transformer2DModel), ] @property def description(self) -> str: return "Step that sets the scheduler's timesteps for Flux2 inference using empirical mu calculation" @property def inputs(self) -> list[InputParam]: return [ InputParam("num_inference_steps", default=50), InputParam("timesteps"), InputParam("sigmas"), InputParam("latents", type_hint=torch.Tensor), InputParam("height", type_hint=int), InputParam("width", type_hint=int), ] @property def intermediate_outputs(self) -> list[OutputParam]: return [ OutputParam("timesteps", type_hint=torch.Tensor, description="The timesteps to use for inference"), OutputParam( "num_inference_steps", type_hint=int, description="The number of denoising steps to perform at inference time", ), ] @torch.no_grad() def __call__(self, components: Flux2ModularPipeline, state: PipelineState) -> PipelineState: block_state = self.get_block_state(state) device = components._execution_device scheduler = components.scheduler height = block_state.height or components.default_height width = block_state.width or components.default_width vae_scale_factor = components.vae_scale_factor latent_height = 2 * (int(height) // (vae_scale_factor * 2)) latent_width = 2 * (int(width) // (vae_scale_factor * 2)) image_seq_len = (latent_height // 2) * (latent_width // 2) num_inference_steps = block_state.num_inference_steps sigmas = block_state.sigmas timesteps = block_state.timesteps if timesteps is None and sigmas is None: sigmas = 
np.linspace(1.0, 1 / num_inference_steps, num_inference_steps) if hasattr(scheduler.config, "use_flow_sigmas") and scheduler.config.use_flow_sigmas: sigmas = None mu = compute_empirical_mu(image_seq_len=image_seq_len, num_steps=num_inference_steps) timesteps, num_inference_steps = retrieve_timesteps( scheduler, num_inference_steps, device, timesteps=timesteps, sigmas=sigmas, mu=mu, ) block_state.timesteps = timesteps block_state.num_inference_steps = num_inference_steps components.scheduler.set_begin_index(0) self.set_block_state(state, block_state) return components, state class Flux2PrepareLatentsStep(ModularPipelineBlocks): model_name = "flux2" @property def expected_components(self) -> list[ComponentSpec]: return [] @property def description(self) -> str: return "Prepare latents step that prepares the initial noise latents for Flux2 text-to-image generation" @property def inputs(self) -> list[InputParam]: return [ InputParam("height", type_hint=int), InputParam("width", type_hint=int), InputParam("latents", type_hint=torch.Tensor | None), InputParam("num_images_per_prompt", type_hint=int, default=1), InputParam("generator"), InputParam( "batch_size", required=True, type_hint=int, description="Number of prompts, the final batch size of model inputs should be `batch_size * num_images_per_prompt`.", ), InputParam("dtype", type_hint=torch.dtype, description="The dtype of the model inputs"), ] @property def intermediate_outputs(self) -> list[OutputParam]: return [ OutputParam( "latents", type_hint=torch.Tensor, description="The initial latents to use for the denoising process" ), OutputParam("latent_ids", type_hint=torch.Tensor, description="Position IDs for the latents (for RoPE)"), ] @staticmethod def check_inputs(components, block_state): vae_scale_factor = components.vae_scale_factor if (block_state.height is not None and block_state.height % (vae_scale_factor * 2) != 0) or ( block_state.width is not None and block_state.width % (vae_scale_factor * 2) != 0 ): 
logger.warning( f"`height` and `width` have to be divisible by {vae_scale_factor * 2} but are {block_state.height} and {block_state.width}." ) @staticmethod def _prepare_latent_ids(latents: torch.Tensor): """ Generates 4D position coordinates (T, H, W, L) for latent tensors. Args: latents: Latent tensor of shape (B, C, H, W) Returns: Position IDs tensor of shape (B, H*W, 4) """ batch_size, _, height, width = latents.shape t = torch.arange(1) h = torch.arange(height) w = torch.arange(width) l = torch.arange(1) latent_ids = torch.cartesian_prod(t, h, w, l) latent_ids = latent_ids.unsqueeze(0).expand(batch_size, -1, -1) return latent_ids @staticmethod def _pack_latents(latents): """Pack latents: (batch_size, num_channels, height, width) -> (batch_size, height * width, num_channels)""" batch_size, num_channels, height, width = latents.shape latents = latents.reshape(batch_size, num_channels, height * width).permute(0, 2, 1) return latents @staticmethod def prepare_latents( comp, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None, ): height = 2 * (int(height) // (comp.vae_scale_factor * 2)) width = 2 * (int(width) // (comp.vae_scale_factor * 2)) shape = (batch_size, num_channels_latents * 4, height // 2, width // 2) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
) if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device=device, dtype=dtype) return latents @torch.no_grad() def __call__(self, components: Flux2ModularPipeline, state: PipelineState) -> PipelineState: block_state = self.get_block_state(state) block_state.height = block_state.height or components.default_height block_state.width = block_state.width or components.default_width block_state.device = components._execution_device block_state.num_channels_latents = components.num_channels_latents self.check_inputs(components, block_state) batch_size = block_state.batch_size * block_state.num_images_per_prompt latents = self.prepare_latents( components, batch_size, block_state.num_channels_latents, block_state.height, block_state.width, block_state.dtype, block_state.device, block_state.generator, block_state.latents, ) latent_ids = self._prepare_latent_ids(latents) latent_ids = latent_ids.to(block_state.device) latents = self._pack_latents(latents) block_state.latents = latents block_state.latent_ids = latent_ids self.set_block_state(state, block_state) return components, state class Flux2RoPEInputsStep(ModularPipelineBlocks): model_name = "flux2" @property def description(self) -> str: return "Step that prepares the 4D RoPE position IDs for Flux2 denoising. Should be placed after text encoder and latent preparation steps." 
@property def inputs(self) -> list[InputParam]: return [ InputParam(name="prompt_embeds", required=True), ] @property def intermediate_outputs(self) -> list[OutputParam]: return [ OutputParam( name="txt_ids", kwargs_type="denoiser_input_fields", type_hint=torch.Tensor, description="4D position IDs (T, H, W, L) for text tokens, used for RoPE calculation.", ), ] @staticmethod def _prepare_text_ids(x: torch.Tensor, t_coord: torch.Tensor | None = None): """Prepare 4D position IDs for text tokens.""" B, L, _ = x.shape out_ids = [] for i in range(B): t = torch.arange(1) if t_coord is None else t_coord[i] h = torch.arange(1) w = torch.arange(1) seq_l = torch.arange(L) coords = torch.cartesian_prod(t, h, w, seq_l) out_ids.append(coords) return torch.stack(out_ids) def __call__(self, components: Flux2ModularPipeline, state: PipelineState) -> PipelineState: block_state = self.get_block_state(state) prompt_embeds = block_state.prompt_embeds device = prompt_embeds.device block_state.txt_ids = self._prepare_text_ids(prompt_embeds) block_state.txt_ids = block_state.txt_ids.to(device) self.set_block_state(state, block_state) return components, state class Flux2KleinBaseRoPEInputsStep(ModularPipelineBlocks): model_name = "flux2-klein" @property def description(self) -> str: return "Step that prepares the 4D RoPE position IDs for Flux2-Klein base model denoising. Should be placed after text encoder and latent preparation steps." 
@property def inputs(self) -> list[InputParam]: return [ InputParam(name="prompt_embeds", required=True), InputParam(name="negative_prompt_embeds", required=False), ] @property def intermediate_outputs(self) -> list[OutputParam]: return [ OutputParam( name="txt_ids", kwargs_type="denoiser_input_fields", type_hint=torch.Tensor, description="4D position IDs (T, H, W, L) for text tokens, used for RoPE calculation.", ), OutputParam( name="negative_txt_ids", kwargs_type="denoiser_input_fields", type_hint=torch.Tensor, description="4D position IDs (T, H, W, L) for negative text tokens, used for RoPE calculation.", ), ] @staticmethod def _prepare_text_ids(x: torch.Tensor, t_coord: torch.Tensor | None = None): """Prepare 4D position IDs for text tokens.""" B, L, _ = x.shape out_ids = [] for i in range(B): t = torch.arange(1) if t_coord is None else t_coord[i] h = torch.arange(1) w = torch.arange(1) seq_l = torch.arange(L) coords = torch.cartesian_prod(t, h, w, seq_l) out_ids.append(coords) return torch.stack(out_ids) def __call__(self, components: Flux2ModularPipeline, state: PipelineState) -> PipelineState: block_state = self.get_block_state(state) prompt_embeds = block_state.prompt_embeds device = prompt_embeds.device block_state.txt_ids = self._prepare_text_ids(prompt_embeds) block_state.txt_ids = block_state.txt_ids.to(device) block_state.negative_txt_ids = None if block_state.negative_prompt_embeds is not None: block_state.negative_txt_ids = self._prepare_text_ids(block_state.negative_prompt_embeds) block_state.negative_txt_ids = block_state.negative_txt_ids.to(device) self.set_block_state(state, block_state) return components, state class Flux2PrepareImageLatentsStep(ModularPipelineBlocks): model_name = "flux2" @property def description(self) -> str: return "Step that prepares image latents and their position IDs for Flux2 image conditioning." 
@property def inputs(self) -> list[InputParam]: return [ InputParam("image_latents", type_hint=list[torch.Tensor]), InputParam("batch_size", required=True, type_hint=int), InputParam("num_images_per_prompt", default=1, type_hint=int), ] @property def intermediate_outputs(self) -> list[OutputParam]: return [ OutputParam( "image_latents", type_hint=torch.Tensor, description="Packed image latents for conditioning", ), OutputParam( "image_latent_ids", type_hint=torch.Tensor, description="Position IDs for image latents", ), ] @staticmethod def _prepare_image_ids(image_latents: list[torch.Tensor], scale: int = 10): """ Generates 4D time-space coordinates (T, H, W, L) for a sequence of image latents. Args: image_latents: A list of image latent feature tensors of shape (1, C, H, W). scale: Factor used to define the time separation between latents. Returns: Combined coordinate tensor of shape (1, N_total, 4) """ if not isinstance(image_latents, list): raise ValueError(f"Expected `image_latents` to be a list, got {type(image_latents)}.") t_coords = [scale + scale * t for t in torch.arange(0, len(image_latents))] t_coords = [t.view(-1) for t in t_coords] image_latent_ids = [] for x, t in zip(image_latents, t_coords): x = x.squeeze(0) _, height, width = x.shape x_ids = torch.cartesian_prod(t, torch.arange(height), torch.arange(width), torch.arange(1)) image_latent_ids.append(x_ids) image_latent_ids = torch.cat(image_latent_ids, dim=0) image_latent_ids = image_latent_ids.unsqueeze(0) return image_latent_ids @staticmethod def _pack_latents(latents): """Pack latents: (batch_size, num_channels, height, width) -> (batch_size, height * width, num_channels)""" batch_size, num_channels, height, width = latents.shape latents = latents.reshape(batch_size, num_channels, height * width).permute(0, 2, 1) return latents @torch.no_grad() def __call__(self, components: Flux2ModularPipeline, state: PipelineState) -> PipelineState: block_state = self.get_block_state(state) image_latents = 
block_state.image_latents if image_latents is None: block_state.image_latents = None block_state.image_latent_ids = None self.set_block_state(state, block_state) return components, state device = components._execution_device batch_size = block_state.batch_size * block_state.num_images_per_prompt image_latent_ids = self._prepare_image_ids(image_latents) packed_latents = [] for latent in image_latents: packed = self._pack_latents(latent) packed = packed.squeeze(0) packed_latents.append(packed) image_latents = torch.cat(packed_latents, dim=0) image_latents = image_latents.unsqueeze(0) image_latents = image_latents.repeat(batch_size, 1, 1) image_latent_ids = image_latent_ids.repeat(batch_size, 1, 1) image_latent_ids = image_latent_ids.to(device) block_state.image_latents = image_latents block_state.image_latent_ids = image_latent_ids self.set_block_state(state, block_state) return components, state class Flux2PrepareGuidanceStep(ModularPipelineBlocks): model_name = "flux2" @property def description(self) -> str: return "Step that prepares the guidance scale tensor for Flux2 inference" @property def inputs(self) -> list[InputParam]: return [ InputParam("guidance_scale", default=4.0), InputParam("num_images_per_prompt", default=1), InputParam( "batch_size", required=True, type_hint=int, description="Number of prompts, the final batch size of model inputs should be `batch_size * num_images_per_prompt`.", ), ] @property def intermediate_outputs(self) -> list[OutputParam]: return [ OutputParam("guidance", type_hint=torch.Tensor, description="Guidance scale tensor"), ] @torch.no_grad() def __call__(self, components: Flux2ModularPipeline, state: PipelineState) -> PipelineState: block_state = self.get_block_state(state) device = components._execution_device batch_size = block_state.batch_size * block_state.num_images_per_prompt guidance = torch.full([1], block_state.guidance_scale, device=device, dtype=torch.float32) guidance = guidance.expand(batch_size) block_state.guidance 
= guidance self.set_block_state(state, block_state) return components, state
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/modular_pipelines/flux2/before_denoise.py", "license": "Apache License 2.0", "lines": 478, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:src/diffusers/modular_pipelines/flux2/decoders.py
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations from typing import Any, Union import numpy as np import PIL import torch from ...configuration_utils import FrozenDict from ...models import AutoencoderKLFlux2 from ...pipelines.flux2.image_processor import Flux2ImageProcessor from ...utils import logging from ..modular_pipeline import ModularPipelineBlocks, PipelineState from ..modular_pipeline_utils import ComponentSpec, InputParam, OutputParam logger = logging.get_logger(__name__) # pylint: disable=invalid-name class Flux2UnpackLatentsStep(ModularPipelineBlocks): model_name = "flux2" @property def description(self) -> str: return "Step that unpacks the latents from the denoising step" @property def inputs(self) -> list[tuple[str, Any]]: return [ InputParam( "latents", required=True, type_hint=torch.Tensor, description="The denoised latents from the denoising step", ), InputParam( "latent_ids", required=True, type_hint=torch.Tensor, description="Position IDs for the latents, used for unpacking", ), ] @property def intermediate_outputs(self) -> list[str]: return [ OutputParam( "latents", type_hint=torch.Tensor, description="The denoise latents from denoising step, unpacked with position IDs.", ) ] @staticmethod def _unpack_latents_with_ids(x: torch.Tensor, x_ids: torch.Tensor) -> torch.Tensor: """ Unpack latents using position IDs to scatter tokens into place. 
Args: x: Packed latents tensor of shape (B, seq_len, C) x_ids: Position IDs tensor of shape (B, seq_len, 4) with (T, H, W, L) coordinates Returns: Unpacked latents tensor of shape (B, C, H, W) """ x_list = [] for data, pos in zip(x, x_ids): _, ch = data.shape # noqa: F841 h_ids = pos[:, 1].to(torch.int64) w_ids = pos[:, 2].to(torch.int64) h = torch.max(h_ids) + 1 w = torch.max(w_ids) + 1 flat_ids = h_ids * w + w_ids out = torch.zeros((h * w, ch), device=data.device, dtype=data.dtype) out.scatter_(0, flat_ids.unsqueeze(1).expand(-1, ch), data) out = out.view(h, w, ch).permute(2, 0, 1) x_list.append(out) return torch.stack(x_list, dim=0) @torch.no_grad() def __call__(self, components, state: PipelineState) -> PipelineState: block_state = self.get_block_state(state) latents = block_state.latents latent_ids = block_state.latent_ids latents = self._unpack_latents_with_ids(latents, latent_ids) block_state.latents = latents self.set_block_state(state, block_state) return components, state class Flux2DecodeStep(ModularPipelineBlocks): model_name = "flux2" @property def expected_components(self) -> list[ComponentSpec]: return [ ComponentSpec("vae", AutoencoderKLFlux2), ComponentSpec( "image_processor", Flux2ImageProcessor, config=FrozenDict({"vae_scale_factor": 16, "vae_latent_channels": 32}), default_creation_method="from_config", ), ] @property def description(self) -> str: return "Step that decodes the denoised latents into images using Flux2 VAE with batch norm denormalization" @property def inputs(self) -> list[tuple[str, Any]]: return [ InputParam("output_type", default="pil"), InputParam( "latents", required=True, type_hint=torch.Tensor, description="The denoised latents from the denoising step", ), ] @property def intermediate_outputs(self) -> list[str]: return [ OutputParam( "images", type_hint=Union[list[PIL.Image.Image], torch.Tensor, np.ndarray], description="The generated images, can be a list of PIL.Image.Image, torch.Tensor or a numpy array", ) ] 
@staticmethod def _unpatchify_latents(latents): """Convert patchified latents back to regular format.""" batch_size, num_channels_latents, height, width = latents.shape latents = latents.reshape(batch_size, num_channels_latents // (2 * 2), 2, 2, height, width) latents = latents.permute(0, 1, 4, 2, 5, 3) latents = latents.reshape(batch_size, num_channels_latents // (2 * 2), height * 2, width * 2) return latents @torch.no_grad() def __call__(self, components, state: PipelineState) -> PipelineState: block_state = self.get_block_state(state) vae = components.vae latents = block_state.latents latents_bn_mean = vae.bn.running_mean.view(1, -1, 1, 1).to(latents.device, latents.dtype) latents_bn_std = torch.sqrt(vae.bn.running_var.view(1, -1, 1, 1) + vae.config.batch_norm_eps).to( latents.device, latents.dtype ) latents = latents * latents_bn_std + latents_bn_mean latents = self._unpatchify_latents(latents) block_state.images = vae.decode(latents, return_dict=False)[0] block_state.images = components.image_processor.postprocess( block_state.images, output_type=block_state.output_type ) self.set_block_state(state, block_state) return components, state
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/modular_pipelines/flux2/decoders.py", "license": "Apache License 2.0", "lines": 148, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:src/diffusers/modular_pipelines/flux2/denoise.py
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Any import torch from ...configuration_utils import FrozenDict from ...guiders import ClassifierFreeGuidance from ...models import Flux2Transformer2DModel from ...schedulers import FlowMatchEulerDiscreteScheduler from ...utils import is_torch_xla_available, logging from ..modular_pipeline import ( BlockState, LoopSequentialPipelineBlocks, ModularPipelineBlocks, PipelineState, ) from ..modular_pipeline_utils import ComponentSpec, ConfigSpec, InputParam, OutputParam from .modular_pipeline import Flux2KleinModularPipeline, Flux2ModularPipeline if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) # pylint: disable=invalid-name class Flux2LoopDenoiser(ModularPipelineBlocks): model_name = "flux2" @property def expected_components(self) -> list[ComponentSpec]: return [ComponentSpec("transformer", Flux2Transformer2DModel)] @property def description(self) -> str: return ( "Step within the denoising loop that denoises the latents for Flux2. " "This block should be used to compose the `sub_blocks` attribute of a `LoopSequentialPipelineBlocks` " "object (e.g. 
`Flux2DenoiseLoopWrapper`)" ) @property def inputs(self) -> list[tuple[str, Any]]: return [ InputParam("joint_attention_kwargs"), InputParam( "latents", required=True, type_hint=torch.Tensor, description="The latents to denoise. Shape: (B, seq_len, C)", ), InputParam( "image_latents", type_hint=torch.Tensor, description="Packed image latents for conditioning. Shape: (B, img_seq_len, C)", ), InputParam( "image_latent_ids", type_hint=torch.Tensor, description="Position IDs for image latents. Shape: (B, img_seq_len, 4)", ), InputParam( "guidance", required=True, type_hint=torch.Tensor, description="Guidance scale as a tensor", ), InputParam( "prompt_embeds", required=True, type_hint=torch.Tensor, description="Text embeddings from Mistral3", ), InputParam( "txt_ids", required=True, type_hint=torch.Tensor, description="4D position IDs for text tokens (T, H, W, L)", ), InputParam( "latent_ids", required=True, type_hint=torch.Tensor, description="4D position IDs for latent tokens (T, H, W, L)", ), ] @torch.no_grad() def __call__( self, components: Flux2ModularPipeline, block_state: BlockState, i: int, t: torch.Tensor ) -> PipelineState: latents = block_state.latents latent_model_input = latents.to(components.transformer.dtype) img_ids = block_state.latent_ids image_latents = getattr(block_state, "image_latents", None) if image_latents is not None: latent_model_input = torch.cat([latents, image_latents], dim=1).to(components.transformer.dtype) image_latent_ids = block_state.image_latent_ids img_ids = torch.cat([img_ids, image_latent_ids], dim=1) timestep = t.expand(latents.shape[0]).to(latents.dtype) noise_pred = components.transformer( hidden_states=latent_model_input, timestep=timestep / 1000, guidance=block_state.guidance, encoder_hidden_states=block_state.prompt_embeds, txt_ids=block_state.txt_ids, img_ids=img_ids, joint_attention_kwargs=block_state.joint_attention_kwargs, return_dict=False, )[0] noise_pred = noise_pred[:, : latents.size(1)] block_state.noise_pred = 
noise_pred return components, block_state # same as Flux2LoopDenoiser but guidance=None class Flux2KleinLoopDenoiser(ModularPipelineBlocks): model_name = "flux2-klein" @property def expected_components(self) -> list[ComponentSpec]: return [ComponentSpec("transformer", Flux2Transformer2DModel)] @property def description(self) -> str: return ( "Step within the denoising loop that denoises the latents for Flux2. " "This block should be used to compose the `sub_blocks` attribute of a `LoopSequentialPipelineBlocks` " "object (e.g. `Flux2DenoiseLoopWrapper`)" ) @property def inputs(self) -> list[tuple[str, Any]]: return [ InputParam("joint_attention_kwargs"), InputParam( "latents", required=True, type_hint=torch.Tensor, description="The latents to denoise. Shape: (B, seq_len, C)", ), InputParam( "image_latents", type_hint=torch.Tensor, description="Packed image latents for conditioning. Shape: (B, img_seq_len, C)", ), InputParam( "image_latent_ids", type_hint=torch.Tensor, description="Position IDs for image latents. 
Shape: (B, img_seq_len, 4)", ), InputParam( "prompt_embeds", required=True, type_hint=torch.Tensor, description="Text embeddings from Qwen3", ), InputParam( "txt_ids", required=True, type_hint=torch.Tensor, description="4D position IDs for text tokens (T, H, W, L)", ), InputParam( "latent_ids", required=True, type_hint=torch.Tensor, description="4D position IDs for latent tokens (T, H, W, L)", ), ] @torch.no_grad() def __call__( self, components: Flux2KleinModularPipeline, block_state: BlockState, i: int, t: torch.Tensor ) -> PipelineState: latents = block_state.latents latent_model_input = latents.to(components.transformer.dtype) img_ids = block_state.latent_ids image_latents = getattr(block_state, "image_latents", None) if image_latents is not None: latent_model_input = torch.cat([latents, image_latents], dim=1).to(components.transformer.dtype) image_latent_ids = block_state.image_latent_ids img_ids = torch.cat([img_ids, image_latent_ids], dim=1) timestep = t.expand(latents.shape[0]).to(latents.dtype) noise_pred = components.transformer( hidden_states=latent_model_input, timestep=timestep / 1000, guidance=None, encoder_hidden_states=block_state.prompt_embeds, txt_ids=block_state.txt_ids, img_ids=img_ids, joint_attention_kwargs=block_state.joint_attention_kwargs, return_dict=False, )[0] noise_pred = noise_pred[:, : latents.size(1)] block_state.noise_pred = noise_pred return components, block_state # support CFG for Flux2-Klein base model class Flux2KleinBaseLoopDenoiser(ModularPipelineBlocks): model_name = "flux2-klein" @property def expected_components(self) -> list[ComponentSpec]: return [ ComponentSpec("transformer", Flux2Transformer2DModel), ComponentSpec( "guider", ClassifierFreeGuidance, config=FrozenDict({"guidance_scale": 4.0}), default_creation_method="from_config", ), ] @property def expected_configs(self) -> list[ConfigSpec]: return [ ConfigSpec(name="is_distilled", default=False), ] @property def description(self) -> str: return ( "Step within the 
denoising loop that denoises the latents for Flux2. " "This block should be used to compose the `sub_blocks` attribute of a `LoopSequentialPipelineBlocks` " "object (e.g. `Flux2DenoiseLoopWrapper`)" ) @property def inputs(self) -> list[tuple[str, Any]]: return [ InputParam("joint_attention_kwargs"), InputParam( "latents", required=True, type_hint=torch.Tensor, description="The latents to denoise. Shape: (B, seq_len, C)", ), InputParam( "image_latents", type_hint=torch.Tensor, description="Packed image latents for conditioning. Shape: (B, img_seq_len, C)", ), InputParam( "image_latent_ids", type_hint=torch.Tensor, description="Position IDs for image latents. Shape: (B, img_seq_len, 4)", ), InputParam( "prompt_embeds", required=True, type_hint=torch.Tensor, description="Text embeddings from Qwen3", ), InputParam( "negative_prompt_embeds", required=False, type_hint=torch.Tensor, description="Negative text embeddings from Qwen3", ), InputParam( "txt_ids", required=True, type_hint=torch.Tensor, description="4D position IDs for text tokens (T, H, W, L)", ), InputParam( "negative_txt_ids", required=False, type_hint=torch.Tensor, description="4D position IDs for negative text tokens (T, H, W, L)", ), InputParam( "latent_ids", required=True, type_hint=torch.Tensor, description="4D position IDs for latent tokens (T, H, W, L)", ), ] @torch.no_grad() def __call__( self, components: Flux2KleinModularPipeline, block_state: BlockState, i: int, t: torch.Tensor ) -> PipelineState: latents = block_state.latents latent_model_input = latents.to(components.transformer.dtype) img_ids = block_state.latent_ids image_latents = getattr(block_state, "image_latents", None) if image_latents is not None: latent_model_input = torch.cat([latents, image_latents], dim=1).to(components.transformer.dtype) image_latent_ids = block_state.image_latent_ids img_ids = torch.cat([img_ids, image_latent_ids], dim=1) timestep = t.expand(latents.shape[0]).to(latents.dtype) guider_inputs = { 
"encoder_hidden_states": ( getattr(block_state, "prompt_embeds", None), getattr(block_state, "negative_prompt_embeds", None), ), "txt_ids": ( getattr(block_state, "txt_ids", None), getattr(block_state, "negative_txt_ids", None), ), } components.guider.set_state(step=i, num_inference_steps=block_state.num_inference_steps, timestep=t) guider_state = components.guider.prepare_inputs(guider_inputs) for guider_state_batch in guider_state: components.guider.prepare_models(components.transformer) cond_kwargs = {input_name: getattr(guider_state_batch, input_name) for input_name in guider_inputs.keys()} noise_pred = components.transformer( hidden_states=latent_model_input, timestep=timestep / 1000, guidance=None, img_ids=img_ids, joint_attention_kwargs=block_state.joint_attention_kwargs, return_dict=False, **cond_kwargs, )[0] guider_state_batch.noise_pred = noise_pred[:, : latents.size(1)] components.guider.cleanup_models(components.transformer) # perform guidance block_state.noise_pred = components.guider(guider_state)[0] return components, block_state class Flux2LoopAfterDenoiser(ModularPipelineBlocks): model_name = "flux2" @property def expected_components(self) -> list[ComponentSpec]: return [ComponentSpec("scheduler", FlowMatchEulerDiscreteScheduler)] @property def description(self) -> str: return ( "Step within the denoising loop that updates the latents after denoising. " "This block should be used to compose the `sub_blocks` attribute of a `LoopSequentialPipelineBlocks` " "object (e.g. 
`Flux2DenoiseLoopWrapper`)" ) @property def inputs(self) -> list[tuple[str, Any]]: return [] @property def intermediate_inputs(self) -> list[str]: return [InputParam("generator")] @property def intermediate_outputs(self) -> list[OutputParam]: return [OutputParam("latents", type_hint=torch.Tensor, description="The denoised latents")] @torch.no_grad() def __call__(self, components: Flux2ModularPipeline, block_state: BlockState, i: int, t: torch.Tensor): latents_dtype = block_state.latents.dtype block_state.latents = components.scheduler.step( block_state.noise_pred, t, block_state.latents, return_dict=False, )[0] if block_state.latents.dtype != latents_dtype: if torch.backends.mps.is_available(): block_state.latents = block_state.latents.to(latents_dtype) return components, block_state class Flux2DenoiseLoopWrapper(LoopSequentialPipelineBlocks): model_name = "flux2" @property def description(self) -> str: return ( "Pipeline block that iteratively denoises the latents over `timesteps`. " "The specific steps within each iteration can be customized with `sub_blocks` attribute" ) @property def loop_expected_components(self) -> list[ComponentSpec]: return [ ComponentSpec("scheduler", FlowMatchEulerDiscreteScheduler), ComponentSpec("transformer", Flux2Transformer2DModel), ] @property def loop_inputs(self) -> list[InputParam]: return [ InputParam( "timesteps", required=True, type_hint=torch.Tensor, description="The timesteps to use for the denoising process.", ), InputParam( "num_inference_steps", required=True, type_hint=int, description="The number of inference steps to use for the denoising process.", ), ] @torch.no_grad() def __call__(self, components: Flux2ModularPipeline, state: PipelineState) -> PipelineState: block_state = self.get_block_state(state) block_state.num_warmup_steps = max( len(block_state.timesteps) - block_state.num_inference_steps * components.scheduler.order, 0 ) with self.progress_bar(total=block_state.num_inference_steps) as progress_bar: for i, t 
in enumerate(block_state.timesteps): components, block_state = self.loop_step(components, block_state, i=i, t=t) if i == len(block_state.timesteps) - 1 or ( (i + 1) > block_state.num_warmup_steps and (i + 1) % components.scheduler.order == 0 ): progress_bar.update() if XLA_AVAILABLE: xm.mark_step() self.set_block_state(state, block_state) return components, state class Flux2DenoiseStep(Flux2DenoiseLoopWrapper): block_classes = [Flux2LoopDenoiser, Flux2LoopAfterDenoiser] block_names = ["denoiser", "after_denoiser"] @property def description(self) -> str: return ( "Denoise step that iteratively denoises the latents for Flux2. \n" "Its loop logic is defined in `Flux2DenoiseLoopWrapper.__call__` method \n" "At each iteration, it runs blocks defined in `sub_blocks` sequentially:\n" " - `Flux2LoopDenoiser`\n" " - `Flux2LoopAfterDenoiser`\n" "This block supports both text-to-image and image-conditioned generation." ) class Flux2KleinDenoiseStep(Flux2DenoiseLoopWrapper): block_classes = [Flux2KleinLoopDenoiser, Flux2LoopAfterDenoiser] block_names = ["denoiser", "after_denoiser"] @property def description(self) -> str: return ( "Denoise step that iteratively denoises the latents for Flux2. \n" "Its loop logic is defined in `Flux2DenoiseLoopWrapper.__call__` method \n" "At each iteration, it runs blocks defined in `sub_blocks` sequentially:\n" " - `Flux2KleinLoopDenoiser`\n" " - `Flux2LoopAfterDenoiser`\n" "This block supports both text-to-image and image-conditioned generation." ) class Flux2KleinBaseDenoiseStep(Flux2DenoiseLoopWrapper): block_classes = [Flux2KleinBaseLoopDenoiser, Flux2LoopAfterDenoiser] block_names = ["denoiser", "after_denoiser"] @property def description(self) -> str: return ( "Denoise step that iteratively denoises the latents for Flux2. 
\n" "Its loop logic is defined in `Flux2DenoiseLoopWrapper.__call__` method \n" "At each iteration, it runs blocks defined in `sub_blocks` sequentially:\n" " - `Flux2KleinBaseLoopDenoiser`\n" " - `Flux2LoopAfterDenoiser`\n" "This block supports both text-to-image and image-conditioned generation." )
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/modular_pipelines/flux2/denoise.py", "license": "Apache License 2.0", "lines": 434, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:src/diffusers/modular_pipelines/flux2/encoders.py
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from transformers import AutoProcessor, Mistral3ForConditionalGeneration, Qwen2TokenizerFast, Qwen3ForCausalLM from ...configuration_utils import FrozenDict from ...guiders import ClassifierFreeGuidance from ...models import AutoencoderKLFlux2 from ...utils import logging from ..modular_pipeline import ModularPipelineBlocks, PipelineState from ..modular_pipeline_utils import ComponentSpec, ConfigSpec, InputParam, OutputParam from .modular_pipeline import Flux2KleinModularPipeline, Flux2ModularPipeline logger = logging.get_logger(__name__) # pylint: disable=invalid-name def format_text_input(prompts: list[str], system_message: str = None): """Format prompts for Mistral3 chat template.""" cleaned_txt = [prompt.replace("[IMG]", "") for prompt in prompts] return [ [ { "role": "system", "content": [{"type": "text", "text": system_message}], }, {"role": "user", "content": [{"type": "text", "text": prompt}]}, ] for prompt in cleaned_txt ] # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents def retrieve_latents( encoder_output: torch.Tensor, generator: torch.Generator | None = None, sample_mode: str = "sample" ): if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": return encoder_output.latent_dist.sample(generator) elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": return 
encoder_output.latent_dist.mode() elif hasattr(encoder_output, "latents"): return encoder_output.latents else: raise AttributeError("Could not access latents of provided encoder_output") class Flux2TextEncoderStep(ModularPipelineBlocks): model_name = "flux2" # fmt: off DEFAULT_SYSTEM_MESSAGE = "You are an AI that reasons about image descriptions. You give structured responses focusing on object relationships, object attribution and actions without speculation." # fmt: on @property def description(self) -> str: return "Text Encoder step that generates text embeddings using Mistral3 to guide the image generation" @property def expected_components(self) -> list[ComponentSpec]: return [ ComponentSpec("text_encoder", Mistral3ForConditionalGeneration), ComponentSpec("tokenizer", AutoProcessor), ] @property def inputs(self) -> list[InputParam]: return [ InputParam("prompt"), InputParam("max_sequence_length", type_hint=int, default=512, required=False), InputParam("text_encoder_out_layers", type_hint=tuple[int], default=(10, 20, 30), required=False), ] @property def intermediate_outputs(self) -> list[OutputParam]: return [ OutputParam( "prompt_embeds", kwargs_type="denoiser_input_fields", type_hint=torch.Tensor, description="Text embeddings from Mistral3 used to guide the image generation", ), ] @staticmethod def check_inputs(block_state): prompt = block_state.prompt if prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") @staticmethod def _get_mistral_3_prompt_embeds( text_encoder: Mistral3ForConditionalGeneration, tokenizer: AutoProcessor, prompt: str | list[str], dtype: torch.dtype | None = None, device: torch.device | None = None, max_sequence_length: int = 512, # fmt: off system_message: str = "You are an AI that reasons about image descriptions. 
You give structured responses focusing on object relationships, object attribution and actions without speculation.", # fmt: on hidden_states_layers: tuple[int] = (10, 20, 30), ): dtype = text_encoder.dtype if dtype is None else dtype device = text_encoder.device if device is None else device prompt = [prompt] if isinstance(prompt, str) else prompt messages_batch = format_text_input(prompts=prompt, system_message=system_message) inputs = tokenizer.apply_chat_template( messages_batch, add_generation_prompt=False, tokenize=True, return_dict=True, return_tensors="pt", padding="max_length", truncation=True, max_length=max_sequence_length, ) input_ids = inputs["input_ids"].to(device) attention_mask = inputs["attention_mask"].to(device) output = text_encoder( input_ids=input_ids, attention_mask=attention_mask, output_hidden_states=True, use_cache=False, ) out = torch.stack([output.hidden_states[k] for k in hidden_states_layers], dim=1) out = out.to(dtype=dtype, device=device) batch_size, num_channels, seq_len, hidden_dim = out.shape prompt_embeds = out.permute(0, 2, 1, 3).reshape(batch_size, seq_len, num_channels * hidden_dim) return prompt_embeds @torch.no_grad() def __call__(self, components: Flux2ModularPipeline, state: PipelineState) -> PipelineState: block_state = self.get_block_state(state) self.check_inputs(block_state) block_state.device = components._execution_device prompt = block_state.prompt if prompt is None: prompt = "" prompt = [prompt] if isinstance(prompt, str) else prompt block_state.prompt_embeds = self._get_mistral_3_prompt_embeds( text_encoder=components.text_encoder, tokenizer=components.tokenizer, prompt=prompt, device=block_state.device, max_sequence_length=block_state.max_sequence_length, system_message=self.DEFAULT_SYSTEM_MESSAGE, hidden_states_layers=block_state.text_encoder_out_layers, ) self.set_block_state(state, block_state) return components, state class Flux2RemoteTextEncoderStep(ModularPipelineBlocks): model_name = "flux2" REMOTE_URL = 
"https://remote-text-encoder-flux-2.huggingface.co/predict" @property def description(self) -> str: return "Text Encoder step that generates text embeddings using a remote API endpoint" @property def expected_components(self) -> list[ComponentSpec]: return [] @property def inputs(self) -> list[InputParam]: return [ InputParam("prompt"), ] @property def intermediate_outputs(self) -> list[OutputParam]: return [ OutputParam( "prompt_embeds", kwargs_type="denoiser_input_fields", type_hint=torch.Tensor, description="Text embeddings from remote API used to guide the image generation", ), ] @staticmethod def check_inputs(block_state): prompt = block_state.prompt if prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(block_state.prompt)}") @torch.no_grad() def __call__(self, components: Flux2ModularPipeline, state: PipelineState) -> PipelineState: import io import requests from huggingface_hub import get_token block_state = self.get_block_state(state) self.check_inputs(block_state) block_state.device = components._execution_device prompt = block_state.prompt if prompt is None: prompt = "" prompt = [prompt] if isinstance(prompt, str) else prompt response = requests.post( self.REMOTE_URL, json={"prompt": prompt}, headers={ "Authorization": f"Bearer {get_token()}", "Content-Type": "application/json", }, ) response.raise_for_status() block_state.prompt_embeds = torch.load(io.BytesIO(response.content), weights_only=True) block_state.prompt_embeds = block_state.prompt_embeds.to(block_state.device) self.set_block_state(state, block_state) return components, state class Flux2KleinTextEncoderStep(ModularPipelineBlocks): model_name = "flux2-klein" @property def description(self) -> str: return "Text Encoder step that generates text embeddings using Qwen3 to guide the image generation" @property def expected_components(self) -> list[ComponentSpec]: return [ 
ComponentSpec("text_encoder", Qwen3ForCausalLM), ComponentSpec("tokenizer", Qwen2TokenizerFast), ] @property def expected_configs(self) -> list[ConfigSpec]: return [ ConfigSpec(name="is_distilled", default=True), ] @property def inputs(self) -> list[InputParam]: return [ InputParam("prompt"), InputParam("max_sequence_length", type_hint=int, default=512, required=False), InputParam("text_encoder_out_layers", type_hint=tuple[int], default=(9, 18, 27), required=False), ] @property def intermediate_outputs(self) -> list[OutputParam]: return [ OutputParam( "prompt_embeds", kwargs_type="denoiser_input_fields", type_hint=torch.Tensor, description="Text embeddings from qwen3 used to guide the image generation", ), ] @staticmethod def check_inputs(block_state): prompt = block_state.prompt if prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") @staticmethod # Copied from diffusers.pipelines.flux2.pipeline_flux2_klein.Flux2KleinPipeline._get_qwen3_prompt_embeds def _get_qwen3_prompt_embeds( text_encoder: Qwen3ForCausalLM, tokenizer: Qwen2TokenizerFast, prompt: str | list[str], dtype: torch.dtype | None = None, device: torch.device | None = None, max_sequence_length: int = 512, hidden_states_layers: list[int] = (9, 18, 27), ): dtype = text_encoder.dtype if dtype is None else dtype device = text_encoder.device if device is None else device prompt = [prompt] if isinstance(prompt, str) else prompt all_input_ids = [] all_attention_masks = [] for single_prompt in prompt: messages = [{"role": "user", "content": single_prompt}] text = tokenizer.apply_chat_template( messages, tokenize=False, add_generation_prompt=True, enable_thinking=False, ) inputs = tokenizer( text, return_tensors="pt", padding="max_length", truncation=True, max_length=max_sequence_length, ) all_input_ids.append(inputs["input_ids"]) all_attention_masks.append(inputs["attention_mask"]) input_ids = 
torch.cat(all_input_ids, dim=0).to(device) attention_mask = torch.cat(all_attention_masks, dim=0).to(device) # Forward pass through the model output = text_encoder( input_ids=input_ids, attention_mask=attention_mask, output_hidden_states=True, use_cache=False, ) # Only use outputs from intermediate layers and stack them out = torch.stack([output.hidden_states[k] for k in hidden_states_layers], dim=1) out = out.to(dtype=dtype, device=device) batch_size, num_channels, seq_len, hidden_dim = out.shape prompt_embeds = out.permute(0, 2, 1, 3).reshape(batch_size, seq_len, num_channels * hidden_dim) return prompt_embeds @torch.no_grad() def __call__(self, components: Flux2KleinModularPipeline, state: PipelineState) -> PipelineState: block_state = self.get_block_state(state) self.check_inputs(block_state) device = components._execution_device prompt = block_state.prompt if prompt is None: prompt = "" prompt = [prompt] if isinstance(prompt, str) else prompt block_state.prompt_embeds = self._get_qwen3_prompt_embeds( text_encoder=components.text_encoder, tokenizer=components.tokenizer, prompt=prompt, device=device, max_sequence_length=block_state.max_sequence_length, hidden_states_layers=block_state.text_encoder_out_layers, ) self.set_block_state(state, block_state) return components, state class Flux2KleinBaseTextEncoderStep(ModularPipelineBlocks): model_name = "flux2-klein" @property def description(self) -> str: return "Text Encoder step that generates text embeddings using Qwen3 to guide the image generation" @property def expected_components(self) -> list[ComponentSpec]: return [ ComponentSpec("text_encoder", Qwen3ForCausalLM), ComponentSpec("tokenizer", Qwen2TokenizerFast), ComponentSpec( "guider", ClassifierFreeGuidance, config=FrozenDict({"guidance_scale": 4.0}), default_creation_method="from_config", ), ] @property def expected_configs(self) -> list[ConfigSpec]: return [ ConfigSpec(name="is_distilled", default=False), ] @property def inputs(self) -> list[InputParam]: 
return [ InputParam("prompt"), InputParam("max_sequence_length", type_hint=int, default=512, required=False), InputParam("text_encoder_out_layers", type_hint=tuple[int], default=(9, 18, 27), required=False), ] @property def intermediate_outputs(self) -> list[OutputParam]: return [ OutputParam( "prompt_embeds", kwargs_type="denoiser_input_fields", type_hint=torch.Tensor, description="Text embeddings from qwen3 used to guide the image generation", ), OutputParam( "negative_prompt_embeds", kwargs_type="denoiser_input_fields", type_hint=torch.Tensor, description="Negative text embeddings from qwen3 used to guide the image generation", ), ] @staticmethod def check_inputs(block_state): prompt = block_state.prompt if prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") @staticmethod # Copied from diffusers.pipelines.flux2.pipeline_flux2_klein.Flux2KleinPipeline._get_qwen3_prompt_embeds def _get_qwen3_prompt_embeds( text_encoder: Qwen3ForCausalLM, tokenizer: Qwen2TokenizerFast, prompt: str | list[str], dtype: torch.dtype | None = None, device: torch.device | None = None, max_sequence_length: int = 512, hidden_states_layers: list[int] = (9, 18, 27), ): dtype = text_encoder.dtype if dtype is None else dtype device = text_encoder.device if device is None else device prompt = [prompt] if isinstance(prompt, str) else prompt all_input_ids = [] all_attention_masks = [] for single_prompt in prompt: messages = [{"role": "user", "content": single_prompt}] text = tokenizer.apply_chat_template( messages, tokenize=False, add_generation_prompt=True, enable_thinking=False, ) inputs = tokenizer( text, return_tensors="pt", padding="max_length", truncation=True, max_length=max_sequence_length, ) all_input_ids.append(inputs["input_ids"]) all_attention_masks.append(inputs["attention_mask"]) input_ids = torch.cat(all_input_ids, dim=0).to(device) attention_mask = 
torch.cat(all_attention_masks, dim=0).to(device) # Forward pass through the model output = text_encoder( input_ids=input_ids, attention_mask=attention_mask, output_hidden_states=True, use_cache=False, ) # Only use outputs from intermediate layers and stack them out = torch.stack([output.hidden_states[k] for k in hidden_states_layers], dim=1) out = out.to(dtype=dtype, device=device) batch_size, num_channels, seq_len, hidden_dim = out.shape prompt_embeds = out.permute(0, 2, 1, 3).reshape(batch_size, seq_len, num_channels * hidden_dim) return prompt_embeds @torch.no_grad() def __call__(self, components: Flux2KleinModularPipeline, state: PipelineState) -> PipelineState: block_state = self.get_block_state(state) self.check_inputs(block_state) device = components._execution_device prompt = block_state.prompt if prompt is None: prompt = "" prompt = [prompt] if isinstance(prompt, str) else prompt block_state.prompt_embeds = self._get_qwen3_prompt_embeds( text_encoder=components.text_encoder, tokenizer=components.tokenizer, prompt=prompt, device=device, max_sequence_length=block_state.max_sequence_length, hidden_states_layers=block_state.text_encoder_out_layers, ) if components.requires_unconditional_embeds: negative_prompt = [""] * len(prompt) block_state.negative_prompt_embeds = self._get_qwen3_prompt_embeds( text_encoder=components.text_encoder, tokenizer=components.tokenizer, prompt=negative_prompt, device=device, max_sequence_length=block_state.max_sequence_length, hidden_states_layers=block_state.text_encoder_out_layers, ) else: block_state.negative_prompt_embeds = None self.set_block_state(state, block_state) return components, state class Flux2VaeEncoderStep(ModularPipelineBlocks): model_name = "flux2" @property def description(self) -> str: return "VAE Encoder step that encodes preprocessed images into latent representations for Flux2." 
@property def expected_components(self) -> list[ComponentSpec]: return [ComponentSpec("vae", AutoencoderKLFlux2)] @property def inputs(self) -> list[InputParam]: return [ InputParam("condition_images", type_hint=list[torch.Tensor]), InputParam("generator"), ] @property def intermediate_outputs(self) -> list[OutputParam]: return [ OutputParam( "image_latents", type_hint=list[torch.Tensor], description="List of latent representations for each reference image", ), ] @staticmethod def _patchify_latents(latents): """Convert latents to patchified format for Flux2.""" batch_size, num_channels_latents, height, width = latents.shape latents = latents.view(batch_size, num_channels_latents, height // 2, 2, width // 2, 2) latents = latents.permute(0, 1, 3, 5, 2, 4) latents = latents.reshape(batch_size, num_channels_latents * 4, height // 2, width // 2) return latents def _encode_vae_image(self, vae: AutoencoderKLFlux2, image: torch.Tensor, generator: torch.Generator): """Encode a single image using Flux2 VAE with batch norm normalization.""" if image.ndim != 4: raise ValueError(f"Expected image dims 4, got {image.ndim}.") image_latents = retrieve_latents(vae.encode(image), generator=generator, sample_mode="argmax") image_latents = self._patchify_latents(image_latents) latents_bn_mean = vae.bn.running_mean.view(1, -1, 1, 1).to(image_latents.device, image_latents.dtype) latents_bn_std = torch.sqrt(vae.bn.running_var.view(1, -1, 1, 1) + vae.config.batch_norm_eps) latents_bn_std = latents_bn_std.to(image_latents.device, image_latents.dtype) image_latents = (image_latents - latents_bn_mean) / latents_bn_std return image_latents @torch.no_grad() def __call__(self, components: Flux2ModularPipeline, state: PipelineState) -> PipelineState: block_state = self.get_block_state(state) condition_images = block_state.condition_images if condition_images is None: return components, state device = components._execution_device dtype = components.vae.dtype image_latents = [] for image in 
condition_images: image = image.to(device=device, dtype=dtype) latent = self._encode_vae_image( vae=components.vae, image=image, generator=block_state.generator, ) image_latents.append(latent) block_state.image_latents = image_latents self.set_block_state(state, block_state) return components, state
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/modular_pipelines/flux2/encoders.py", "license": "Apache License 2.0", "lines": 494, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:src/diffusers/modular_pipelines/flux2/inputs.py
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import torch

from ...configuration_utils import FrozenDict
from ...pipelines.flux2.image_processor import Flux2ImageProcessor
from ...utils import logging
from ..modular_pipeline import ModularPipelineBlocks, PipelineState
from ..modular_pipeline_utils import ComponentSpec, InputParam, OutputParam
from .modular_pipeline import Flux2ModularPipeline


logger = logging.get_logger(__name__)


class Flux2TextInputStep(ModularPipelineBlocks):
    """Standardizes text-embedding inputs for the Flux2 denoising loop.

    Determines `batch_size`/`dtype` from `prompt_embeds` and duplicates the
    embeddings so the final batch size is `batch_size * num_images_per_prompt`.
    """

    model_name = "flux2"

    @property
    def description(self) -> str:
        return (
            "This step:\n"
            " 1. Determines `batch_size` and `dtype` based on `prompt_embeds`\n"
            " 2. Ensures all text embeddings have consistent batch sizes (batch_size * num_images_per_prompt)"
        )

    @property
    def inputs(self) -> list[InputParam]:
        return [
            InputParam("num_images_per_prompt", default=1),
            InputParam(
                "prompt_embeds",
                required=True,
                kwargs_type="denoiser_input_fields",
                type_hint=torch.Tensor,
                description="Pre-generated text embeddings. Can be generated from text_encoder step.",
            ),
        ]

    @property
    # Fixed return annotation: this property returns OutputParam objects, not strings
    # (matches Flux2ProcessImagesInputStep.intermediate_outputs in this file).
    def intermediate_outputs(self) -> list[OutputParam]:
        return [
            OutputParam(
                "batch_size",
                type_hint=int,
                description="Number of prompts, the final batch size of model inputs should be batch_size * num_images_per_prompt",
            ),
            OutputParam(
                "dtype",
                type_hint=torch.dtype,
                description="Data type of model tensor inputs (determined by `prompt_embeds`)",
            ),
            OutputParam(
                "prompt_embeds",
                type_hint=torch.Tensor,
                kwargs_type="denoiser_input_fields",
                description="Text embeddings used to guide the image generation",
            ),
        ]

    @torch.no_grad()
    def __call__(self, components: Flux2ModularPipeline, state: PipelineState) -> PipelineState:
        block_state = self.get_block_state(state)

        block_state.batch_size = block_state.prompt_embeds.shape[0]
        block_state.dtype = block_state.prompt_embeds.dtype

        # Duplicate each prompt's embedding num_images_per_prompt times along the
        # batch dimension: [B, S, D] -> [B * num_images_per_prompt, S, D].
        _, seq_len, _ = block_state.prompt_embeds.shape
        block_state.prompt_embeds = block_state.prompt_embeds.repeat(1, block_state.num_images_per_prompt, 1)
        block_state.prompt_embeds = block_state.prompt_embeds.view(
            block_state.batch_size * block_state.num_images_per_prompt, seq_len, -1
        )

        self.set_block_state(state, block_state)
        return components, state


class Flux2KleinBaseTextInputStep(ModularPipelineBlocks):
    """Standardizes text-embedding inputs for Flux2-Klein (base model).

    Same as `Flux2TextInputStep` but additionally expands the optional
    `negative_prompt_embeds` to the final batch size.
    """

    model_name = "flux2-klein"

    @property
    def description(self) -> str:
        return (
            "This step:\n"
            " 1. Determines `batch_size` and `dtype` based on `prompt_embeds`\n"
            " 2. Ensures all text embeddings have consistent batch sizes (batch_size * num_images_per_prompt)"
        )

    @property
    def inputs(self) -> list[InputParam]:
        return [
            InputParam("num_images_per_prompt", default=1),
            InputParam(
                "prompt_embeds",
                required=True,
                kwargs_type="denoiser_input_fields",
                type_hint=torch.Tensor,
                description="Pre-generated text embeddings. Can be generated from text_encoder step.",
            ),
            InputParam(
                "negative_prompt_embeds",
                required=False,
                kwargs_type="denoiser_input_fields",
                type_hint=torch.Tensor,
                description="Pre-generated negative text embeddings. Can be generated from text_encoder step.",
            ),
        ]

    @property
    # Fixed return annotation: this property returns OutputParam objects, not strings.
    def intermediate_outputs(self) -> list[OutputParam]:
        return [
            OutputParam(
                "batch_size",
                type_hint=int,
                description="Number of prompts, the final batch size of model inputs should be batch_size * num_images_per_prompt",
            ),
            OutputParam(
                "dtype",
                type_hint=torch.dtype,
                description="Data type of model tensor inputs (determined by `prompt_embeds`)",
            ),
            OutputParam(
                "prompt_embeds",
                type_hint=torch.Tensor,
                kwargs_type="denoiser_input_fields",
                description="Text embeddings used to guide the image generation",
            ),
            OutputParam(
                "negative_prompt_embeds",
                type_hint=torch.Tensor,
                kwargs_type="denoiser_input_fields",
                description="Negative text embeddings used to guide the image generation",
            ),
        ]

    @torch.no_grad()
    def __call__(self, components: Flux2ModularPipeline, state: PipelineState) -> PipelineState:
        block_state = self.get_block_state(state)

        block_state.batch_size = block_state.prompt_embeds.shape[0]
        block_state.dtype = block_state.prompt_embeds.dtype

        # Expand positive embeddings to batch_size * num_images_per_prompt.
        _, seq_len, _ = block_state.prompt_embeds.shape
        block_state.prompt_embeds = block_state.prompt_embeds.repeat(1, block_state.num_images_per_prompt, 1)
        block_state.prompt_embeds = block_state.prompt_embeds.view(
            block_state.batch_size * block_state.num_images_per_prompt, seq_len, -1
        )

        # Expand negative embeddings the same way when present (guidance enabled).
        if block_state.negative_prompt_embeds is not None:
            _, seq_len, _ = block_state.negative_prompt_embeds.shape
            block_state.negative_prompt_embeds = block_state.negative_prompt_embeds.repeat(
                1, block_state.num_images_per_prompt, 1
            )
            block_state.negative_prompt_embeds = block_state.negative_prompt_embeds.view(
                block_state.batch_size * block_state.num_images_per_prompt, seq_len, -1
            )

        self.set_block_state(state, block_state)
        return components, state


class Flux2ProcessImagesInputStep(ModularPipelineBlocks):
    """Validates and preprocesses reference images into `condition_images`.

    Images larger than ~1 megapixel are downscaled, dimensions are floored to a
    multiple of `vae_scale_factor * 2`, and `height`/`width` default to the last
    processed image's dimensions when not provided by the caller.
    """

    model_name = "flux2"

    @property
    def description(self) -> str:
        return "Image preprocess step for Flux2. Validates and preprocesses reference images."

    @property
    def expected_components(self) -> list[ComponentSpec]:
        return [
            ComponentSpec(
                "image_processor",
                Flux2ImageProcessor,
                config=FrozenDict({"vae_scale_factor": 16, "vae_latent_channels": 32}),
                default_creation_method="from_config",
            ),
        ]

    @property
    def inputs(self) -> list[InputParam]:
        return [
            InputParam("image"),
            InputParam("height"),
            InputParam("width"),
        ]

    @property
    def intermediate_outputs(self) -> list[OutputParam]:
        return [OutputParam(name="condition_images", type_hint=list[torch.Tensor])]

    @torch.no_grad()
    def __call__(self, components: Flux2ModularPipeline, state: PipelineState):
        block_state = self.get_block_state(state)

        images = block_state.image

        # No reference images: emit None so downstream steps can skip VAE encoding.
        if images is None:
            block_state.condition_images = None
            self.set_block_state(state, block_state)
            return components, state

        if not isinstance(images, list):
            images = [images]

        condition_images = []
        for img in images:
            components.image_processor.check_image_input(img)

            # Cap reference images at ~1 megapixel before preprocessing.
            image_width, image_height = img.size
            if image_width * image_height > 1024 * 1024:
                img = components.image_processor._resize_to_target_area(img, 1024 * 1024)
                image_width, image_height = img.size

            # Floor dimensions to a multiple of vae_scale_factor * 2 so that the
            # latents can later be patchified 2x2 without a remainder.
            multiple_of = components.vae_scale_factor * 2
            image_width = (image_width // multiple_of) * multiple_of
            image_height = (image_height // multiple_of) * multiple_of

            condition_img = components.image_processor.preprocess(
                img, height=image_height, width=image_width, resize_mode="crop"
            )
            condition_images.append(condition_img)

            # Default output height/width to the (last) reference image's size.
            if block_state.height is None:
                block_state.height = image_height
            if block_state.width is None:
                block_state.width = image_width

        block_state.condition_images = condition_images

        self.set_block_state(state, block_state)
        return components, state
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/modular_pipelines/flux2/inputs.py", "license": "Apache License 2.0", "lines": 204, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:src/diffusers/modular_pipelines/flux2/modular_pipeline.py
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from ...loaders import Flux2LoraLoaderMixin
from ...utils import logging
from ..modular_pipeline import ModularPipeline


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class Flux2ModularPipeline(ModularPipeline, Flux2LoraLoaderMixin):
    """
    A ModularPipeline for Flux2.

    > [!WARNING]
    > This is an experimental feature and is likely to change in the future.
    """

    default_blocks_name = "Flux2AutoBlocks"

    @property
    def default_height(self):
        # Default output height in pixels: sample size (latent units) * VAE factor.
        return self.default_sample_size * self.vae_scale_factor

    @property
    def default_width(self):
        # Default output width in pixels: sample size (latent units) * VAE factor.
        return self.default_sample_size * self.vae_scale_factor

    @property
    def default_sample_size(self):
        return 128

    @property
    def vae_scale_factor(self):
        # Derive the spatial compression factor from the loaded VAE when
        # available; otherwise fall back to the canonical value of 8.
        vae = getattr(self, "vae", None)
        if vae is None:
            return 8
        return 2 ** (len(vae.config.block_out_channels) - 1)

    @property
    def num_channels_latents(self):
        # Derive latent channel count from the transformer when loaded;
        # otherwise fall back to 32.
        transformer = getattr(self, "transformer", None)
        if not transformer:
            return 32
        return transformer.config.in_channels // 4


class Flux2KleinModularPipeline(Flux2ModularPipeline):
    """
    A ModularPipeline for Flux2-Klein (distilled model).

    > [!WARNING]
    > This is an experimental feature and is likely to change in the future.
    """

    default_blocks_name = "Flux2KleinAutoBlocks"

    @property
    def requires_unconditional_embeds(self):
        # Distilled checkpoints never need unconditional (negative) embeddings.
        if hasattr(self.config, "is_distilled") and self.config.is_distilled:
            return False
        guider = getattr(self, "guider", None)
        if guider is None:
            return False
        # Negative embeddings are only needed when the guider is active and
        # expects more than one condition.
        return guider._enabled and guider.num_conditions > 1


class Flux2KleinBaseModularPipeline(Flux2ModularPipeline):
    """
    A ModularPipeline for Flux2-Klein (base model).

    > [!WARNING]
    > This is an experimental feature and is likely to change in the future.
    """

    default_blocks_name = "Flux2KleinBaseAutoBlocks"

    @property
    def requires_unconditional_embeds(self):
        # Distilled checkpoints never need unconditional (negative) embeddings.
        if hasattr(self.config, "is_distilled") and self.config.is_distilled:
            return False
        guider = getattr(self, "guider", None)
        if guider is None:
            return False
        # Negative embeddings are only needed when the guider is active and
        # expects more than one condition.
        return guider._enabled and guider.num_conditions > 1
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/modular_pipelines/flux2/modular_pipeline.py", "license": "Apache License 2.0", "lines": 72, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:tests/modular_pipelines/flux2/test_modular_pipeline_flux2.py
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import random

import numpy as np
import PIL
import pytest

from diffusers.modular_pipelines import (
    Flux2AutoBlocks,
    Flux2ModularPipeline,
)

from ...testing_utils import floats_tensor, torch_device
from ..test_modular_pipelines_common import ModularPipelineTesterMixin


# Expected block layout for the plain text-to-image workflow.
FLUX2_TEXT2IMAGE_WORKFLOWS = {
    "text2image": [
        ("text_encoder", "Flux2TextEncoderStep"),
        ("denoise.input", "Flux2TextInputStep"),
        ("denoise.prepare_latents", "Flux2PrepareLatentsStep"),
        ("denoise.set_timesteps", "Flux2SetTimestepsStep"),
        ("denoise.prepare_guidance", "Flux2PrepareGuidanceStep"),
        ("denoise.prepare_rope_inputs", "Flux2RoPEInputsStep"),
        ("denoise.denoise", "Flux2DenoiseStep"),
        ("denoise.after_denoise", "Flux2UnpackLatentsStep"),
        ("decode", "Flux2DecodeStep"),
    ],
}


class TestFlux2ModularPipelineFast(ModularPipelineTesterMixin):
    """Fast tests for the text-to-image Flux2 modular pipeline."""

    pipeline_class = Flux2ModularPipeline
    pipeline_blocks_class = Flux2AutoBlocks
    pretrained_model_name_or_path = "hf-internal-testing/tiny-flux2-modular"

    params = frozenset(["prompt", "height", "width", "guidance_scale"])
    batch_params = frozenset(["prompt"])

    expected_workflow_blocks = FLUX2_TEXT2IMAGE_WORKFLOWS

    def get_dummy_inputs(self, seed=0):
        # TODO (Dhruv): Update text encoder config so that vocab_size matches tokenizer
        return {
            "prompt": "A painting of a squirrel eating a burger",
            "max_sequence_length": 8,  # bit of a hack to workaround vocab size mismatch
            "text_encoder_out_layers": (1,),
            "generator": self.get_generator(seed),
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "height": 32,
            "width": 32,
            "output_type": "pt",
        }

    def test_float16_inference(self):
        super().test_float16_inference(9e-2)


# Expected block layout when reference images condition the generation.
FLUX2_IMAGE_CONDITIONED_WORKFLOWS = {
    "image_conditioned": [
        ("text_encoder", "Flux2TextEncoderStep"),
        ("vae_encoder.preprocess", "Flux2ProcessImagesInputStep"),
        ("vae_encoder.encode", "Flux2VaeEncoderStep"),
        ("denoise.input", "Flux2TextInputStep"),
        ("denoise.prepare_image_latents", "Flux2PrepareImageLatentsStep"),
        ("denoise.prepare_latents", "Flux2PrepareLatentsStep"),
        ("denoise.set_timesteps", "Flux2SetTimestepsStep"),
        ("denoise.prepare_guidance", "Flux2PrepareGuidanceStep"),
        ("denoise.prepare_rope_inputs", "Flux2RoPEInputsStep"),
        ("denoise.denoise", "Flux2DenoiseStep"),
        ("denoise.after_denoise", "Flux2UnpackLatentsStep"),
        ("decode", "Flux2DecodeStep"),
    ],
}


class TestFlux2ImageConditionedModularPipelineFast(ModularPipelineTesterMixin):
    """Fast tests for the image-conditioned Flux2 modular pipeline."""

    pipeline_class = Flux2ModularPipeline
    pipeline_blocks_class = Flux2AutoBlocks
    pretrained_model_name_or_path = "hf-internal-testing/tiny-flux2-modular"

    params = frozenset(["prompt", "height", "width", "guidance_scale", "image"])
    batch_params = frozenset(["prompt", "image"])

    expected_workflow_blocks = FLUX2_IMAGE_CONDITIONED_WORKFLOWS

    def get_dummy_inputs(self, seed=0):
        # TODO (Dhruv): Update text encoder config so that vocab_size matches tokenizer
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "max_sequence_length": 8,  # bit of a hack to workaround vocab size mismatch
            "text_encoder_out_layers": (1,),
            "generator": self.get_generator(seed),
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "height": 32,
            "width": 32,
            "output_type": "pt",
        }

        # Build a deterministic RGB reference image from random pixel values.
        pixel_values = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(torch_device)
        pixel_values = pixel_values.cpu().permute(0, 2, 3, 1)[0]
        inputs["image"] = PIL.Image.fromarray(np.uint8(pixel_values * 255)).convert("RGB")

        return inputs

    def test_float16_inference(self):
        super().test_float16_inference(9e-2)

    @pytest.mark.skip(reason="batched inference is currently not supported")
    def test_inference_batch_single_identical(self, batch_size=2, expected_max_diff=0.0001):
        return
{ "repo_id": "huggingface/diffusers", "file_path": "tests/modular_pipelines/flux2/test_modular_pipeline_flux2.py", "license": "Apache License 2.0", "lines": 108, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/diffusers:src/diffusers/modular_pipelines/z_image/before_denoise.py
# Copyright 2025 Alibaba Z-Image Team and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import torch from ...models import ZImageTransformer2DModel from ...schedulers import FlowMatchEulerDiscreteScheduler from ...utils import logging from ...utils.torch_utils import randn_tensor from ..modular_pipeline import ModularPipelineBlocks, PipelineState from ..modular_pipeline_utils import ComponentSpec, InputParam, OutputParam from .modular_pipeline import ZImageModularPipeline logger = logging.get_logger(__name__) # pylint: disable=invalid-name # TODO(yiyi, aryan): We need another step before text encoder to set the `num_inference_steps` attribute for guider so that # things like when to do guidance and how many conditions to be prepared can be determined. Currently, this is done by # always assuming you want to do guidance in the Guiders. So, negative embeddings are prepared regardless of what the # configuration of guider is. def repeat_tensor_to_batch_size( input_name: str, input_tensor: torch.Tensor, batch_size: int, num_images_per_prompt: int = 1, ) -> torch.Tensor: """Repeat tensor elements to match the final batch size. This function expands a tensor's batch dimension to match the final batch size (batch_size * num_images_per_prompt) by repeating each element along dimension 0. The input tensor must have batch size 1 or batch_size. 
The function will: - If batch size is 1: repeat each element (batch_size * num_images_per_prompt) times - If batch size equals batch_size: repeat each element num_images_per_prompt times Args: input_name (str): Name of the input tensor (used for error messages) input_tensor (torch.Tensor): The tensor to repeat. Must have batch size 1 or batch_size. batch_size (int): The base batch size (number of prompts) num_images_per_prompt (int, optional): Number of images to generate per prompt. Defaults to 1. Returns: torch.Tensor: The repeated tensor with final batch size (batch_size * num_images_per_prompt) Raises: ValueError: If input_tensor is not a torch.Tensor or has invalid batch size Examples: tensor = torch.tensor([[1, 2, 3]]) # shape: [1, 3] repeated = repeat_tensor_to_batch_size("image", tensor, batch_size=2, num_images_per_prompt=2) repeated # tensor([[1, 2, 3], [1, 2, 3], [1, 2, 3], [1, 2, 3]]) - shape: [4, 3] tensor = torch.tensor([[1, 2, 3], [4, 5, 6]]) # shape: [2, 3] repeated = repeat_tensor_to_batch_size("image", tensor, batch_size=2, num_images_per_prompt=2) repeated # tensor([[1, 2, 3], [1, 2, 3], [4, 5, 6], [4, 5, 6]]) - shape: [4, 3] """ # make sure input is a tensor if not isinstance(input_tensor, torch.Tensor): raise ValueError(f"`{input_name}` must be a tensor") # make sure input tensor e.g. image_latents has batch size 1 or batch_size same as prompts if input_tensor.shape[0] == 1: repeat_by = batch_size * num_images_per_prompt elif input_tensor.shape[0] == batch_size: repeat_by = num_images_per_prompt else: raise ValueError( f"`{input_name}` must have have batch size 1 or {batch_size}, but got {input_tensor.shape[0]}" ) # expand the tensor to match the batch_size * num_images_per_prompt input_tensor = input_tensor.repeat_interleave(repeat_by, dim=0) return input_tensor def calculate_dimension_from_latents(latents: torch.Tensor, vae_scale_factor_spatial: int) -> tuple[int, int]: """Calculate image dimensions from latent tensor dimensions. 
This function converts latent spatial dimensions to image spatial dimensions by multiplying the latent height/width by the VAE scale factor. Args: latents (torch.Tensor): The latent tensor. Must have 4 dimensions. Expected shapes: [batch, channels, height, width] vae_scale_factor (int): The scale factor used by the VAE to compress image spatial dimension. By default, it is 16 Returns: tuple[int, int]: The calculated image dimensions as (height, width) """ latent_height, latent_width = latents.shape[2:] height = latent_height * vae_scale_factor_spatial // 2 width = latent_width * vae_scale_factor_spatial // 2 return height, width # Copied from diffusers.pipelines.flux.pipeline_flux.calculate_shift def calculate_shift( image_seq_len, base_seq_len: int = 256, max_seq_len: int = 4096, base_shift: float = 0.5, max_shift: float = 1.15, ): m = (max_shift - base_shift) / (max_seq_len - base_seq_len) b = base_shift - m * base_seq_len mu = image_seq_len * m + b return mu # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps def retrieve_timesteps( scheduler, num_inference_steps: int | None = None, device: str | torch.device | None = None, timesteps: list[int] | None = None, sigmas: list[float] | None = None, **kwargs, ): r""" Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. Args: scheduler (`SchedulerMixin`): The scheduler to get timesteps from. num_inference_steps (`int`): The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` must be `None`. device (`str` or `torch.device`, *optional*): The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. timesteps (`list[int]`, *optional*): Custom timesteps used to override the timestep spacing strategy of the scheduler. 
If `timesteps` is passed, `num_inference_steps` and `sigmas` must be `None`. sigmas (`list[float]`, *optional*): Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, `num_inference_steps` and `timesteps` must be `None`. Returns: `tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the second element is the number of inference steps. """ if timesteps is not None and sigmas is not None: raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") if timesteps is not None: accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" timestep schedules. Please check whether you are using the correct scheduler." ) scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif sigmas is not None: accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accept_sigmas: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" sigmas schedules. Please check whether you are using the correct scheduler." ) scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return timesteps, num_inference_steps class ZImageTextInputStep(ModularPipelineBlocks): model_name = "z-image" @property def description(self) -> str: return ( "Input processing step that:\n" " 1. Determines `batch_size` and `dtype` based on `prompt_embeds`\n" " 2. 
Adjusts input tensor shapes based on `batch_size` (number of prompts) and `num_images_per_prompt`\n\n" "All input tensors are expected to have either batch_size=1 or match the batch_size\n" "of prompt_embeds. The tensors will be duplicated across the batch dimension to\n" "have a final batch_size of batch_size * num_images_per_prompt." ) @property def expected_components(self) -> list[ComponentSpec]: return [ ComponentSpec("transformer", ZImageTransformer2DModel), ] @property def inputs(self) -> list[InputParam]: return [ InputParam("num_images_per_prompt", default=1), InputParam( "prompt_embeds", required=True, type_hint=list[torch.Tensor], description="Pre-generated text embeddings. Can be generated from text_encoder step.", ), InputParam( "negative_prompt_embeds", type_hint=list[torch.Tensor], description="Pre-generated negative text embeddings. Can be generated from text_encoder step.", ), ] @property def intermediate_outputs(self) -> list[str]: return [ OutputParam( "batch_size", type_hint=int, description="Number of prompts, the final batch size of model inputs should be batch_size * num_images_per_prompt", ), OutputParam( "dtype", type_hint=torch.dtype, description="Data type of model tensor inputs (determined by `transformer.dtype`)", ), ] def check_inputs(self, components, block_state): if block_state.prompt_embeds is not None and block_state.negative_prompt_embeds is not None: if not isinstance(block_state.prompt_embeds, list): raise ValueError( f"`prompt_embeds` must be a list when passed directly, but got {type(block_state.prompt_embeds)}." ) if not isinstance(block_state.negative_prompt_embeds, list): raise ValueError( f"`negative_prompt_embeds` must be a list when passed directly, but got {type(block_state.negative_prompt_embeds)}." 
) if len(block_state.prompt_embeds) != len(block_state.negative_prompt_embeds): raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same length when passed directly, but" f" got: `prompt_embeds` {len(block_state.prompt_embeds)} != `negative_prompt_embeds`" f" {len(block_state.negative_prompt_embeds)}." ) @torch.no_grad() def __call__(self, components: ZImageModularPipeline, state: PipelineState) -> PipelineState: block_state = self.get_block_state(state) self.check_inputs(components, block_state) block_state.batch_size = len(block_state.prompt_embeds) block_state.dtype = block_state.prompt_embeds[0].dtype if block_state.num_images_per_prompt > 1: prompt_embeds = [pe for pe in block_state.prompt_embeds for _ in range(block_state.num_images_per_prompt)] block_state.prompt_embeds = prompt_embeds if block_state.negative_prompt_embeds is not None: negative_prompt_embeds = [ npe for npe in block_state.negative_prompt_embeds for _ in range(block_state.num_images_per_prompt) ] block_state.negative_prompt_embeds = negative_prompt_embeds self.set_block_state(state, block_state) return components, state class ZImageAdditionalInputsStep(ModularPipelineBlocks): model_name = "z-image" def __init__( self, image_latent_inputs: list[str] = ["image_latents"], additional_batch_inputs: list[str] = [], ): """Initialize a configurable step that standardizes the inputs for the denoising step. It:\n" This step handles multiple common tasks to prepare inputs for the denoising step: 1. For encoded image latents, use it update height/width if None, and expands batch size 2. For additional_batch_inputs: Only expands batch dimensions to match final batch size This is a dynamic block that allows you to configure which inputs to process. Args: image_latent_inputs (list[str], optional): Names of image latent tensors to process. In additional to adjust batch size of these inputs, they will be used to determine height/width. Can be a single string or list of strings. 
Defaults to ["image_latents"]. additional_batch_inputs (list[str], optional): Names of additional conditional input tensors to expand batch size. These tensors will only have their batch dimensions adjusted to match the final batch size. Can be a single string or list of strings. Defaults to []. Examples: # Configure to process image_latents (default behavior) ZImageAdditionalInputsStep() # Configure to process multiple image latent inputs ZImageAdditionalInputsStep(image_latent_inputs=["image_latents", "control_image_latents"]) # Configure to process image latents and additional batch inputs ZImageAdditionalInputsStep( image_latent_inputs=["image_latents"], additional_batch_inputs=["image_embeds"] ) """ if not isinstance(image_latent_inputs, list): image_latent_inputs = [image_latent_inputs] if not isinstance(additional_batch_inputs, list): additional_batch_inputs = [additional_batch_inputs] self._image_latent_inputs = image_latent_inputs self._additional_batch_inputs = additional_batch_inputs super().__init__() @property def description(self) -> str: # Functionality section summary_section = ( "Input processing step that:\n" " 1. For image latent inputs: Updates height/width if None, and expands batch size\n" " 2. For additional batch inputs: Expands batch dimensions to match final batch size" ) # Inputs info inputs_info = "" if self._image_latent_inputs or self._additional_batch_inputs: inputs_info = "\n\nConfigured inputs:" if self._image_latent_inputs: inputs_info += f"\n - Image latent inputs: {self._image_latent_inputs}" if self._additional_batch_inputs: inputs_info += f"\n - Additional batch inputs: {self._additional_batch_inputs}" # Placement guidance placement_section = "\n\nThis block should be placed after the encoder steps and the text input step." 
return summary_section + inputs_info + placement_section @property def inputs(self) -> list[InputParam]: inputs = [ InputParam(name="num_images_per_prompt", default=1), InputParam(name="batch_size", required=True), InputParam(name="height"), InputParam(name="width"), ] # Add image latent inputs for image_latent_input_name in self._image_latent_inputs: inputs.append(InputParam(name=image_latent_input_name)) # Add additional batch inputs for input_name in self._additional_batch_inputs: inputs.append(InputParam(name=input_name)) return inputs def __call__(self, components: ZImageModularPipeline, state: PipelineState) -> PipelineState: block_state = self.get_block_state(state) # Process image latent inputs (height/width calculation, patchify, and batch expansion) for image_latent_input_name in self._image_latent_inputs: image_latent_tensor = getattr(block_state, image_latent_input_name) if image_latent_tensor is None: continue # 1. Calculate num_frames, height/width from latents height, width = calculate_dimension_from_latents(image_latent_tensor, components.vae_scale_factor_spatial) block_state.height = block_state.height or height block_state.width = block_state.width or width # Process additional batch inputs (only batch expansion) for input_name in self._additional_batch_inputs: input_tensor = getattr(block_state, input_name) if input_tensor is None: continue # Only expand batch size input_tensor = repeat_tensor_to_batch_size( input_name=input_name, input_tensor=input_tensor, num_images_per_prompt=block_state.num_images_per_prompt, batch_size=block_state.batch_size, ) setattr(block_state, input_name, input_tensor) self.set_block_state(state, block_state) return components, state class ZImagePrepareLatentsStep(ModularPipelineBlocks): model_name = "z-image" @property def description(self) -> str: return "Prepare latents step that prepares the latents for the text-to-video generation process" @property def inputs(self) -> list[InputParam]: return [ 
InputParam("height", type_hint=int), InputParam("width", type_hint=int), InputParam("latents", type_hint=torch.Tensor | None), InputParam("num_images_per_prompt", type_hint=int, default=1), InputParam("generator"), InputParam( "batch_size", required=True, type_hint=int, description="Number of prompts, the final batch size of model inputs should be `batch_size * num_images_per_prompt`. Can be generated in input step.", ), InputParam("dtype", type_hint=torch.dtype, description="The dtype of the model inputs"), ] @property def intermediate_outputs(self) -> list[OutputParam]: return [ OutputParam( "latents", type_hint=torch.Tensor, description="The initial latents to use for the denoising process" ) ] def check_inputs(self, components, block_state): if (block_state.height is not None and block_state.height % components.vae_scale_factor_spatial != 0) or ( block_state.width is not None and block_state.width % components.vae_scale_factor_spatial != 0 ): raise ValueError( f"`height` and `width` have to be divisible by {components.vae_scale_factor_spatial} but are {block_state.height} and {block_state.width}." 
) @staticmethod # Copied from diffusers.pipelines.z_image.pipeline_z_image.ZImagePipeline.prepare_latents with self->comp def prepare_latents( comp, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None, ): height = 2 * (int(height) // (comp.vae_scale_factor * 2)) width = 2 * (int(width) // (comp.vae_scale_factor * 2)) shape = (batch_size, num_channels_latents, height, width) if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: if latents.shape != shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") latents = latents.to(device) return latents @torch.no_grad() def __call__(self, components: ZImageModularPipeline, state: PipelineState) -> PipelineState: block_state = self.get_block_state(state) self.check_inputs(components, block_state) device = components._execution_device dtype = torch.float32 block_state.height = block_state.height or components.default_height block_state.width = block_state.width or components.default_width block_state.latents = self.prepare_latents( components, batch_size=block_state.batch_size * block_state.num_images_per_prompt, num_channels_latents=components.num_channels_latents, height=block_state.height, width=block_state.width, dtype=dtype, device=device, generator=block_state.generator, latents=block_state.latents, ) self.set_block_state(state, block_state) return components, state class ZImageSetTimestepsStep(ModularPipelineBlocks): model_name = "z-image" @property def expected_components(self) -> list[ComponentSpec]: return [ ComponentSpec("scheduler", FlowMatchEulerDiscreteScheduler), ] @property def description(self) -> str: return "Step that sets the scheduler's timesteps for inference. Need to run after prepare latents step." 
@property def inputs(self) -> list[InputParam]: return [ InputParam("latents", required=True), InputParam("num_inference_steps", default=9), InputParam("sigmas"), ] @property def intermediate_outputs(self) -> list[OutputParam]: return [ OutputParam( "timesteps", type_hint=torch.Tensor, description="The timesteps to use for the denoising process" ), ] @torch.no_grad() def __call__(self, components: ZImageModularPipeline, state: PipelineState) -> PipelineState: block_state = self.get_block_state(state) device = components._execution_device latent_height, latent_width = block_state.latents.shape[2], block_state.latents.shape[3] image_seq_len = (latent_height // 2) * (latent_width // 2) # sequence length after patchify mu = calculate_shift( image_seq_len, base_seq_len=components.scheduler.config.get("base_image_seq_len", 256), max_seq_len=components.scheduler.config.get("max_image_seq_len", 4096), base_shift=components.scheduler.config.get("base_shift", 0.5), max_shift=components.scheduler.config.get("max_shift", 1.15), ) components.scheduler.sigma_min = 0.0 block_state.timesteps, block_state.num_inference_steps = retrieve_timesteps( components.scheduler, block_state.num_inference_steps, device, sigmas=block_state.sigmas, mu=mu, ) self.set_block_state(state, block_state) return components, state class ZImageSetTimestepsWithStrengthStep(ModularPipelineBlocks): model_name = "z-image" @property def expected_components(self) -> list[ComponentSpec]: return [ ComponentSpec("scheduler", FlowMatchEulerDiscreteScheduler), ] @property def description(self) -> str: return "Step that sets the scheduler's timesteps for inference with strength. Need to run after set timesteps step." 
@property def inputs(self) -> list[InputParam]: return [ InputParam("timesteps", required=True), InputParam("num_inference_steps", required=True), InputParam("strength", default=0.6), ] def check_inputs(self, components, block_state): if block_state.strength < 0.0 or block_state.strength > 1.0: raise ValueError(f"Strength must be between 0.0 and 1.0, but got {block_state.strength}") @torch.no_grad() def __call__(self, components: ZImageModularPipeline, state: PipelineState) -> PipelineState: block_state = self.get_block_state(state) self.check_inputs(components, block_state) init_timestep = min(block_state.num_inference_steps * block_state.strength, block_state.num_inference_steps) t_start = int(max(block_state.num_inference_steps - init_timestep, 0)) timesteps = components.scheduler.timesteps[t_start * components.scheduler.order :] if hasattr(components.scheduler, "set_begin_index"): components.scheduler.set_begin_index(t_start * components.scheduler.order) block_state.timesteps = timesteps block_state.num_inference_steps = block_state.num_inference_steps - t_start self.set_block_state(state, block_state) return components, state class ZImagePrepareLatentswithImageStep(ModularPipelineBlocks): model_name = "z-image" @property def description(self) -> str: return "step that prepares the latents with image condition, need to run after set timesteps and prepare latents step." @property def inputs(self) -> list[InputParam]: return [ InputParam("latents", required=True), InputParam("image_latents", required=True), InputParam("timesteps", required=True), ] def __call__(self, components: ZImageModularPipeline, state: PipelineState) -> PipelineState: block_state = self.get_block_state(state) latent_timestep = block_state.timesteps[:1].repeat(block_state.latents.shape[0]) block_state.latents = components.scheduler.scale_noise( block_state.image_latents, latent_timestep, block_state.latents ) self.set_block_state(state, block_state) return components, state
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/modular_pipelines/z_image/before_denoise.py", "license": "Apache License 2.0", "lines": 508, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:src/diffusers/modular_pipelines/z_image/decoders.py
# Copyright 2025 Alibaba Z-Image Team and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Any import numpy as np import PIL import torch from ...configuration_utils import FrozenDict from ...image_processor import VaeImageProcessor from ...models import AutoencoderKL from ...utils import logging from ..modular_pipeline import ModularPipelineBlocks, PipelineState from ..modular_pipeline_utils import ComponentSpec, InputParam, OutputParam logger = logging.get_logger(__name__) # pylint: disable=invalid-name class ZImageVaeDecoderStep(ModularPipelineBlocks): model_name = "z-image" @property def expected_components(self) -> list[ComponentSpec]: return [ ComponentSpec("vae", AutoencoderKL), ComponentSpec( "image_processor", VaeImageProcessor, config=FrozenDict({"vae_scale_factor": 8 * 2}), default_creation_method="from_config", ), ] @property def description(self) -> str: return "Step that decodes the denoised latents into images" @property def inputs(self) -> list[tuple[str, Any]]: return [ InputParam( "latents", required=True, ), InputParam( name="output_type", default="pil", type_hint=str, description="The type of the output images, can be 'pil', 'np', 'pt'", ), ] @property def intermediate_outputs(self) -> list[str]: return [ OutputParam( "images", type_hint=list[PIL.Image.Image, list[torch.Tensor], list[np.ndarray]], description="The generated images, can be a PIL.Image.Image, torch.Tensor or a numpy array", ) ] 
@torch.no_grad() def __call__(self, components, state: PipelineState) -> PipelineState: block_state = self.get_block_state(state) vae_dtype = components.vae.dtype latents = block_state.latents.to(vae_dtype) latents = latents / components.vae.config.scaling_factor + components.vae.config.shift_factor block_state.images = components.vae.decode(latents, return_dict=False)[0] block_state.images = components.image_processor.postprocess( block_state.images, output_type=block_state.output_type ) self.set_block_state(state, block_state) return components, state
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/modular_pipelines/z_image/decoders.py", "license": "Apache License 2.0", "lines": 75, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:src/diffusers/modular_pipelines/z_image/denoise.py
# Copyright 2025 Alibaba Z-Image Team and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Any import torch from ...configuration_utils import FrozenDict from ...guiders import ClassifierFreeGuidance from ...models import ZImageTransformer2DModel from ...schedulers import FlowMatchEulerDiscreteScheduler from ...utils import logging from ..modular_pipeline import ( BlockState, LoopSequentialPipelineBlocks, ModularPipelineBlocks, PipelineState, ) from ..modular_pipeline_utils import ComponentSpec, InputParam from .modular_pipeline import ZImageModularPipeline logger = logging.get_logger(__name__) # pylint: disable=invalid-name class ZImageLoopBeforeDenoiser(ModularPipelineBlocks): model_name = "z-image" @property def description(self) -> str: return ( "step within the denoising loop that prepares the latent input for the denoiser. " "This block should be used to compose the `sub_blocks` attribute of a `LoopSequentialPipelineBlocks` " "object (e.g. `ZImageDenoiseLoopWrapper`)" ) @property def inputs(self) -> list[InputParam]: return [ InputParam( "latents", required=True, type_hint=torch.Tensor, description="The initial latents to use for the denoising process. Can be generated in prepare_latent step.", ), InputParam( "dtype", required=True, type_hint=torch.dtype, description="The dtype of the model inputs. 
Can be generated in input step.", ), ] @torch.no_grad() def __call__(self, components: ZImageModularPipeline, block_state: BlockState, i: int, t: torch.Tensor): latents = block_state.latents.unsqueeze(2).to( block_state.dtype ) # [batch_size, num_channels, 1, height, width] block_state.latent_model_input = list(latents.unbind(dim=0)) # list of [num_channels, 1, height, width] timestep = t.expand(latents.shape[0]).to(block_state.dtype) timestep = (1000 - timestep) / 1000 block_state.timestep = timestep return components, block_state class ZImageLoopDenoiser(ModularPipelineBlocks): model_name = "z-image" def __init__( self, guider_input_fields: dict[str, Any] = {"cap_feats": ("prompt_embeds", "negative_prompt_embeds")}, ): """Initialize a denoiser block that calls the denoiser model. This block is used in Z-Image. Args: guider_input_fields: A dictionary that maps each argument expected by the denoiser model (for example, "encoder_hidden_states") to data stored on 'block_state'. The value can be either: - A tuple of strings. For instance, {"encoder_hidden_states": ("prompt_embeds", "negative_prompt_embeds")} tells the guider to read `block_state.prompt_embeds` and `block_state.negative_prompt_embeds` and pass them as the conditional and unconditional batches of 'encoder_hidden_states'. - A string. For example, {"encoder_hidden_image": "image_embeds"} makes the guider forward `block_state.image_embeds` for both conditional and unconditional batches. 
""" if not isinstance(guider_input_fields, dict): raise ValueError(f"guider_input_fields must be a dictionary but is {type(guider_input_fields)}") self._guider_input_fields = guider_input_fields super().__init__() @property def expected_components(self) -> list[ComponentSpec]: return [ ComponentSpec( "guider", ClassifierFreeGuidance, config=FrozenDict({"guidance_scale": 5.0, "enabled": False}), default_creation_method="from_config", ), ComponentSpec("transformer", ZImageTransformer2DModel), ] @property def description(self) -> str: return ( "Step within the denoising loop that denoise the latents with guidance. " "This block should be used to compose the `sub_blocks` attribute of a `LoopSequentialPipelineBlocks` " "object (e.g. `ZImageDenoiseLoopWrapper`)" ) @property def inputs(self) -> list[tuple[str, Any]]: inputs = [ InputParam( "num_inference_steps", required=True, type_hint=int, description="The number of inference steps to use for the denoising process. Can be generated in set_timesteps step.", ), InputParam( kwargs_type="denoiser_input_fields", description="The conditional model inputs for the denoiser: e.g. prompt_embeds, negative_prompt_embeds, etc.", ), ] guider_input_names = [] uncond_guider_input_names = [] for value in self._guider_input_fields.values(): if isinstance(value, tuple): guider_input_names.append(value[0]) uncond_guider_input_names.append(value[1]) else: guider_input_names.append(value) for name in guider_input_names: inputs.append(InputParam(name=name, required=True)) for name in uncond_guider_input_names: inputs.append(InputParam(name=name)) return inputs @torch.no_grad() def __call__( self, components: ZImageModularPipeline, block_state: BlockState, i: int, t: torch.Tensor ) -> PipelineState: components.guider.set_state(step=i, num_inference_steps=block_state.num_inference_steps, timestep=t) # The guider splits model inputs into separate batches for conditional/unconditional predictions. 
# For CFG with guider_inputs = {"encoder_hidden_states": (prompt_embeds, negative_prompt_embeds)}: # you will get a guider_state with two batches: # guider_state = [ # {"encoder_hidden_states": prompt_embeds, "__guidance_identifier__": "pred_cond"}, # conditional batch # {"encoder_hidden_states": negative_prompt_embeds, "__guidance_identifier__": "pred_uncond"}, # unconditional batch # ] # Other guidance methods may return 1 batch (no guidance) or 3+ batches (e.g., PAG, APG). guider_state = components.guider.prepare_inputs_from_block_state(block_state, self._guider_input_fields) # run the denoiser for each guidance batch for guider_state_batch in guider_state: components.guider.prepare_models(components.transformer) cond_kwargs = guider_state_batch.as_dict() def _convert_dtype(v, dtype): if isinstance(v, torch.Tensor): return v.to(dtype) elif isinstance(v, list): return [_convert_dtype(t, dtype) for t in v] return v cond_kwargs = { k: _convert_dtype(v, block_state.dtype) for k, v in cond_kwargs.items() if k in self._guider_input_fields.keys() } # Predict the noise residual # store the noise_pred in guider_state_batch so that we can apply guidance across all batches model_out_list = components.transformer( x=block_state.latent_model_input, t=block_state.timestep, return_dict=False, **cond_kwargs, )[0] noise_pred = torch.stack(model_out_list, dim=0).squeeze(2) guider_state_batch.noise_pred = -noise_pred components.guider.cleanup_models(components.transformer) # Perform guidance block_state.noise_pred = components.guider(guider_state)[0] return components, block_state class ZImageLoopAfterDenoiser(ModularPipelineBlocks): model_name = "z-image" @property def expected_components(self) -> list[ComponentSpec]: return [ ComponentSpec("scheduler", FlowMatchEulerDiscreteScheduler), ] @property def description(self) -> str: return ( "step within the denoising loop that update the latents. 
" "This block should be used to compose the `sub_blocks` attribute of a `LoopSequentialPipelineBlocks` " "object (e.g. `ZImageDenoiseLoopWrapper`)" ) @torch.no_grad() def __call__(self, components: ZImageModularPipeline, block_state: BlockState, i: int, t: torch.Tensor): # Perform scheduler step using the predicted output latents_dtype = block_state.latents.dtype block_state.latents = components.scheduler.step( block_state.noise_pred.float(), t, block_state.latents.float(), return_dict=False, )[0] if block_state.latents.dtype != latents_dtype: block_state.latents = block_state.latents.to(latents_dtype) return components, block_state class ZImageDenoiseLoopWrapper(LoopSequentialPipelineBlocks): model_name = "z-image" @property def description(self) -> str: return ( "Pipeline block that iteratively denoise the latents over `timesteps`. " "The specific steps with each iteration can be customized with `sub_blocks` attributes" ) @property def loop_expected_components(self) -> list[ComponentSpec]: return [ ComponentSpec("scheduler", FlowMatchEulerDiscreteScheduler), ] @property def loop_inputs(self) -> list[InputParam]: return [ InputParam( "timesteps", required=True, type_hint=torch.Tensor, description="The timesteps to use for the denoising process. Can be generated in set_timesteps step.", ), InputParam( "num_inference_steps", required=True, type_hint=int, description="The number of inference steps to use for the denoising process. 
Can be generated in set_timesteps step.", ), ] @torch.no_grad() def __call__(self, components: ZImageModularPipeline, state: PipelineState) -> PipelineState: block_state = self.get_block_state(state) block_state.num_warmup_steps = max( len(block_state.timesteps) - block_state.num_inference_steps * components.scheduler.order, 0 ) with self.progress_bar(total=block_state.num_inference_steps) as progress_bar: for i, t in enumerate(block_state.timesteps): components, block_state = self.loop_step(components, block_state, i=i, t=t) if i == len(block_state.timesteps) - 1 or ( (i + 1) > block_state.num_warmup_steps and (i + 1) % components.scheduler.order == 0 ): progress_bar.update() self.set_block_state(state, block_state) return components, state class ZImageDenoiseStep(ZImageDenoiseLoopWrapper): block_classes = [ ZImageLoopBeforeDenoiser, ZImageLoopDenoiser( guider_input_fields={ "cap_feats": ("prompt_embeds", "negative_prompt_embeds"), } ), ZImageLoopAfterDenoiser, ] block_names = ["before_denoiser", "denoiser", "after_denoiser"] @property def description(self) -> str: return ( "Denoise step that iteratively denoise the latents. \n" "Its loop logic is defined in `ZImageDenoiseLoopWrapper.__call__` method \n" "At each iteration, it runs blocks defined in `sub_blocks` sequentially:\n" " - `ZImageLoopBeforeDenoiser`\n" " - `ZImageLoopDenoiser`\n" " - `ZImageLoopAfterDenoiser`\n" "This block supports text-to-image and image-to-image tasks for Z-Image." )
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/modular_pipelines/z_image/denoise.py", "license": "Apache License 2.0", "lines": 266, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:src/diffusers/modular_pipelines/z_image/encoders.py
# Copyright 2025 Alibaba Z-Image Team and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import PIL import torch from transformers import Qwen2Tokenizer, Qwen3Model from ...configuration_utils import FrozenDict from ...guiders import ClassifierFreeGuidance from ...image_processor import VaeImageProcessor from ...models import AutoencoderKL from ...utils import is_ftfy_available, logging from ..modular_pipeline import ModularPipelineBlocks, PipelineState from ..modular_pipeline_utils import ComponentSpec, InputParam, OutputParam from .modular_pipeline import ZImageModularPipeline if is_ftfy_available(): pass logger = logging.get_logger(__name__) # pylint: disable=invalid-name def get_qwen_prompt_embeds( text_encoder: Qwen3Model, tokenizer: Qwen2Tokenizer, prompt: str | list[str], device: torch.device, max_sequence_length: int = 512, ) -> list[torch.Tensor]: prompt = [prompt] if isinstance(prompt, str) else prompt for i, prompt_item in enumerate(prompt): messages = [ {"role": "user", "content": prompt_item}, ] prompt_item = tokenizer.apply_chat_template( messages, tokenize=False, add_generation_prompt=True, enable_thinking=True, ) prompt[i] = prompt_item text_inputs = tokenizer( prompt, padding="max_length", max_length=max_sequence_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids.to(device) prompt_masks = text_inputs.attention_mask.to(device).bool() prompt_embeds = text_encoder( 
input_ids=text_input_ids, attention_mask=prompt_masks, output_hidden_states=True, ).hidden_states[-2] prompt_embeds_list = [] for i in range(len(prompt_embeds)): prompt_embeds_list.append(prompt_embeds[i][prompt_masks[i]]) return prompt_embeds_list # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents def retrieve_latents( encoder_output: torch.Tensor, generator: torch.Generator | None = None, sample_mode: str = "sample" ): if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": return encoder_output.latent_dist.sample(generator) elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": return encoder_output.latent_dist.mode() elif hasattr(encoder_output, "latents"): return encoder_output.latents else: raise AttributeError("Could not access latents of provided encoder_output") def encode_vae_image( image_tensor: torch.Tensor, vae: AutoencoderKL, generator: torch.Generator, device: torch.device, dtype: torch.dtype, latent_channels: int = 16, ): if not isinstance(image_tensor, torch.Tensor): raise ValueError(f"Expected image_tensor to be a tensor, got {type(image_tensor)}.") if isinstance(generator, list) and len(generator) != image_tensor.shape[0]: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but it is not same as number of images {image_tensor.shape[0]}." 
) image_tensor = image_tensor.to(device=device, dtype=dtype) if isinstance(generator, list): image_latents = [ retrieve_latents(vae.encode(image_tensor[i : i + 1]), generator=generator[i]) for i in range(image_tensor.shape[0]) ] image_latents = torch.cat(image_latents, dim=0) else: image_latents = retrieve_latents(vae.encode(image_tensor), generator=generator) image_latents = (image_latents - vae.config.shift_factor) * vae.config.scaling_factor return image_latents class ZImageTextEncoderStep(ModularPipelineBlocks): model_name = "z-image" @property def description(self) -> str: return "Text Encoder step that generate text_embeddings to guide the video generation" @property def expected_components(self) -> list[ComponentSpec]: return [ ComponentSpec("text_encoder", Qwen3Model), ComponentSpec("tokenizer", Qwen2Tokenizer), ComponentSpec( "guider", ClassifierFreeGuidance, config=FrozenDict({"guidance_scale": 5.0, "enabled": False}), default_creation_method="from_config", ), ] @property def inputs(self) -> list[InputParam]: return [ InputParam("prompt"), InputParam("negative_prompt"), InputParam("max_sequence_length", default=512), ] @property def intermediate_outputs(self) -> list[OutputParam]: return [ OutputParam( "prompt_embeds", type_hint=list[torch.Tensor], kwargs_type="denoiser_input_fields", description="text embeddings used to guide the image generation", ), OutputParam( "negative_prompt_embeds", type_hint=list[torch.Tensor], kwargs_type="denoiser_input_fields", description="negative text embeddings used to guide the image generation", ), ] @staticmethod def check_inputs(block_state): if block_state.prompt is not None and ( not isinstance(block_state.prompt, str) and not isinstance(block_state.prompt, list) ): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(block_state.prompt)}") @staticmethod def encode_prompt( components, prompt: str, device: torch.device | None = None, prepare_unconditional_embeds: bool = True, negative_prompt: str 
| None = None, max_sequence_length: int = 512, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `list[str]`, *optional*): prompt to be encoded device: (`torch.device`): torch device prepare_unconditional_embeds (`bool`): whether to use prepare unconditional embeddings or not negative_prompt (`str` or `list[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). max_sequence_length (`int`, defaults to `512`): The maximum number of text tokens to be used for the generation process. """ device = device or components._execution_device if not isinstance(prompt, list): prompt = [prompt] batch_size = len(prompt) prompt_embeds = get_qwen_prompt_embeds( text_encoder=components.text_encoder, tokenizer=components.tokenizer, prompt=prompt, max_sequence_length=max_sequence_length, device=device, ) negative_prompt_embeds = None if prepare_unconditional_embeds: negative_prompt = negative_prompt or "" negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt if prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." 
) negative_prompt_embeds = get_qwen_prompt_embeds( text_encoder=components.text_encoder, tokenizer=components.tokenizer, prompt=negative_prompt, max_sequence_length=max_sequence_length, device=device, ) return prompt_embeds, negative_prompt_embeds @torch.no_grad() def __call__(self, components: ZImageModularPipeline, state: PipelineState) -> PipelineState: # Get inputs and intermediates block_state = self.get_block_state(state) self.check_inputs(block_state) block_state.device = components._execution_device # Encode input prompt ( block_state.prompt_embeds, block_state.negative_prompt_embeds, ) = self.encode_prompt( components=components, prompt=block_state.prompt, device=block_state.device, prepare_unconditional_embeds=components.requires_unconditional_embeds, negative_prompt=block_state.negative_prompt, max_sequence_length=block_state.max_sequence_length, ) # Add outputs self.set_block_state(state, block_state) return components, state class ZImageVaeImageEncoderStep(ModularPipelineBlocks): model_name = "z-image" @property def description(self) -> str: return "Vae Image Encoder step that generate condition_latents based on image to guide the image generation" @property def expected_components(self) -> list[ComponentSpec]: return [ ComponentSpec("vae", AutoencoderKL), ComponentSpec( "image_processor", VaeImageProcessor, config=FrozenDict({"vae_scale_factor": 8 * 2}), default_creation_method="from_config", ), ] @property def inputs(self) -> list[InputParam]: return [ InputParam("image", type_hint=PIL.Image.Image, required=True), InputParam("height"), InputParam("width"), InputParam("generator"), ] @property def intermediate_outputs(self) -> list[OutputParam]: return [ OutputParam( "image_latents", type_hint=torch.Tensor, description="video latent representation with the first frame image condition", ), ] @staticmethod def check_inputs(components, block_state): if (block_state.height is not None and block_state.height % components.vae_scale_factor_spatial != 0) or ( 
block_state.width is not None and block_state.width % components.vae_scale_factor_spatial != 0 ): raise ValueError( f"`height` and `width` have to be divisible by {components.vae_scale_factor_spatial} but are {block_state.height} and {block_state.width}." ) def __call__(self, components: ZImageModularPipeline, state: PipelineState) -> PipelineState: block_state = self.get_block_state(state) self.check_inputs(components, block_state) image = block_state.image device = components._execution_device dtype = torch.float32 vae_dtype = components.vae.dtype image_tensor = components.image_processor.preprocess( image, height=block_state.height, width=block_state.width ).to(device=device, dtype=dtype) block_state.image_latents = encode_vae_image( image_tensor=image_tensor, vae=components.vae, generator=block_state.generator, device=device, dtype=vae_dtype, latent_channels=components.num_channels_latents, ) self.set_block_state(state, block_state) return components, state
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/modular_pipelines/z_image/encoders.py", "license": "Apache License 2.0", "lines": 288, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:src/diffusers/modular_pipelines/z_image/modular_pipeline.py
# Copyright 2025 Alibaba Z-Image Team and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ...loaders import ZImageLoraLoaderMixin from ...utils import logging from ..modular_pipeline import ModularPipeline logger = logging.get_logger(__name__) # pylint: disable=invalid-name class ZImageModularPipeline( ModularPipeline, ZImageLoraLoaderMixin, ): """ A ModularPipeline for Z-Image. > [!WARNING] > This is an experimental feature and is likely to change in the future. 
""" default_blocks_name = "ZImageAutoBlocks" @property def default_height(self): return 1024 @property def default_width(self): return 1024 @property def vae_scale_factor_spatial(self): vae_scale_factor_spatial = 16 if hasattr(self, "image_processor") and self.image_processor is not None: vae_scale_factor_spatial = self.image_processor.config.vae_scale_factor return vae_scale_factor_spatial @property def vae_scale_factor(self): vae_scale_factor = 8 if hasattr(self, "vae") and self.vae is not None: vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) return vae_scale_factor @property def num_channels_latents(self): num_channels_latents = 16 if hasattr(self, "transformer") and self.transformer is not None: num_channels_latents = self.transformer.config.in_channels return num_channels_latents @property def requires_unconditional_embeds(self): requires_unconditional_embeds = False if hasattr(self, "guider") and self.guider is not None: requires_unconditional_embeds = self.guider._enabled and self.guider.num_conditions > 1 return requires_unconditional_embeds
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/modular_pipelines/z_image/modular_pipeline.py", "license": "Apache License 2.0", "lines": 56, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:src/diffusers/pipelines/z_image/pipeline_z_image_img2img.py
# Copyright 2025 Alibaba Z-Image Team and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect from typing import Any, Callable import torch from transformers import AutoTokenizer, PreTrainedModel from ...image_processor import PipelineImageInput, VaeImageProcessor from ...loaders import FromSingleFileMixin, ZImageLoraLoaderMixin from ...models.autoencoders import AutoencoderKL from ...models.transformers import ZImageTransformer2DModel from ...pipelines.pipeline_utils import DiffusionPipeline from ...schedulers import FlowMatchEulerDiscreteScheduler from ...utils import logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from .pipeline_output import ZImagePipelineOutput logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py >>> import torch >>> from diffusers import ZImageImg2ImgPipeline >>> from diffusers.utils import load_image >>> pipe = ZImageImg2ImgPipeline.from_pretrained("Z-a-o/Z-Image-Turbo", torch_dtype=torch.bfloat16) >>> pipe.to("cuda") >>> url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg" >>> init_image = load_image(url).resize((1024, 1024)) >>> prompt = "A fantasy landscape with mountains and a river, detailed, vibrant colors" >>> image = pipe( ... prompt, ... image=init_image, ... strength=0.6, ... num_inference_steps=9, ... guidance_scale=0.0, ... 
generator=torch.Generator("cuda").manual_seed(42), ... ).images[0] >>> image.save("zimage_img2img.png") ``` """ # Copied from diffusers.pipelines.flux.pipeline_flux.calculate_shift def calculate_shift( image_seq_len, base_seq_len: int = 256, max_seq_len: int = 4096, base_shift: float = 0.5, max_shift: float = 1.15, ): m = (max_shift - base_shift) / (max_seq_len - base_seq_len) b = base_shift - m * base_seq_len mu = image_seq_len * m + b return mu # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents def retrieve_latents( encoder_output: torch.Tensor, generator: torch.Generator | None = None, sample_mode: str = "sample" ): if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": return encoder_output.latent_dist.sample(generator) elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": return encoder_output.latent_dist.mode() elif hasattr(encoder_output, "latents"): return encoder_output.latents else: raise AttributeError("Could not access latents of provided encoder_output") # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps def retrieve_timesteps( scheduler, num_inference_steps: int | None = None, device: str | torch.device | None = None, timesteps: list[int] | None = None, sigmas: list[float] | None = None, **kwargs, ): r""" Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. Args: scheduler (`SchedulerMixin`): The scheduler to get timesteps from. num_inference_steps (`int`): The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` must be `None`. device (`str` or `torch.device`, *optional*): The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. 
timesteps (`list[int]`, *optional*): Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, `num_inference_steps` and `sigmas` must be `None`. sigmas (`list[float]`, *optional*): Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, `num_inference_steps` and `timesteps` must be `None`. Returns: `tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the second element is the number of inference steps. """ if timesteps is not None and sigmas is not None: raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") if timesteps is not None: accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" timestep schedules. Please check whether you are using the correct scheduler." ) scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif sigmas is not None: accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accept_sigmas: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" sigmas schedules. Please check whether you are using the correct scheduler." ) scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return timesteps, num_inference_steps class ZImageImg2ImgPipeline(DiffusionPipeline, ZImageLoraLoaderMixin, FromSingleFileMixin): r""" The ZImage pipeline for image-to-image generation. 
Args: scheduler ([`FlowMatchEulerDiscreteScheduler`]): A scheduler to be used in combination with `transformer` to denoise the encoded image latents. vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. text_encoder ([`PreTrainedModel`]): A text encoder model to encode text prompts. tokenizer ([`AutoTokenizer`]): A tokenizer to tokenize text prompts. transformer ([`ZImageTransformer2DModel`]): A ZImage transformer model to denoise the encoded image latents. """ model_cpu_offload_seq = "text_encoder->transformer->vae" _optional_components = [] _callback_tensor_inputs = ["latents", "prompt_embeds"] def __init__( self, scheduler: FlowMatchEulerDiscreteScheduler, vae: AutoencoderKL, text_encoder: PreTrainedModel, tokenizer: AutoTokenizer, transformer: ZImageTransformer2DModel, ): super().__init__() self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, scheduler=scheduler, transformer=transformer, ) self.vae_scale_factor = ( 2 ** (len(self.vae.config.block_out_channels) - 1) if hasattr(self, "vae") and self.vae is not None else 8 ) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor * 2) # Copied from diffusers.pipelines.z_image.pipeline_z_image.ZImagePipeline.encode_prompt def encode_prompt( self, prompt: str | list[str], device: torch.device | None = None, do_classifier_free_guidance: bool = True, negative_prompt: str | list[str] | None = None, prompt_embeds: list[torch.FloatTensor] | None = None, negative_prompt_embeds: torch.FloatTensor | None = None, max_sequence_length: int = 512, ): prompt = [prompt] if isinstance(prompt, str) else prompt prompt_embeds = self._encode_prompt( prompt=prompt, device=device, prompt_embeds=prompt_embeds, max_sequence_length=max_sequence_length, ) if do_classifier_free_guidance: if negative_prompt is None: negative_prompt = ["" for _ in prompt] else: negative_prompt = [negative_prompt] if 
isinstance(negative_prompt, str) else negative_prompt assert len(prompt) == len(negative_prompt) negative_prompt_embeds = self._encode_prompt( prompt=negative_prompt, device=device, prompt_embeds=negative_prompt_embeds, max_sequence_length=max_sequence_length, ) else: negative_prompt_embeds = [] return prompt_embeds, negative_prompt_embeds # Copied from diffusers.pipelines.z_image.pipeline_z_image.ZImagePipeline._encode_prompt def _encode_prompt( self, prompt: str | list[str], device: torch.device | None = None, prompt_embeds: list[torch.FloatTensor] | None = None, max_sequence_length: int = 512, ) -> list[torch.FloatTensor]: device = device or self._execution_device if prompt_embeds is not None: return prompt_embeds if isinstance(prompt, str): prompt = [prompt] for i, prompt_item in enumerate(prompt): messages = [ {"role": "user", "content": prompt_item}, ] prompt_item = self.tokenizer.apply_chat_template( messages, tokenize=False, add_generation_prompt=True, enable_thinking=True, ) prompt[i] = prompt_item text_inputs = self.tokenizer( prompt, padding="max_length", max_length=max_sequence_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids.to(device) prompt_masks = text_inputs.attention_mask.to(device).bool() prompt_embeds = self.text_encoder( input_ids=text_input_ids, attention_mask=prompt_masks, output_hidden_states=True, ).hidden_states[-2] embeddings_list = [] for i in range(len(prompt_embeds)): embeddings_list.append(prompt_embeds[i][prompt_masks[i]]) return embeddings_list # Copied from diffusers.pipelines.stable_diffusion_3.pipeline_stable_diffusion_3_img2img.StableDiffusion3Img2ImgPipeline.get_timesteps def get_timesteps(self, num_inference_steps, strength, device): # get the original timestep using init_timestep init_timestep = min(num_inference_steps * strength, num_inference_steps) t_start = int(max(num_inference_steps - init_timestep, 0)) timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :] if 
hasattr(self.scheduler, "set_begin_index"): self.scheduler.set_begin_index(t_start * self.scheduler.order) return timesteps, num_inference_steps - t_start def prepare_latents( self, image, timestep, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None, ): height = 2 * (int(height) // (self.vae_scale_factor * 2)) width = 2 * (int(width) // (self.vae_scale_factor * 2)) shape = (batch_size, num_channels_latents, height, width) if latents is not None: return latents.to(device=device, dtype=dtype) # Encode the input image image = image.to(device=device, dtype=dtype) if image.shape[1] != num_channels_latents: if isinstance(generator, list): image_latents = [ retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i]) for i in range(image.shape[0]) ] image_latents = torch.cat(image_latents, dim=0) else: image_latents = retrieve_latents(self.vae.encode(image), generator=generator) # Apply scaling (inverse of decoding: decode does latents/scaling_factor + shift_factor) image_latents = (image_latents - self.vae.config.shift_factor) * self.vae.config.scaling_factor else: image_latents = image # Handle batch size expansion if batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] == 0: additional_image_per_prompt = batch_size // image_latents.shape[0] image_latents = torch.cat([image_latents] * additional_image_per_prompt, dim=0) elif batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] != 0: raise ValueError( f"Cannot duplicate `image` of batch size {image_latents.shape[0]} to {batch_size} text prompts." 
) # Add noise using flow matching scale_noise noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) latents = self.scheduler.scale_noise(image_latents, timestep, noise) return latents @property def guidance_scale(self): return self._guidance_scale @property def do_classifier_free_guidance(self): return self._guidance_scale > 1 @property def joint_attention_kwargs(self): return self._joint_attention_kwargs @property def num_timesteps(self): return self._num_timesteps @property def interrupt(self): return self._interrupt @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, prompt: str | list[str] = None, image: PipelineImageInput = None, strength: float = 0.6, height: int | None = None, width: int | None = None, num_inference_steps: int = 50, sigmas: list[float] | None = None, guidance_scale: float = 5.0, cfg_normalization: bool = False, cfg_truncation: float = 1.0, negative_prompt: str | list[str] | None = None, num_images_per_prompt: int | None = 1, generator: torch.Generator | list[torch.Generator] | None = None, latents: torch.FloatTensor | None = None, prompt_embeds: list[torch.FloatTensor] | None = None, negative_prompt_embeds: list[torch.FloatTensor] | None = None, output_type: str | None = "pil", return_dict: bool = True, joint_attention_kwargs: dict[str, Any] | None = None, callback_on_step_end: Callable[[int, int], None] | None = None, callback_on_step_end_tensor_inputs: list[str] = ["latents"], max_sequence_length: int = 512, ): r""" Function invoked when calling the pipeline for image-to-image generation. Args: prompt (`str` or `list[str]`, *optional*): The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. instead. image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `list[torch.Tensor]`, `list[PIL.Image.Image]`, or `list[np.ndarray]`): `Image`, numpy array or tensor representing an image batch to be used as the starting point. 
For both numpy array and pytorch tensor, the expected value range is between `[0, 1]`. If it's a tensor or a list of tensors, the expected shape should be `(B, C, H, W)` or `(C, H, W)`. If it is a numpy array or a list of arrays, the expected shape should be `(B, H, W, C)` or `(H, W, C)`. strength (`float`, *optional*, defaults to 0.6): Indicates extent to transform the reference `image`. Must be between 0 and 1. `image` is used as a starting point and more noise is added the higher the `strength`. The number of denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise is maximum and the denoising process runs for the full number of iterations specified in `num_inference_steps`. A value of 1 essentially ignores `image`. height (`int`, *optional*, defaults to 1024): The height in pixels of the generated image. If not provided, uses the input image height. width (`int`, *optional*, defaults to 1024): The width in pixels of the generated image. If not provided, uses the input image width. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. sigmas (`list[float]`, *optional*): Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed will be used. guidance_scale (`float`, *optional*, defaults to 5.0): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. 
cfg_normalization (`bool`, *optional*, defaults to False): Whether to apply configuration normalization. cfg_truncation (`float`, *optional*, defaults to 1.0): The truncation value for configuration. negative_prompt (`str` or `list[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. generator (`torch.Generator` or `list[torch.Generator]`, *optional*): One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`list[torch.FloatTensor]`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`list[torch.FloatTensor]`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.ZImagePipelineOutput`] instead of a plain tuple. 
joint_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). callback_on_step_end (`Callable`, *optional*): A function that calls at the end of each denoising steps during the inference. The function is called with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by `callback_on_step_end_tensor_inputs`. callback_on_step_end_tensor_inputs (`List`, *optional*): The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the `._callback_tensor_inputs` attribute of your pipeline class. max_sequence_length (`int`, *optional*, defaults to 512): Maximum sequence length to use with the `prompt`. Examples: Returns: [`~pipelines.z_image.ZImagePipelineOutput`] or `tuple`: [`~pipelines.z_image.ZImagePipelineOutput`] if `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is a list with the generated images. """ # 1. Check inputs and validate strength if strength < 0 or strength > 1: raise ValueError(f"The value of strength should be in [0.0, 1.0] but is {strength}") # 2. Preprocess image init_image = self.image_processor.preprocess(image) init_image = init_image.to(dtype=torch.float32) # Get dimensions from the preprocessed image if not specified if height is None: height = init_image.shape[-2] if width is None: width = init_image.shape[-1] vae_scale = self.vae_scale_factor * 2 if height % vae_scale != 0: raise ValueError( f"Height must be divisible by {vae_scale} (got {height}). 
" f"Please adjust the height to a multiple of {vae_scale}." ) if width % vae_scale != 0: raise ValueError( f"Width must be divisible by {vae_scale} (got {width}). " f"Please adjust the width to a multiple of {vae_scale}." ) device = self._execution_device self._guidance_scale = guidance_scale self._joint_attention_kwargs = joint_attention_kwargs self._interrupt = False self._cfg_normalization = cfg_normalization self._cfg_truncation = cfg_truncation # 3. Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = len(prompt_embeds) # If prompt_embeds is provided and prompt is None, skip encoding if prompt_embeds is not None and prompt is None: if self.do_classifier_free_guidance and negative_prompt_embeds is None: raise ValueError( "When `prompt_embeds` is provided without `prompt`, " "`negative_prompt_embeds` must also be provided for classifier-free guidance." ) else: ( prompt_embeds, negative_prompt_embeds, ) = self.encode_prompt( prompt=prompt, negative_prompt=negative_prompt, do_classifier_free_guidance=self.do_classifier_free_guidance, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, device=device, max_sequence_length=max_sequence_length, ) # 4. 
Prepare latent variables num_channels_latents = self.transformer.in_channels # Repeat prompt_embeds for num_images_per_prompt if num_images_per_prompt > 1: prompt_embeds = [pe for pe in prompt_embeds for _ in range(num_images_per_prompt)] if self.do_classifier_free_guidance and negative_prompt_embeds: negative_prompt_embeds = [npe for npe in negative_prompt_embeds for _ in range(num_images_per_prompt)] actual_batch_size = batch_size * num_images_per_prompt # Calculate latent dimensions for image_seq_len latent_height = 2 * (int(height) // (self.vae_scale_factor * 2)) latent_width = 2 * (int(width) // (self.vae_scale_factor * 2)) image_seq_len = (latent_height // 2) * (latent_width // 2) # 5. Prepare timesteps mu = calculate_shift( image_seq_len, self.scheduler.config.get("base_image_seq_len", 256), self.scheduler.config.get("max_image_seq_len", 4096), self.scheduler.config.get("base_shift", 0.5), self.scheduler.config.get("max_shift", 1.15), ) self.scheduler.sigma_min = 0.0 scheduler_kwargs = {"mu": mu} timesteps, num_inference_steps = retrieve_timesteps( self.scheduler, num_inference_steps, device, sigmas=sigmas, **scheduler_kwargs, ) # 6. Adjust timesteps based on strength timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device) if num_inference_steps < 1: raise ValueError( f"After adjusting the num_inference_steps by strength parameter: {strength}, the number of pipeline " f"steps is {num_inference_steps} which is < 1 and not appropriate for this pipeline." ) latent_timestep = timesteps[:1].repeat(actual_batch_size) # 7. Prepare latents from image latents = self.prepare_latents( init_image, latent_timestep, actual_batch_size, num_channels_latents, height, width, prompt_embeds[0].dtype, device, generator, latents, ) num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) self._num_timesteps = len(timesteps) # 8. 
Denoising loop with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): if self.interrupt: continue # broadcast to batch dimension in a way that's compatible with ONNX/Core ML timestep = t.expand(latents.shape[0]) timestep = (1000 - timestep) / 1000 # Normalized time for time-aware config (0 at start, 1 at end) t_norm = timestep[0].item() # Handle cfg truncation current_guidance_scale = self.guidance_scale if ( self.do_classifier_free_guidance and self._cfg_truncation is not None and float(self._cfg_truncation) <= 1 ): if t_norm > self._cfg_truncation: current_guidance_scale = 0.0 # Run CFG only if configured AND scale is non-zero apply_cfg = self.do_classifier_free_guidance and current_guidance_scale > 0 if apply_cfg: latents_typed = latents.to(self.transformer.dtype) latent_model_input = latents_typed.repeat(2, 1, 1, 1) prompt_embeds_model_input = prompt_embeds + negative_prompt_embeds timestep_model_input = timestep.repeat(2) else: latent_model_input = latents.to(self.transformer.dtype) prompt_embeds_model_input = prompt_embeds timestep_model_input = timestep latent_model_input = latent_model_input.unsqueeze(2) latent_model_input_list = list(latent_model_input.unbind(dim=0)) model_out_list = self.transformer( latent_model_input_list, timestep_model_input, prompt_embeds_model_input, )[0] if apply_cfg: # Perform CFG pos_out = model_out_list[:actual_batch_size] neg_out = model_out_list[actual_batch_size:] noise_pred = [] for j in range(actual_batch_size): pos = pos_out[j].float() neg = neg_out[j].float() pred = pos + current_guidance_scale * (pos - neg) # Renormalization if self._cfg_normalization and float(self._cfg_normalization) > 0.0: ori_pos_norm = torch.linalg.vector_norm(pos) new_pos_norm = torch.linalg.vector_norm(pred) max_new_norm = ori_pos_norm * float(self._cfg_normalization) if new_pos_norm > max_new_norm: pred = pred * (max_new_norm / new_pos_norm) noise_pred.append(pred) noise_pred = torch.stack(noise_pred, 
dim=0) else: noise_pred = torch.stack([t.float() for t in model_out_list], dim=0) noise_pred = noise_pred.squeeze(2) noise_pred = -noise_pred # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred.to(torch.float32), t, latents, return_dict=False)[0] assert latents.dtype == torch.float32 if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop("latents", latents) prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if output_type == "latent": image = latents else: latents = latents.to(self.vae.dtype) latents = (latents / self.vae.config.scaling_factor) + self.vae.config.shift_factor image = self.vae.decode(latents, return_dict=False)[0] image = self.image_processor.postprocess(image, output_type=output_type) # Offload all models self.maybe_free_model_hooks() if not return_dict: return (image,) return ZImagePipelineOutput(images=image)
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/pipelines/z_image/pipeline_z_image_img2img.py", "license": "Apache License 2.0", "lines": 615, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:tests/pipelines/z_image/test_z_image_img2img.py
# Copyright 2025 Alibaba Z-Image Team and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import os import unittest import numpy as np import torch from transformers import Qwen2Tokenizer, Qwen3Config, Qwen3Model from diffusers import ( AutoencoderKL, FlowMatchEulerDiscreteScheduler, ZImageImg2ImgPipeline, ZImageTransformer2DModel, ) from diffusers.utils.testing_utils import floats_tensor from ...testing_utils import torch_device from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin, to_np # Z-Image requires torch.use_deterministic_algorithms(False) due to complex64 RoPE operations # Cannot use enable_full_determinism() which sets it to True # Note: Z-Image does not support FP16 inference due to complex64 RoPE embeddings os.environ["CUDA_LAUNCH_BLOCKING"] = "1" os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":16:8" torch.use_deterministic_algorithms(False) torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False if hasattr(torch.backends, "cuda"): torch.backends.cuda.matmul.allow_tf32 = False class ZImageImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = ZImageImg2ImgPipeline params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"cross_attention_kwargs"} batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS image_params = 
IMAGE_TO_IMAGE_IMAGE_PARAMS image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS required_optional_params = frozenset( [ "num_inference_steps", "strength", "generator", "latents", "return_dict", "callback_on_step_end", "callback_on_step_end_tensor_inputs", ] ) supports_dduf = False test_xformers_attention = False test_layerwise_casting = True test_group_offloading = True def setUp(self): gc.collect() if torch.cuda.is_available(): torch.cuda.empty_cache() torch.cuda.synchronize() torch.manual_seed(0) if torch.cuda.is_available(): torch.cuda.manual_seed_all(0) def tearDown(self): super().tearDown() gc.collect() if torch.cuda.is_available(): torch.cuda.empty_cache() torch.cuda.synchronize() torch.manual_seed(0) if torch.cuda.is_available(): torch.cuda.manual_seed_all(0) def get_dummy_components(self): torch.manual_seed(0) transformer = ZImageTransformer2DModel( all_patch_size=(2,), all_f_patch_size=(1,), in_channels=16, dim=32, n_layers=2, n_refiner_layers=1, n_heads=2, n_kv_heads=2, norm_eps=1e-5, qk_norm=True, cap_feat_dim=16, rope_theta=256.0, t_scale=1000.0, axes_dims=[8, 4, 4], axes_lens=[256, 32, 32], ) torch.manual_seed(0) vae = AutoencoderKL( in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], block_out_channels=[32, 64], layers_per_block=1, latent_channels=16, norm_num_groups=32, sample_size=32, scaling_factor=0.3611, shift_factor=0.1159, ) torch.manual_seed(0) scheduler = FlowMatchEulerDiscreteScheduler() torch.manual_seed(0) config = Qwen3Config( hidden_size=16, intermediate_size=16, num_hidden_layers=2, num_attention_heads=2, num_key_value_heads=2, vocab_size=151936, max_position_embeddings=512, ) text_encoder = Qwen3Model(config) tokenizer = Qwen2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-Qwen2VLForConditionalGeneration") components = { "transformer": transformer, "vae": vae, "scheduler": scheduler, "text_encoder": text_encoder, "tokenizer": 
tokenizer, } return components def get_dummy_inputs(self, device, seed=0): import random if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) inputs = { "prompt": "dance monkey", "negative_prompt": "bad quality", "image": image, "strength": 0.6, "generator": generator, "num_inference_steps": 2, "guidance_scale": 3.0, "cfg_normalization": False, "cfg_truncation": 1.0, "height": 32, "width": 32, "max_sequence_length": 16, "output_type": "np", } return inputs def test_inference(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = pipe(**inputs).images generated_image = image[0] self.assertEqual(generated_image.shape, (32, 32, 3)) def test_inference_batch_single_identical(self): gc.collect() if torch.cuda.is_available(): torch.cuda.empty_cache() torch.cuda.synchronize() torch.manual_seed(0) if torch.cuda.is_available(): torch.cuda.manual_seed_all(0) self._test_inference_batch_single_identical(batch_size=3, expected_max_diff=1e-1) def test_num_images_per_prompt(self): import inspect sig = inspect.signature(self.pipeline_class.__call__) if "num_images_per_prompt" not in sig.parameters: return components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) batch_sizes = [1, 2] num_images_per_prompts = [1, 2] for batch_size in batch_sizes: for num_images_per_prompt in num_images_per_prompts: inputs = self.get_dummy_inputs(torch_device) for key in inputs.keys(): if key in self.batch_params: inputs[key] = batch_size * [inputs[key]] images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0] assert images.shape[0] == batch_size * num_images_per_prompt del pipe 
gc.collect() if torch.cuda.is_available(): torch.cuda.empty_cache() torch.cuda.synchronize() def test_attention_slicing_forward_pass( self, test_max_difference=True, test_mean_pixel_difference=True, expected_max_diff=1e-3 ): if not self.test_attention_slicing: return components = self.get_dummy_components() pipe = self.pipeline_class(**components) for component in pipe.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) generator_device = "cpu" inputs = self.get_dummy_inputs(generator_device) output_without_slicing = pipe(**inputs)[0] pipe.enable_attention_slicing(slice_size=1) inputs = self.get_dummy_inputs(generator_device) output_with_slicing1 = pipe(**inputs)[0] pipe.enable_attention_slicing(slice_size=2) inputs = self.get_dummy_inputs(generator_device) output_with_slicing2 = pipe(**inputs)[0] if test_max_difference: max_diff1 = np.abs(to_np(output_with_slicing1) - to_np(output_without_slicing)).max() max_diff2 = np.abs(to_np(output_with_slicing2) - to_np(output_without_slicing)).max() self.assertLess( max(max_diff1, max_diff2), expected_max_diff, "Attention slicing should not affect the inference results", ) def test_vae_tiling(self, expected_diff_max: float = 0.3): import random generator_device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to("cpu") pipe.set_progress_bar_config(disable=None) # Without tiling inputs = self.get_dummy_inputs(generator_device) inputs["height"] = inputs["width"] = 128 # Generate a larger image for the input inputs["image"] = floats_tensor((1, 3, 128, 128), rng=random.Random(0)).to("cpu") output_without_tiling = pipe(**inputs)[0] # With tiling (standard AutoencoderKL doesn't accept parameters) pipe.vae.enable_tiling() inputs = self.get_dummy_inputs(generator_device) inputs["height"] = inputs["width"] = 128 inputs["image"] = floats_tensor((1, 3, 128, 128), 
rng=random.Random(0)).to("cpu") output_with_tiling = pipe(**inputs)[0] self.assertLess( (to_np(output_without_tiling) - to_np(output_with_tiling)).max(), expected_diff_max, "VAE tiling should not affect the inference results", ) def test_pipeline_with_accelerator_device_map(self, expected_max_difference=5e-4): # Z-Image RoPE embeddings (complex64) have slightly higher numerical tolerance super().test_pipeline_with_accelerator_device_map(expected_max_difference=expected_max_difference) def test_group_offloading_inference(self): # Block-level offloading conflicts with RoPE cache. Pipeline-level offloading (tested separately) works fine. self.skipTest("Using test_pipeline_level_group_offloading_inference instead") def test_save_load_float16(self, expected_max_diff=1e-2): # Z-Image does not support FP16 due to complex64 RoPE embeddings self.skipTest("Z-Image does not support FP16 inference") def test_float16_inference(self, expected_max_diff=5e-2): # Z-Image does not support FP16 due to complex64 RoPE embeddings self.skipTest("Z-Image does not support FP16 inference") def test_strength_parameter(self): """Test that strength parameter affects the output correctly.""" device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(device) pipe.set_progress_bar_config(disable=None) # Test with different strength values inputs_low_strength = self.get_dummy_inputs(device) inputs_low_strength["strength"] = 0.2 inputs_high_strength = self.get_dummy_inputs(device) inputs_high_strength["strength"] = 0.8 # Both should complete without errors output_low = pipe(**inputs_low_strength).images[0] output_high = pipe(**inputs_high_strength).images[0] # Outputs should be different (different amount of transformation) self.assertFalse(np.allclose(output_low, output_high, atol=1e-3)) def test_invalid_strength(self): """Test that invalid strength values raise appropriate errors.""" device = "cpu" components = self.get_dummy_components() pipe = 
self.pipeline_class(**components) pipe.to(device) inputs = self.get_dummy_inputs(device) # Test strength < 0 inputs["strength"] = -0.1 with self.assertRaises(ValueError): pipe(**inputs) # Test strength > 1 inputs["strength"] = 1.5 with self.assertRaises(ValueError): pipe(**inputs)
{ "repo_id": "huggingface/diffusers", "file_path": "tests/pipelines/z_image/test_z_image_img2img.py", "license": "Apache License 2.0", "lines": 299, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/diffusers:src/diffusers/hooks/taylorseer_cache.py
import math
import re
from dataclasses import dataclass

import torch
import torch.nn as nn

from ..utils import logging
from .hooks import HookRegistry, ModelHook, StateManager


logger = logging.get_logger(__name__)

# Registry key under which the TaylorSeer hook is registered on each hooked module.
_TAYLORSEER_CACHE_HOOK = "taylorseer_cache"
# Default regex templates (matched with `re.fullmatch` on the module name) that select
# attention-like submodules of common diffusers transformer layouts.
_SPATIAL_ATTENTION_BLOCK_IDENTIFIERS = (
    "^blocks.*attn",
    "^transformer_blocks.*attn",
    "^single_transformer_blocks.*attn",
)
_TEMPORAL_ATTENTION_BLOCK_IDENTIFIERS = ("^temporal_transformer_blocks.*attn",)
_TRANSFORMER_BLOCK_IDENTIFIERS = _SPATIAL_ATTENTION_BLOCK_IDENTIFIERS + _TEMPORAL_ATTENTION_BLOCK_IDENTIFIERS
# Lite-mode templates: skip entire top-level blocks, cache only the output projection.
_BLOCK_IDENTIFIERS = ("^[^.]*block[^.]*\\.[^.]+$",)
_PROJ_OUT_IDENTIFIERS = ("^proj_out$",)


@dataclass
class TaylorSeerCacheConfig:
    """
    Configuration for TaylorSeer cache. See: https://huggingface.co/papers/2503.06923

    Attributes:
        cache_interval (`int`, defaults to `5`):
            The interval between full computation steps. After a full computation, the cached (predicted) outputs
            are reused for this many subsequent denoising steps before refreshing with a new full forward pass.
        disable_cache_before_step (`int`, defaults to `3`):
            The denoising step index before which caching is disabled, meaning full computation is performed for
            the initial steps (0 to disable_cache_before_step - 1) to gather data for Taylor series approximations.
            During these steps, Taylor factors are updated, but caching/predictions are not applied. Caching begins
            at this step.
        disable_cache_after_step (`int`, *optional*, defaults to `None`):
            The denoising step index after which caching is disabled. If set, for steps >= this value, all modules
            run full computations without predictions or state updates, ensuring accuracy in later stages if
            needed.
        max_order (`int`, defaults to `1`):
            The highest order in the Taylor series expansion for approximating module outputs. Higher orders
            provide better approximations but increase computation and memory usage.
        taylor_factors_dtype (`torch.dtype`, defaults to `torch.bfloat16`):
            Data type used for storing and computing Taylor series factors. Lower precision reduces memory but may
            affect stability; higher precision improves accuracy at the cost of more memory.
        skip_predict_identifiers (`list[str]`, *optional*, defaults to `None`):
            Regex patterns (using `re.fullmatch`) for module names to place as "skip" in "cache" mode. In this
            mode, the module computes fully during initial or refresh steps but returns a zero tensor (matching
            recorded shape) during prediction steps to skip computation cheaply.
        cache_identifiers (`list[str]`, *optional*, defaults to `None`):
            Regex patterns (using `re.fullmatch`) for module names to place in Taylor-series caching mode, where
            outputs are approximated and cached for reuse.
        use_lite_mode (`bool`, *optional*, defaults to `False`):
            Enables a lightweight TaylorSeer variant that minimizes memory usage by applying predefined patterns
            for skipping and caching (e.g., skipping blocks and caching projections). This overrides any custom
            `inactive_identifiers` or `active_identifiers`.

    Notes:
        - Patterns are matched using `re.fullmatch` on the module name.
        - If `skip_predict_identifiers` or `cache_identifiers` are provided, only matching modules are hooked.
        - If neither is provided, all attention-like modules are hooked by default.

    Example of inactive and active usage:
    ```py
    def forward(x):
        x = self.module1(x)  # inactive module: returns zeros tensor based on shape recorded during full compute
        x = self.module2(x)  # active module: caches output here, avoiding recomputation of prior steps
        return x
    ```
    """

    cache_interval: int = 5
    disable_cache_before_step: int = 3
    disable_cache_after_step: int | None = None
    max_order: int = 1
    taylor_factors_dtype: torch.dtype | None = torch.bfloat16
    skip_predict_identifiers: list[str] | None = None
    cache_identifiers: list[str] | None = None
    use_lite_mode: bool = False

    def __repr__(self) -> str:
        return (
            "TaylorSeerCacheConfig("
            f"cache_interval={self.cache_interval}, "
            f"disable_cache_before_step={self.disable_cache_before_step}, "
            f"disable_cache_after_step={self.disable_cache_after_step}, "
            f"max_order={self.max_order}, "
            f"taylor_factors_dtype={self.taylor_factors_dtype}, "
            f"skip_predict_identifiers={self.skip_predict_identifiers}, "
            f"cache_identifiers={self.cache_identifiers}, "
            f"use_lite_mode={self.use_lite_mode})"
        )


class TaylorSeerState:
    """
    Per-module state for TaylorSeer caching.

    Holds the Taylor-series factors (recursively computed divided differences) used by `predict` to
    extrapolate a module's outputs between full forward passes. When `is_inactive` is True, the state
    instead records only the output shapes/dtypes so `predict` can return zero tensors and skip the
    module entirely during prediction steps.
    """

    def __init__(
        self,
        taylor_factors_dtype: torch.dtype | None = torch.bfloat16,
        max_order: int = 1,
        is_inactive: bool = False,
    ):
        self.taylor_factors_dtype = taylor_factors_dtype
        self.max_order = max_order
        self.is_inactive = is_inactive

        # Output dtypes recorded at the last full computation (one entry per module output).
        self.module_dtypes: tuple[torch.dtype, ...] = ()
        # Denoising step index of the last full computation; None until first update.
        self.last_update_step: int | None = None
        # taylor_factors[output_index][order] -> factor tensor (stored in taylor_factors_dtype).
        self.taylor_factors: dict[int, dict[int, torch.Tensor]] = {}
        # Output shapes recorded for inactive ("skip") modules, used to build zero tensors.
        self.inactive_shapes: tuple[tuple[int, ...], ...] | None = None
        self.device: torch.device | None = None
        # Current denoising step; incremented by the hook before each forward.
        self.current_step: int = -1

    def reset(self) -> None:
        """Clear per-run state so the next sampling run starts fresh."""
        # NOTE(review): `module_dtypes` is not cleared here — presumably harmless since
        # `update` overwrites it on the first full compute of the next run; confirm.
        self.current_step = -1
        self.last_update_step = None
        self.taylor_factors = {}
        self.inactive_shapes = None
        self.device = None

    def update(
        self,
        outputs: tuple[torch.Tensor, ...],
    ) -> None:
        """
        Record a full computation: refresh dtypes/device and either the recorded shapes
        (inactive mode) or the Taylor factors (active mode).
        """
        self.module_dtypes = tuple(output.dtype for output in outputs)
        self.device = outputs[0].device
        if self.is_inactive:
            self.inactive_shapes = tuple(output.shape for output in outputs)
        else:
            for i, features in enumerate(outputs):
                # Order-0 factor is the raw output itself.
                new_factors: dict[int, torch.Tensor] = {0: features}
                is_first_update = self.last_update_step is None
                if not is_first_update:
                    delta_step = self.current_step - self.last_update_step
                    if delta_step == 0:
                        raise ValueError("Delta step cannot be zero for TaylorSeer update.")
                    # Recursive divided differences up to max_order
                    prev_factors = self.taylor_factors.get(i, {})
                    for j in range(self.max_order):
                        prev = prev_factors.get(j)
                        if prev is None:
                            # Higher orders become available only after enough updates.
                            break
                        new_factors[j + 1] = (new_factors[j] - prev.to(features.dtype)) / delta_step
                self.taylor_factors[i] = {
                    order: factor.to(self.taylor_factors_dtype) for order, factor in new_factors.items()
                }
        self.last_update_step = self.current_step

    @torch.compiler.disable
    def predict(self) -> list[torch.Tensor]:
        """
        Approximate the module outputs for the current step.

        Inactive mode returns zero tensors with the recorded shapes/dtypes; active mode evaluates the
        Taylor expansion `sum_k factor_k * offset^k / k!` at the step offset since the last update.

        Raises:
            ValueError: If called before any full computation has populated the state.
        """
        if self.last_update_step is None:
            raise ValueError("Cannot predict without prior initialization/update.")
        step_offset = self.current_step - self.last_update_step
        outputs = []
        if self.is_inactive:
            if self.inactive_shapes is None:
                raise ValueError("Inactive shapes not set during prediction.")
            for i in range(len(self.module_dtypes)):
                outputs.append(
                    torch.zeros(
                        self.inactive_shapes[i],
                        dtype=self.module_dtypes[i],
                        device=self.device,
                    )
                )
        else:
            if not self.taylor_factors:
                raise ValueError("Taylor factors empty during prediction.")
            num_outputs = len(self.taylor_factors)
            num_orders = len(self.taylor_factors[0])
            for i in range(num_outputs):
                output_dtype = self.module_dtypes[i]
                taylor_factors = self.taylor_factors[i]
                # Accumulate the Taylor series in the module's original output dtype.
                output = torch.zeros_like(taylor_factors[0], dtype=output_dtype)
                for order in range(num_orders):
                    coeff = (step_offset**order) / math.factorial(order)
                    factor = taylor_factors[order]
                    output = output + factor.to(output_dtype) * coeff
                outputs.append(output)
        return outputs


class TaylorSeerCacheHook(ModelHook):
    """
    ModelHook that either runs the wrapped module's real forward (and records its outputs in a
    `TaylorSeerState`) or returns a cheap prediction from that state, depending on the current
    denoising step and the configured warmup/interval/cooldown schedule.
    """

    _is_stateful = True

    def __init__(
        self,
        cache_interval: int,
        disable_cache_before_step: int,
        taylor_factors_dtype: torch.dtype,
        state_manager: StateManager,
        disable_cache_after_step: int | None = None,
    ):
        super().__init__()
        self.cache_interval = cache_interval
        self.disable_cache_before_step = disable_cache_before_step
        self.disable_cache_after_step = disable_cache_after_step
        self.taylor_factors_dtype = taylor_factors_dtype
        self.state_manager = state_manager

    def initialize_hook(self, module: torch.nn.Module):
        # No per-module setup needed; state lives in the StateManager.
        return module

    def reset_state(self, module: torch.nn.Module) -> None:
        """
        Reset state between sampling runs.
        """
        self.state_manager.reset()

    @torch.compiler.disable
    def _measure_should_compute(self) -> tuple[bool, TaylorSeerState]:
        """
        Advance the per-module step counter and decide whether this step needs a full forward.

        Full computation happens during warmup (step < disable_cache_before_step), on the periodic
        refresh interval, and during cooldown (step >= disable_cache_after_step, if set).

        Returns:
            `(should_compute, state)` — the decision plus the state, so the caller avoids a second
            StateManager lookup. Note the side effect: `state.current_step` is incremented here.
        """
        state: TaylorSeerState = self.state_manager.get_state()
        state.current_step += 1
        current_step = state.current_step

        is_warmup_phase = current_step < self.disable_cache_before_step
        is_compute_interval = (current_step - self.disable_cache_before_step - 1) % self.cache_interval == 0
        is_cooldown_phase = self.disable_cache_after_step is not None and current_step >= self.disable_cache_after_step
        should_compute = is_warmup_phase or is_compute_interval or is_cooldown_phase
        return should_compute, state

    def new_forward(self, module: torch.nn.Module, *args, **kwargs):
        """
        Run the original forward on compute steps (recording outputs), otherwise return the state's
        prediction. Single-tensor results are unwrapped to match the original return convention.
        """
        should_compute, state = self._measure_should_compute()

        if should_compute:
            outputs = self.fn_ref.original_forward(*args, **kwargs)
            # Normalize to a tuple so `state.update` handles single- and multi-output modules alike.
            wrapped_outputs = (outputs,) if isinstance(outputs, torch.Tensor) else outputs
            state.update(wrapped_outputs)
            return outputs

        outputs_list = state.predict()
        return outputs_list[0] if len(outputs_list) == 1 else tuple(outputs_list)


def _resolve_patterns(config: TaylorSeerCacheConfig) -> tuple[list[str], list[str]]:
    """
    Resolve effective inactive and active pattern lists from config + templates.
    """
    inactive_patterns = config.skip_predict_identifiers if config.skip_predict_identifiers is not None else None
    active_patterns = config.cache_identifiers if config.cache_identifiers is not None else None

    return inactive_patterns or [], active_patterns or []


def apply_taylorseer_cache(module: torch.nn.Module, config: TaylorSeerCacheConfig):
    """
    Applies the TaylorSeer cache to a given pipeline (typically the transformer / UNet).

    This function hooks selected modules in the model to enable caching or skipping based on the provided
    configuration, reducing redundant computations in diffusion denoising loops.

    Args:
        module (torch.nn.Module):
            The model subtree to apply the hooks to.
        config (TaylorSeerCacheConfig):
            Configuration for the cache.

    Example:

    ```python
    >>> import torch
    >>> from diffusers import FluxPipeline, TaylorSeerCacheConfig

    >>> pipe = FluxPipeline.from_pretrained(
    ...     "black-forest-labs/FLUX.1-dev",
    ...     torch_dtype=torch.bfloat16,
    ... )
    >>> pipe.to("cuda")

    >>> config = TaylorSeerCacheConfig(
    ...     cache_interval=5,
    ...     max_order=1,
    ...     disable_cache_before_step=3,
    ...     taylor_factors_dtype=torch.float32,
    ... )
    >>> pipe.transformer.enable_cache(config)
    ```
    """
    inactive_patterns, active_patterns = _resolve_patterns(config)
    # Default to hooking all attention-like modules when no active patterns were given.
    active_patterns = active_patterns or _TRANSFORMER_BLOCK_IDENTIFIERS

    if config.use_lite_mode:
        # Lite mode ignores user patterns: skip whole blocks, cache only the output projection.
        logger.info("Using TaylorSeer Lite variant for cache.")
        active_patterns = _PROJ_OUT_IDENTIFIERS
        inactive_patterns = _BLOCK_IDENTIFIERS
        if config.skip_predict_identifiers or config.cache_identifiers:
            logger.warning("Lite mode overrides user patterns.")

    for name, submodule in module.named_modules():
        matches_inactive = any(re.fullmatch(pattern, name) for pattern in inactive_patterns)
        matches_active = any(re.fullmatch(pattern, name) for pattern in active_patterns)

        if not (matches_inactive or matches_active):
            continue

        # NOTE: a module matching both pattern sets is treated as inactive (skip wins).
        _apply_taylorseer_cache_hook(
            module=submodule,
            config=config,
            is_inactive=matches_inactive,
        )


def _apply_taylorseer_cache_hook(
    module: nn.Module,
    config: TaylorSeerCacheConfig,
    is_inactive: bool,
):
    """
    Registers the TaylorSeer hook on the specified nn.Module.

    Args:
        module: The nn.Module to be hooked.
        config: Cache configuration.
        is_inactive: Whether this module should operate in "inactive" (skip) mode.
    """
    state_manager = StateManager(
        TaylorSeerState,
        init_kwargs={
            "taylor_factors_dtype": config.taylor_factors_dtype,
            "max_order": config.max_order,
            "is_inactive": is_inactive,
        },
    )
    registry = HookRegistry.check_if_exists_or_initialize(module)
    hook = TaylorSeerCacheHook(
        cache_interval=config.cache_interval,
        disable_cache_before_step=config.disable_cache_before_step,
        taylor_factors_dtype=config.taylor_factors_dtype,
        disable_cache_after_step=config.disable_cache_after_step,
        state_manager=state_manager,
    )
    registry.register_hook(hook, _TAYLORSEER_CACHE_HOOK)
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/hooks/taylorseer_cache.py", "license": "Apache License 2.0", "lines": 287, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
huggingface/diffusers:tests/models/transformers/test_models_transformer_z_image.py
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import gc
import os
import unittest

import torch

from diffusers import ZImageTransformer2DModel

from ...testing_utils import IS_GITHUB_ACTIONS, torch_device
from ..test_modeling_common import ModelTesterMixin, TorchCompileTesterMixin


# Z-Image requires torch.use_deterministic_algorithms(False) due to complex64 RoPE operations
# Cannot use enable_full_determinism() which sets it to True
# NOTE: env vars must be set before the CUDA/cuBLAS backends are exercised.
os.environ["CUDA_LAUNCH_BLOCKING"] = "1"
os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":16:8"
torch.use_deterministic_algorithms(False)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
if hasattr(torch.backends, "cuda"):
    torch.backends.cuda.matmul.allow_tf32 = False


@unittest.skipIf(
    IS_GITHUB_ACTIONS,
    reason="Skipping test-suite inside the CI because the model has `torch.empty()` inside of it during init and we don't have a clear way to override it in the modeling tests.",
)
class ZImageTransformerTests(ModelTesterMixin, unittest.TestCase):
    """Common modeling tests for `ZImageTransformer2DModel` (eager execution)."""

    model_class = ZImageTransformer2DModel
    # Z-Image takes its latents under the keyword `x` (a list of per-sample tensors).
    main_input_name = "x"

    # We override the items here because the transformer under consideration is small.
    model_split_percents = [0.9, 0.9, 0.9]

    def prepare_dummy_input(self, height=16, width=16):
        """Build minimal list-valued inputs (`x`, `cap_feats`) plus a timestep tensor."""
        batch_size = 1
        num_channels = 16
        embedding_dim = 16
        sequence_length = 16

        # Inputs are lists of per-sample tensors rather than a single batched tensor.
        hidden_states = [torch.randn((num_channels, 1, height, width)).to(torch_device) for _ in range(batch_size)]
        encoder_hidden_states = [
            torch.randn((sequence_length, embedding_dim)).to(torch_device) for _ in range(batch_size)
        ]
        timestep = torch.tensor([0.0]).to(torch_device)

        return {"x": hidden_states, "cap_feats": encoder_hidden_states, "t": timestep}

    @property
    def dummy_input(self):
        return self.prepare_dummy_input()

    @property
    def input_shape(self):
        return (4, 32, 32)

    @property
    def output_shape(self):
        return (4, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        # Tiny config so tests stay fast.
        # NOTE(review): n_kv_heads (2) > n_heads (1) looks inverted for a GQA config —
        # confirm against the model's expected head layout.
        init_dict = {
            "all_patch_size": (2,),
            "all_f_patch_size": (1,),
            "in_channels": 16,
            "dim": 16,
            "n_layers": 1,
            "n_refiner_layers": 1,
            "n_heads": 1,
            "n_kv_heads": 2,
            "qk_norm": True,
            "cap_feat_dim": 16,
            "rope_theta": 256.0,
            "t_scale": 1000.0,
            "axes_dims": [8, 4, 4],
            "axes_lens": [256, 32, 32],
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def setUp(self):
        # Aggressive cleanup + fixed seeds for reproducibility across test ordering.
        gc.collect()
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
            torch.cuda.synchronize()
        torch.manual_seed(0)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0)

    def tearDown(self):
        super().tearDown()
        gc.collect()
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
            torch.cuda.synchronize()
        torch.manual_seed(0)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0)

    def test_gradient_checkpointing_is_applied(self):
        expected_set = {"ZImageTransformer2DModel"}
        super().test_gradient_checkpointing_is_applied(expected_set=expected_set)

    @unittest.skip("Test is not supported for handling main inputs that are lists.")
    def test_training(self):
        super().test_training()

    @unittest.skip("Test is not supported for handling main inputs that are lists.")
    def test_ema_training(self):
        super().test_ema_training()

    @unittest.skip("Test is not supported for handling main inputs that are lists.")
    def test_effective_gradient_checkpointing(self):
        super().test_effective_gradient_checkpointing()

    @unittest.skip(
        "Test needs to be revisited. But we need to ensure `x_pad_token` and `cap_pad_token` are cast to the same dtype as the destination tensor before they are assigned to the padding indices."
    )
    def test_layerwise_casting_training(self):
        super().test_layerwise_casting_training()

    @unittest.skip("Test is not supported for handling main inputs that are lists.")
    def test_outputs_equivalence(self):
        super().test_outputs_equivalence()

    @unittest.skip("Test will pass if we change to deterministic values instead of empty in the DiT.")
    def test_group_offloading(self):
        super().test_group_offloading()

    @unittest.skip("Test will pass if we change to deterministic values instead of empty in the DiT.")
    def test_group_offloading_with_disk(self):
        super().test_group_offloading_with_disk()


class ZImageTransformerCompileTests(TorchCompileTesterMixin, unittest.TestCase):
    """torch.compile coverage for `ZImageTransformer2DModel`; reuses the eager suite's fixtures."""

    model_class = ZImageTransformer2DModel
    different_shapes_for_compilation = [(4, 4), (4, 8), (8, 8)]

    def prepare_init_args_and_inputs_for_common(self):
        # Delegate to the eager test class so both suites share one dummy config.
        return ZImageTransformerTests().prepare_init_args_and_inputs_for_common()

    def prepare_dummy_input(self, height, width):
        return ZImageTransformerTests().prepare_dummy_input(height=height, width=width)

    @unittest.skip(
        "The repeated block in this model is ZImageTransformerBlock, which is used for noise_refiner, context_refiner, and layers. As a consequence of this, the inputs recorded for the block would vary during compilation and full compilation with fullgraph=True would trigger recompilation at least thrice."
    )
    def test_torch_compile_recompilation_and_graph_break(self):
        super().test_torch_compile_recompilation_and_graph_break()

    @unittest.skip("Fullgraph AoT is broken")
    def test_compile_works_with_aot(self):
        super().test_compile_works_with_aot()

    @unittest.skip("Fullgraph is broken")
    def test_compile_on_different_shapes(self):
        super().test_compile_on_different_shapes()
{ "repo_id": "huggingface/diffusers", "file_path": "tests/models/transformers/test_models_transformer_z_image.py", "license": "Apache License 2.0", "lines": 139, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/diffusers:src/diffusers/pipelines/kandinsky5/pipeline_kandinsky_i2i.py
# Copyright 2025 The Kandinsky Team and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import html from typing import Callable import numpy as np import regex as re import torch from torch.nn import functional as F from transformers import CLIPTextModel, CLIPTokenizer, Qwen2_5_VLForConditionalGeneration, Qwen2VLProcessor from ...callbacks import MultiPipelineCallbacks, PipelineCallback from ...image_processor import PipelineImageInput, VaeImageProcessor from ...loaders import KandinskyLoraLoaderMixin from ...models import AutoencoderKL from ...models.transformers import Kandinsky5Transformer3DModel from ...schedulers import FlowMatchEulerDiscreteScheduler # Add imports for offloading and tiling from ...utils import ( is_ftfy_available, is_torch_xla_available, logging, replace_example_docstring, ) from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline from .pipeline_output import KandinskyImagePipelineOutput if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) # pylint: disable=invalid-name if is_ftfy_available(): import ftfy logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = """ Examples: ```python >>> import torch >>> from diffusers import Kandinsky5I2IPipeline >>> # Available models: >>> # kandinskylab/Kandinsky-5.0-I2I-Lite-sft-Diffusers >>> # 
kandinskylab/Kandinsky-5.0-I2I-Lite-pretrain-Diffusers >>> model_id = "kandinskylab/Kandinsky-5.0-I2I-Lite-sft-Diffusers" >>> pipe = Kandinsky5I2IPipeline.from_pretrained(model_id, torch_dtype=torch.bfloat16) >>> pipe = pipe.to("cuda") >>> prompt = "A cat and a dog baking a cake together in a kitchen." >>> output = pipe( ... prompt=prompt, ... negative_prompt="", ... height=1024, ... width=1024, ... num_inference_steps=50, ... guidance_scale=3.5, ... ).frames[0] ``` """ def basic_clean(text): """ Copied from https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/wan/pipeline_wan.py Clean text using ftfy if available and unescape HTML entities. """ if is_ftfy_available(): text = ftfy.fix_text(text) text = html.unescape(html.unescape(text)) return text.strip() def whitespace_clean(text): """ Copied from https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/wan/pipeline_wan.py Normalize whitespace in text by replacing multiple spaces with single space. """ text = re.sub(r"\s+", " ", text) text = text.strip() return text def prompt_clean(text): """ Copied from https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/wan/pipeline_wan.py Apply both basic cleaning and whitespace normalization to prompts. """ text = whitespace_clean(basic_clean(text)) return text class Kandinsky5I2IPipeline(DiffusionPipeline, KandinskyLoraLoaderMixin): r""" Pipeline for image-to-image generation using Kandinsky 5.0. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). Args: transformer ([`Kandinsky5Transformer3DModel`]): Conditional Transformer to denoise the encoded image latents. vae ([`AutoencoderKL`]): Variational Auto-Encoder Model [black-forest-labs/FLUX.1-dev (vae)](https://huggingface.co/black-forest-labs/FLUX.1-dev) to encode and decode videos to and from latent representations. 
text_encoder ([`Qwen2_5_VLForConditionalGeneration`]): Frozen text-encoder [Qwen2.5-VL](https://huggingface.co/Qwen/Qwen2.5-VL-7B-Instruct). tokenizer ([`AutoProcessor`]): Tokenizer for Qwen2.5-VL. text_encoder_2 ([`CLIPTextModel`]): Frozen [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. tokenizer_2 ([`CLIPTokenizer`]): Tokenizer for CLIP. scheduler ([`FlowMatchEulerDiscreteScheduler`]): A scheduler to be used in combination with `transformer` to denoise the encoded image latents. """ model_cpu_offload_seq = "text_encoder->text_encoder_2->transformer->vae" _callback_tensor_inputs = [ "latents", "prompt_embeds_qwen", "prompt_embeds_clip", "negative_prompt_embeds_qwen", "negative_prompt_embeds_clip", ] def __init__( self, transformer: Kandinsky5Transformer3DModel, vae: AutoencoderKL, text_encoder: Qwen2_5_VLForConditionalGeneration, tokenizer: Qwen2VLProcessor, text_encoder_2: CLIPTextModel, tokenizer_2: CLIPTokenizer, scheduler: FlowMatchEulerDiscreteScheduler, ): super().__init__() self.register_modules( transformer=transformer, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, text_encoder_2=text_encoder_2, tokenizer_2=tokenizer_2, scheduler=scheduler, ) self.prompt_template = "<|im_start|>system\nYou are a promt engineer. 
Based on the provided source image (first image) and target image (second image), create an interesting text prompt that can be used together with the source image to create the target image:<|im_end|><|im_start|>user{}<|vision_start|><|image_pad|><|vision_end|><|im_end|>" self.prompt_template_encode_start_idx = 55 self.vae_scale_factor_spatial = 8 self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor_spatial) self.resolutions = [(1024, 1024), (640, 1408), (1408, 640), (768, 1280), (1280, 768), (896, 1152), (1152, 896)] def _encode_prompt_qwen( self, prompt: list[str], image: PipelineImageInput | None = None, device: torch.device | None = None, max_sequence_length: int = 1024, dtype: torch.dtype | None = None, ): """ Encode prompt using Qwen2.5-VL text encoder. This method processes the input prompt through the Qwen2.5-VL model to generate text embeddings suitable for image generation. Args: prompt list[str]: Input list of prompts image (PipelineImageInput): Input list of images to condition the generation on device (torch.device): Device to run encoding on max_sequence_length (int): Maximum sequence length for tokenization dtype (torch.dtype): Data type for embeddings Returns: tuple[torch.Tensor, torch.Tensor]: Text embeddings and cumulative sequence lengths """ device = device or self._execution_device dtype = dtype or self.text_encoder.dtype if not isinstance(image, list): image = [image] image = [i.resize((i.size[0] // 2, i.size[1] // 2)) for i in image] full_texts = [self.prompt_template.format(p) for p in prompt] max_allowed_len = self.prompt_template_encode_start_idx + max_sequence_length untruncated_ids = self.tokenizer( text=full_texts, images=image, videos=None, return_tensors="pt", padding="longest", )["input_ids"] if untruncated_ids.shape[-1] > max_allowed_len: for i, text in enumerate(full_texts): tokens = untruncated_ids[i] num_image_tokens = (tokens == self.tokenizer.image_token_id).sum() tokens = tokens[tokens != 
self.tokenizer.image_token_id][self.prompt_template_encode_start_idx : -3] removed_text = self.tokenizer.decode(tokens[max_sequence_length - num_image_tokens - 3 :]) if len(removed_text) > 0: full_texts[i] = text[: -len(removed_text)] logger.warning( "The following part of your input was truncated because `max_sequence_length` is set to " f" {max_sequence_length} tokens: {removed_text}" ) inputs = self.tokenizer( text=full_texts, images=image, videos=None, max_length=max_allowed_len, truncation=True, return_tensors="pt", padding=True, ).to(device) embeds = self.text_encoder( **inputs, return_dict=True, output_hidden_states=True, )["hidden_states"][-1][:, self.prompt_template_encode_start_idx :] attention_mask = inputs["attention_mask"][:, self.prompt_template_encode_start_idx :] cu_seqlens = torch.cumsum(attention_mask.sum(1), dim=0) cu_seqlens = F.pad(cu_seqlens, (1, 0), value=0).to(dtype=torch.int32) return embeds.to(dtype), cu_seqlens def _encode_prompt_clip( self, prompt: str | list[str], device: torch.device | None = None, dtype: torch.dtype | None = None, ): """ Encode prompt using CLIP text encoder. This method processes the input prompt through the CLIP model to generate pooled embeddings that capture semantic information. 
Args: prompt (str | list[str]): Input prompt or list of prompts device (torch.device): Device to run encoding on dtype (torch.dtype): Data type for embeddings Returns: torch.Tensor: Pooled text embeddings from CLIP """ device = device or self._execution_device dtype = dtype or self.text_encoder_2.dtype inputs = self.tokenizer_2( prompt, max_length=77, truncation=True, add_special_tokens=True, padding="max_length", return_tensors="pt", ).to(device) pooled_embed = self.text_encoder_2(**inputs)["pooler_output"] return pooled_embed.to(dtype) def encode_prompt( self, prompt: str | list[str], image: torch.Tensor, num_images_per_prompt: int = 1, max_sequence_length: int = 1024, device: torch.device | None = None, dtype: torch.dtype | None = None, ): r""" Encodes a single prompt (positive or negative) into text encoder hidden states. This method combines embeddings from both Qwen2.5-VL and CLIP text encoders to create comprehensive text representations for image generation. Args: prompt (`str` or `list[str]`): Prompt to be encoded. num_images_per_prompt (`int`, *optional*, defaults to 1): Number of images to generate per prompt. max_sequence_length (`int`, *optional*, defaults to 1024): Maximum sequence length for text encoding. Must be less than 1024 device (`torch.device`, *optional*): Torch device. dtype (`torch.dtype`, *optional*): Torch dtype. 
Returns: tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - Qwen text embeddings of shape (batch_size * num_images_per_prompt, sequence_length, embedding_dim) - CLIP pooled embeddings of shape (batch_size * num_images_per_prompt, clip_embedding_dim) - Cumulative sequence lengths (`cu_seqlens`) for Qwen embeddings of shape (batch_size * num_images_per_prompt + 1,) """ device = device or self._execution_device dtype = dtype or self.text_encoder.dtype if not isinstance(prompt, list): prompt = [prompt] batch_size = len(prompt) prompt = [prompt_clean(p) for p in prompt] # Encode with Qwen2.5-VL prompt_embeds_qwen, prompt_cu_seqlens = self._encode_prompt_qwen( prompt=prompt, image=image, device=device, max_sequence_length=max_sequence_length, dtype=dtype, ) # prompt_embeds_qwen shape: [batch_size, seq_len, embed_dim] # Encode with CLIP prompt_embeds_clip = self._encode_prompt_clip( prompt=prompt, device=device, dtype=dtype, ) # prompt_embeds_clip shape: [batch_size, clip_embed_dim] # Repeat embeddings for num_images_per_prompt # Qwen embeddings: repeat sequence for each image, then reshape prompt_embeds_qwen = prompt_embeds_qwen.repeat( 1, num_images_per_prompt, 1 ) # [batch_size, seq_len * num_images_per_prompt, embed_dim] # Reshape to [batch_size * num_images_per_prompt, seq_len, embed_dim] prompt_embeds_qwen = prompt_embeds_qwen.view( batch_size * num_images_per_prompt, -1, prompt_embeds_qwen.shape[-1] ) # CLIP embeddings: repeat for each image prompt_embeds_clip = prompt_embeds_clip.repeat( 1, num_images_per_prompt, 1 ) # [batch_size, num_images_per_prompt, clip_embed_dim] # Reshape to [batch_size * num_images_per_prompt, clip_embed_dim] prompt_embeds_clip = prompt_embeds_clip.view(batch_size * num_images_per_prompt, -1) # Repeat cumulative sequence lengths for num_images_per_prompt # Original differences (lengths) for each prompt in the batch original_lengths = prompt_cu_seqlens.diff() # [len1, len2, ...] 
    def check_inputs(
        self,
        prompt,
        negative_prompt,
        image,
        height,
        width,
        prompt_embeds_qwen=None,
        prompt_embeds_clip=None,
        negative_prompt_embeds_qwen=None,
        negative_prompt_embeds_clip=None,
        prompt_cu_seqlens=None,
        negative_prompt_cu_seqlens=None,
        callback_on_step_end_tensor_inputs=None,
        max_sequence_length=None,
    ):
        """
        Validate input parameters for the pipeline.

        Args:
            prompt: Input prompt
            negative_prompt: Negative prompt for guidance
            image: Input image for conditioning
            height: Image height
            width: Image width
            prompt_embeds_qwen: Pre-computed Qwen prompt embeddings
            prompt_embeds_clip: Pre-computed CLIP prompt embeddings
            negative_prompt_embeds_qwen: Pre-computed Qwen negative prompt embeddings
            negative_prompt_embeds_clip: Pre-computed CLIP negative prompt embeddings
            prompt_cu_seqlens: Pre-computed cumulative sequence lengths for Qwen positive prompt
            negative_prompt_cu_seqlens: Pre-computed cumulative sequence lengths for Qwen negative prompt
            callback_on_step_end_tensor_inputs: Callback tensor inputs
            max_sequence_length: Requested Qwen encoding length (hard cap 1024)

        Raises:
            ValueError: If inputs are invalid
        """
        # Hard limit of the Qwen text-encoding budget used by this pipeline.
        if max_sequence_length is not None and max_sequence_length > 1024:
            raise ValueError("max_sequence_length must be less than 1024")

        if image is None:
            raise ValueError("`image` must be provided for image-to-image generation")

        # Unsupported (width, height) pairs only warn here; __call__ later snaps the
        # request to the closest supported aspect ratio.
        if (width, height) not in self.resolutions:
            resolutions_str = ",".join([f"({w},{h})" for w, h in self.resolutions])
            logger.warning(
                f"`height` and `width` have to be one of {resolutions_str}, but are {height} and {width}. Dimensions will be resized accordingly"
            )

        if callback_on_step_end_tensor_inputs is not None and not all(
            k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
        ):
            raise ValueError(
                f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
            )

        # Check for consistency within positive prompt embeddings and sequence lengths:
        # the three positive pre-computed tensors are all-or-nothing.
        if prompt_embeds_qwen is not None or prompt_embeds_clip is not None or prompt_cu_seqlens is not None:
            if prompt_embeds_qwen is None or prompt_embeds_clip is None or prompt_cu_seqlens is None:
                raise ValueError(
                    "If any of `prompt_embeds_qwen`, `prompt_embeds_clip`, or `prompt_cu_seqlens` is provided, "
                    "all three must be provided."
                )

        # Same all-or-nothing rule for the negative-prompt tensors.
        if (
            negative_prompt_embeds_qwen is not None
            or negative_prompt_embeds_clip is not None
            or negative_prompt_cu_seqlens is not None
        ):
            if (
                negative_prompt_embeds_qwen is None
                or negative_prompt_embeds_clip is None
                or negative_prompt_cu_seqlens is None
            ):
                raise ValueError(
                    "If any of `negative_prompt_embeds_qwen`, `negative_prompt_embeds_clip`, or `negative_prompt_cu_seqlens` is provided, "
                    "all three must be provided."
                )

        # At least one source of positive conditioning must exist.
        if prompt is None and prompt_embeds_qwen is None:
            raise ValueError(
                "Provide either `prompt` or `prompt_embeds_qwen` (and corresponding `prompt_embeds_clip` and `prompt_cu_seqlens`). Cannot leave all undefined."
            )

        # Validate types for prompt and negative_prompt if provided
        if prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
        if negative_prompt is not None and (
            not isinstance(negative_prompt, str) and not isinstance(negative_prompt, list)
        ):
            raise ValueError(f"`negative_prompt` has to be of type `str` or `list` but is {type(negative_prompt)}")
    def prepare_latents(
        self,
        image: PipelineImageInput,
        batch_size: int,
        num_channels_latents: int = 16,
        height: int = 1024,
        width: int = 1024,
        dtype: torch.dtype | None = None,
        device: torch.device | None = None,
        generator: torch.Generator | list[torch.Generator] | None = None,
        latents: torch.Tensor | None = None,
    ) -> torch.Tensor:
        """
        Prepare initial latent variables for image-to-image generation.

        This method creates random noise latents and concatenates the VAE-encoded input
        image plus a conditioning-mask channel along the channel axis.

        Args:
            image (PipelineImageInput): Input image to condition the generation on
            batch_size (int): Number of images to generate
            num_channels_latents (int): Number of channels in latent space
            height (int): Height of generated image
            width (int): Width of generated image
            dtype (torch.dtype): Data type for latents
            device (torch.device): Device to create latents on
            generator (torch.Generator): Random number generator
            latents (torch.Tensor): Pre-existing latents to use

        Returns:
            torch.Tensor: Prepared latent tensor with encoded image
        """
        # Caller-supplied latents short-circuit generation (only moved to device/dtype).
        if latents is not None:
            return latents.to(device=device, dtype=dtype)

        # Channels-last latent layout: [batch, frames(=1), H/8, W/8, C].
        shape = (
            batch_size,
            1,
            int(height) // self.vae_scale_factor_spatial,
            int(width) // self.vae_scale_factor_spatial,
            num_channels_latents,
        )

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        # Generate random noise for all frames
        latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # Encode the input image to use as conditioning.
        image_tensor = self.image_processor.preprocess(image, height=height, width=width).to(device, dtype=dtype)

        # Encode image to latents using VAE
        with torch.no_grad():
            image_latents = self.vae.encode(image_tensor).latent_dist.sample(generator=generator)
            image_latents = image_latents.unsqueeze(2)  # Add temporal dimension

        # Normalize latents if needed
        if hasattr(self.vae.config, "scaling_factor"):
            image_latents = image_latents * self.vae.config.scaling_factor

        # Move channels to the last axis to match the noise layout: [batch, 1, H, W, C].
        image_latents = image_latents.permute(0, 2, 3, 4, 1)

        # Concatenate [noise | image latents | all-ones mask channel] along channels;
        # only the first num_channels_latents channels are denoised later.
        latents = torch.cat([latents, image_latents, torch.ones_like(latents[..., :1])], -1)

        return latents
    @property
    def guidance_scale(self):
        """Classifier-free guidance scale set for the current run."""
        return self._guidance_scale

    @property
    def num_timesteps(self):
        """Number of denoising timesteps of the current run."""
        return self._num_timesteps

    @property
    def interrupt(self):
        """Whether generation has been interrupted (e.g. via a callback)."""
        return self._interrupt

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image: PipelineImageInput,
        prompt: str | list[str] = None,
        negative_prompt: str | list[str] | None = None,
        height: int | None = None,
        width: int | None = None,
        num_inference_steps: int = 50,
        guidance_scale: float = 3.5,
        num_images_per_prompt: int | None = 1,
        generator: torch.Generator | list[torch.Generator] | None = None,
        latents: torch.Tensor | None = None,
        prompt_embeds_qwen: torch.Tensor | None = None,
        prompt_embeds_clip: torch.Tensor | None = None,
        negative_prompt_embeds_qwen: torch.Tensor | None = None,
        negative_prompt_embeds_clip: torch.Tensor | None = None,
        prompt_cu_seqlens: torch.Tensor | None = None,
        negative_prompt_cu_seqlens: torch.Tensor | None = None,
        output_type: str | None = "pil",
        return_dict: bool = True,
        callback_on_step_end: Callable[[int, int, None], PipelineCallback | MultiPipelineCallbacks] | None = None,
        callback_on_step_end_tensor_inputs: list[str] = ["latents"],
        max_sequence_length: int = 1024,
    ):
        r"""
        The call function to the pipeline for image-to-image generation.

        Args:
            image (`PipelineImageInput`):
                The input image to condition the generation on. Must be an image, a list of images or a
                `torch.Tensor`.
            prompt (`str` or `list[str]`, *optional*):
                The prompt or prompts to guide the image generation. If not defined, pass `prompt_embeds` instead.
            negative_prompt (`str` or `list[str]`, *optional*):
                The prompt or prompts to avoid during image generation. If not defined, pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale` < `1`).
            height (`int`):
                The height in pixels of the generated image.
            width (`int`):
                The width in pixels of the generated image.
            num_inference_steps (`int`, defaults to `50`):
                The number of denoising steps.
            guidance_scale (`float`, defaults to `3.5`):
                Guidance scale as defined in classifier-free guidance.
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            generator (`torch.Generator` or `list[torch.Generator]`, *optional*):
                A torch generator to make generation deterministic.
            latents (`torch.Tensor`, *optional*):
                Pre-generated noisy latents.
            prompt_embeds_qwen (`torch.Tensor`, *optional*):
                Pre-generated Qwen text embeddings.
            prompt_embeds_clip (`torch.Tensor`, *optional*):
                Pre-generated CLIP text embeddings.
            negative_prompt_embeds_qwen (`torch.Tensor`, *optional*):
                Pre-generated Qwen negative text embeddings.
            negative_prompt_embeds_clip (`torch.Tensor`, *optional*):
                Pre-generated CLIP negative text embeddings.
            prompt_cu_seqlens (`torch.Tensor`, *optional*):
                Pre-generated cumulative sequence lengths for Qwen positive prompt.
            negative_prompt_cu_seqlens (`torch.Tensor`, *optional*):
                Pre-generated cumulative sequence lengths for Qwen negative prompt.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`KandinskyImagePipelineOutput`].
            callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*):
                A function that is called at the end of each denoising step.
            callback_on_step_end_tensor_inputs (`List`, *optional*):
                The list of tensor inputs for the `callback_on_step_end` function.
            max_sequence_length (`int`, defaults to `1024`):
                The maximum sequence length for text and image qwen encoding. Must be less than 1024

        Examples:

        Returns:
            [`~KandinskyImagePipelineOutput`] or `tuple`:
                If `return_dict` is `True`, [`KandinskyImagePipelineOutput`] is returned, otherwise a `tuple` is
                returned where the first element is a list with the generated images.
        """
        # Pipeline callbacks declare their own tensor inputs; honor them.
        if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)):
            callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs

        # 1. Check inputs. Raise error if not correct
        # Default the target size to the input image's size when neither is given.
        if height is None and width is None:
            width, height = image[0].size if isinstance(image, list) else image.size

        self.check_inputs(
            prompt=prompt,
            negative_prompt=negative_prompt,
            image=image,
            height=height,
            width=width,
            prompt_embeds_qwen=prompt_embeds_qwen,
            prompt_embeds_clip=prompt_embeds_clip,
            negative_prompt_embeds_qwen=negative_prompt_embeds_qwen,
            negative_prompt_embeds_clip=negative_prompt_embeds_clip,
            prompt_cu_seqlens=prompt_cu_seqlens,
            negative_prompt_cu_seqlens=negative_prompt_cu_seqlens,
            callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs,
            max_sequence_length=max_sequence_length,
        )

        # Snap unsupported sizes to the supported resolution with the closest aspect ratio.
        if (width, height) not in self.resolutions:
            width, height = self.resolutions[
                np.argmin([abs((i[0] / i[1]) - (width / height)) for i in self.resolutions])
            ]

        self._guidance_scale = guidance_scale
        self._interrupt = False

        device = self._execution_device
        dtype = self.transformer.dtype

        # 2. Define call parameters
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
            prompt = [prompt]
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds_qwen.shape[0]

        # 3. Encode input prompt
        if prompt_embeds_qwen is None:
            prompt_embeds_qwen, prompt_embeds_clip, prompt_cu_seqlens = self.encode_prompt(
                prompt=prompt,
                image=image,
                num_images_per_prompt=num_images_per_prompt,
                max_sequence_length=max_sequence_length,
                device=device,
                dtype=dtype,
            )

        # Negative conditioning is only needed for classifier-free guidance.
        if self.guidance_scale > 1.0:
            if negative_prompt is None:
                negative_prompt = ""
            if isinstance(negative_prompt, str):
                negative_prompt = [negative_prompt] * len(prompt) if prompt is not None else [negative_prompt]
            elif len(negative_prompt) != len(prompt):
                raise ValueError(
                    f"`negative_prompt` must have same length as `prompt`. Got {len(negative_prompt)} vs {len(prompt)}."
                )

            if negative_prompt_embeds_qwen is None:
                negative_prompt_embeds_qwen, negative_prompt_embeds_clip, negative_prompt_cu_seqlens = (
                    self.encode_prompt(
                        prompt=negative_prompt,
                        image=image,
                        num_images_per_prompt=num_images_per_prompt,
                        max_sequence_length=max_sequence_length,
                        device=device,
                        dtype=dtype,
                    )
                )

        # 4. Prepare timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        # 5. Prepare latent variables with image conditioning
        num_channels_latents = self.transformer.config.in_visual_dim
        latents = self.prepare_latents(
            image=image,
            batch_size=batch_size * num_images_per_prompt,
            num_channels_latents=num_channels_latents,
            height=height,
            width=width,
            dtype=dtype,
            device=device,
            generator=generator,
            latents=latents,
        )

        # 6. Prepare rope positions for positional encoding
        # One temporal position, plus half-resolution spatial grids — presumably
        # matching the transformer's 2x2 spatial patching; confirm against the model.
        visual_rope_pos = [
            torch.arange(1, device=device),
            torch.arange(height // self.vae_scale_factor_spatial // 2, device=device),
            torch.arange(width // self.vae_scale_factor_spatial // 2, device=device),
        ]
        # Text positions cover the longest prompt in the (repeated) batch.
        text_rope_pos = torch.arange(prompt_cu_seqlens.diff().max().item(), device=device)
        negative_text_rope_pos = (
            torch.arange(negative_prompt_cu_seqlens.diff().max().item(), device=device)
            if negative_prompt_cu_seqlens is not None
            else None
        )

        # 7. Calculate dynamic scale factor based on resolution (fixed for images)
        scale_factor = [1.0, 1.0, 1.0]

        # 8. Sparse attention is not used for single-frame (image) generation
        sparse_params = None

        # 9. Denoising loop
        num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
        self._num_timesteps = len(timesteps)

        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                if self.interrupt:
                    continue

                timestep = t.unsqueeze(0).repeat(batch_size * num_images_per_prompt)

                # Predict noise residual (velocity) for the conditional branch
                pred_velocity = self.transformer(
                    hidden_states=latents.to(dtype),
                    encoder_hidden_states=prompt_embeds_qwen.to(dtype),
                    pooled_projections=prompt_embeds_clip.to(dtype),
                    timestep=timestep.to(dtype),
                    visual_rope_pos=visual_rope_pos,
                    text_rope_pos=text_rope_pos,
                    scale_factor=scale_factor,
                    sparse_params=sparse_params,
                    return_dict=True,
                ).sample

                # Classifier-free guidance: second pass with negative conditioning.
                if self.guidance_scale > 1.0 and negative_prompt_embeds_qwen is not None:
                    uncond_pred_velocity = self.transformer(
                        hidden_states=latents.to(dtype),
                        encoder_hidden_states=negative_prompt_embeds_qwen.to(dtype),
                        pooled_projections=negative_prompt_embeds_clip.to(dtype),
                        timestep=timestep.to(dtype),
                        visual_rope_pos=visual_rope_pos,
                        text_rope_pos=negative_text_rope_pos,
                        scale_factor=scale_factor,
                        sparse_params=sparse_params,
                        return_dict=True,
                    ).sample

                    pred_velocity = uncond_pred_velocity + guidance_scale * (pred_velocity - uncond_pred_velocity)

                # Only the first num_channels_latents channels are denoised; the
                # image-conditioning channels appended in prepare_latents stay fixed.
                latents[:, :, :, :, :num_channels_latents] = self.scheduler.step(
                    pred_velocity[:, :], t, latents[:, :, :, :, :num_channels_latents], return_dict=False
                )[0]

                if callback_on_step_end is not None:
                    callback_kwargs = {}
                    # NOTE: relies on locals() lookup by name — tensor-input names must
                    # match local variable names in this function.
                    for k in callback_on_step_end_tensor_inputs:
                        callback_kwargs[k] = locals()[k]
                    callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)

                    latents = callback_outputs.pop("latents", latents)
                    prompt_embeds_qwen = callback_outputs.pop("prompt_embeds_qwen", prompt_embeds_qwen)
                    prompt_embeds_clip = callback_outputs.pop("prompt_embeds_clip", prompt_embeds_clip)
                    negative_prompt_embeds_qwen = callback_outputs.pop(
                        "negative_prompt_embeds_qwen", negative_prompt_embeds_qwen
                    )
                    negative_prompt_embeds_clip = callback_outputs.pop(
                        "negative_prompt_embeds_clip", negative_prompt_embeds_clip
                    )

                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                    progress_bar.update()

                if XLA_AVAILABLE:
                    xm.mark_step()

        # 10. Post-processing - extract main latents (drop conditioning channels)
        latents = latents[:, :, :, :, :num_channels_latents]

        # 11. Decode latents to image
        if output_type != "latent":
            latents = latents.to(self.vae.dtype)
            # Reshape channels-last latents to the VAE's [N, C, H, W] layout.
            latents = latents.reshape(
                batch_size,
                num_images_per_prompt,
                1,
                height // self.vae_scale_factor_spatial,
                width // self.vae_scale_factor_spatial,
                num_channels_latents,
            )
            latents = latents.permute(0, 1, 5, 2, 3, 4)  # [batch, num_images, channels, 1, height, width]
            latents = latents.reshape(
                batch_size * num_images_per_prompt,
                num_channels_latents,
                height // self.vae_scale_factor_spatial,
                width // self.vae_scale_factor_spatial,
            )

            # Normalize and decode through VAE
            latents = latents / self.vae.config.scaling_factor
            image = self.vae.decode(latents).sample
            image = self.image_processor.postprocess(image, output_type=output_type)
        else:
            image = latents

        # Offload all models
        self.maybe_free_model_hooks()

        if not return_dict:
            return (image,)

        return KandinskyImagePipelineOutput(image=image)
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/pipelines/kandinsky5/pipeline_kandinsky_i2i.py", "license": "Apache License 2.0", "lines": 737, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:src/diffusers/pipelines/kandinsky5/pipeline_kandinsky_i2v.py
# Copyright 2025 The Kandinsky Team and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import html from typing import Callable import regex as re import torch from torch.nn import functional as F from transformers import CLIPTextModel, CLIPTokenizer, Qwen2_5_VLForConditionalGeneration, Qwen2VLProcessor from ...callbacks import MultiPipelineCallbacks, PipelineCallback from ...image_processor import PipelineImageInput from ...loaders import KandinskyLoraLoaderMixin from ...models import AutoencoderKLHunyuanVideo from ...models.transformers import Kandinsky5Transformer3DModel from ...schedulers import FlowMatchEulerDiscreteScheduler # Add imports for offloading and tiling from ...utils import ( is_ftfy_available, is_torch_xla_available, logging, replace_example_docstring, ) from ...utils.torch_utils import randn_tensor from ...video_processor import VideoProcessor from ..pipeline_utils import DiffusionPipeline from .pipeline_output import KandinskyPipelineOutput if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) # pylint: disable=invalid-name if is_ftfy_available(): import ftfy logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = """ Examples: ```python >>> import torch >>> from diffusers import Kandinsky5I2VPipeline >>> from diffusers.utils import export_to_video, load_image >>> # Available models: >>> # 
def basic_clean(text):
    """
    Copied from https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/wan/pipeline_wan.py

    Repair mojibake via ftfy when it is installed, then unescape HTML entities
    (twice, to handle double-escaped input) and trim surrounding whitespace.
    """
    if is_ftfy_available():
        text = ftfy.fix_text(text)
    return html.unescape(html.unescape(text)).strip()


def whitespace_clean(text):
    """
    Copied from https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/wan/pipeline_wan.py

    Collapse every run of whitespace into a single space and trim the ends.
    """
    return re.sub(r"\s+", " ", text).strip()


def prompt_clean(text):
    """
    Copied from https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/wan/pipeline_wan.py

    Normalize a prompt: fix encoding artifacts first, then normalize whitespace.
    """
    return whitespace_clean(basic_clean(text))
    def __init__(
        self,
        transformer: Kandinsky5Transformer3DModel,
        vae: AutoencoderKLHunyuanVideo,
        text_encoder: Qwen2_5_VLForConditionalGeneration,
        tokenizer: Qwen2VLProcessor,
        text_encoder_2: CLIPTextModel,
        tokenizer_2: CLIPTokenizer,
        scheduler: FlowMatchEulerDiscreteScheduler,
    ):
        """Register all sub-models and derive VAE scale factors and the Qwen chat template."""
        super().__init__()

        self.register_modules(
            transformer=transformer,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            text_encoder_2=text_encoder_2,
            tokenizer_2=tokenizer_2,
            scheduler=scheduler,
        )

        # Chat template wrapped around user prompts for the Qwen2.5-VL encoder.
        # NOTE: the typos ("promt", "scren") are part of the runtime template string —
        # presumably what the released weights were trained with; do not "fix" them.
        self.prompt_template = "\n".join(
            [
                "<|im_start|>system\nYou are a promt engineer. Describe the video in detail.",
                "Describe how the camera moves or shakes, describe the zoom and view angle, whether it follows the objects.",
                "Describe the location of the video, main characters or objects and their action.",
                "Describe the dynamism of the video and presented actions.",
                "Name the visual style of the video: whether it is a professional footage, user generated content, some kind of animation, video game or scren content.",
                "Describe the visual effects, postprocessing and transitions if they are presented in the video.",
                "Pay attention to the order of key actions shown in the scene.<|im_end|>",
                "<|im_start|>user\n{}<|im_end|>",
            ]
        )
        # Number of leading template tokens skipped when slicing encoder outputs —
        # presumably the tokenized length of the system preamble; TODO confirm.
        self.prompt_template_encode_start_idx = 129

        # Fallback scale factors (4 temporal / 8 spatial) when no VAE is registered.
        self.vae_scale_factor_temporal = (
            self.vae.config.temporal_compression_ratio if getattr(self, "vae", None) else 4
        )
        self.vae_scale_factor_spatial = self.vae.config.spatial_compression_ratio if getattr(self, "vae", None) else 8
        self.video_processor = VideoProcessor(vae_scale_factor=self.vae_scale_factor_spatial)
Describe the video in detail.", "Describe how the camera moves or shakes, describe the zoom and view angle, whether it follows the objects.", "Describe the location of the video, main characters or objects and their action.", "Describe the dynamism of the video and presented actions.", "Name the visual style of the video: whether it is a professional footage, user generated content, some kind of animation, video game or scren content.", "Describe the visual effects, postprocessing and transitions if they are presented in the video.", "Pay attention to the order of key actions shown in the scene.<|im_end|>", "<|im_start|>user\n{}<|im_end|>", ] ) self.prompt_template_encode_start_idx = 129 self.vae_scale_factor_temporal = ( self.vae.config.temporal_compression_ratio if getattr(self, "vae", None) else 4 ) self.vae_scale_factor_spatial = self.vae.config.spatial_compression_ratio if getattr(self, "vae", None) else 8 self.video_processor = VideoProcessor(vae_scale_factor=self.vae_scale_factor_spatial) def _get_scale_factor(self, height: int, width: int) -> tuple: """ Calculate the scale factor based on resolution. Args: height (int): Video height width (int): Video width Returns: tuple: Scale factor as (temporal_scale, height_scale, width_scale) """ def between_480p(x): return 480 <= x <= 854 if between_480p(height) and between_480p(width): return (1, 2, 2) else: return (1, 3.16, 3.16) @staticmethod def fast_sta_nabla(T: int, H: int, W: int, wT: int = 3, wH: int = 3, wW: int = 3, device="cuda") -> torch.Tensor: """ Create a sparse temporal attention (STA) mask for efficient video generation. This method generates a mask that limits attention to nearby frames and spatial positions, reducing computational complexity for video generation. 
    @staticmethod
    def fast_sta_nabla(T: int, H: int, W: int, wT: int = 3, wH: int = 3, wW: int = 3, device="cuda") -> torch.Tensor:
        """
        Create a sparse temporal attention (STA) mask for efficient video generation.

        A token pair attends iff its per-axis index distance fits inside the (odd)
        window on every axis: |dt| <= wT//2, |dh| <= wH//2 and |dw| <= wW//2.

        Args:
            T (int): Number of temporal frames
            H (int): Height in latent space
            W (int): Width in latent space
            wT (int): Temporal attention window size
            wH (int): Height attention window size
            wW (int): Width attention window size
            device (str): Device to create tensor on

        Returns:
            torch.Tensor: Boolean sparse attention mask of shape (T*H*W, T*H*W)
        """
        # Build one pairwise |i - j| distance matrix sized by the largest axis,
        # then slice it per-axis below instead of building three matrices.
        l = torch.Tensor([T, H, W]).amax()
        r = torch.arange(0, l, 1, dtype=torch.int16, device=device)
        mat = (r.unsqueeze(1) - r.unsqueeze(0)).abs()
        sta_t, sta_h, sta_w = (
            mat[:T, :T].flatten(),
            mat[:H, :H].flatten(),
            mat[:W, :W].flatten(),
        )
        # Per-axis window tests (half-width of the odd window on each side).
        sta_t = sta_t <= wT // 2
        sta_h = sta_h <= wH // 2
        sta_w = sta_w <= wW // 2
        # Outer-product H and W tests, then reorder axes so rows/cols enumerate
        # flattened (h, w) spatial positions: result is an (H*W, H*W) mask.
        sta_hw = (sta_h.unsqueeze(1) * sta_w.unsqueeze(0)).reshape(H, H, W, W).transpose(1, 2).flatten()
        # Combine with the temporal test and reorder to (t, h*w) token ordering.
        sta = (sta_t.unsqueeze(1) * sta_hw.unsqueeze(0)).reshape(T, T, H * W, H * W).transpose(1, 2)
        return sta.reshape(T * H * W, T * H * W)
    def get_sparse_params(self, sample, device):
        """
        Generate sparse attention parameters for the transformer based on sample dimensions.

        Args:
            sample (torch.Tensor): Latent sample of shape [B, T, H, W, C] (channels-last).
            device (torch.device): Device to place the STA mask on.

        Returns:
            Dict | None: Sparse attention configuration for "nabla" attention, or
            None when the transformer uses dense attention.
        """
        # NOTE(review): `assert` statements are stripped under `python -O`; if this
        # invariant must always hold, a ValueError would be more robust.
        assert self.transformer.config.patch_size[0] == 1
        B, T, H, W, _ = sample.shape
        # Convert latent extents to patch-grid extents.
        T, H, W = (
            T // self.transformer.config.patch_size[0],
            H // self.transformer.config.patch_size[1],
            W // self.transformer.config.patch_size[2],
        )
        if self.transformer.config.attention_type == "nabla":
            # Mask is built on an 8x-downsampled spatial grid — presumably the nabla
            # attention pools 8x8 spatial blocks; TODO confirm against the transformer.
            sta_mask = self.fast_sta_nabla(
                T,
                H // 8,
                W // 8,
                self.transformer.config.attention_wT,
                self.transformer.config.attention_wH,
                self.transformer.config.attention_wW,
                device=device,
            )
            sparse_params = {
                # Add broadcastable batch/head dims in place.
                "sta_mask": sta_mask.unsqueeze_(0).unsqueeze_(0),
                "attention_type": self.transformer.config.attention_type,
                "to_fractal": True,
                "P": self.transformer.config.attention_P,
                "wT": self.transformer.config.attention_wT,
                "wW": self.transformer.config.attention_wW,
                "wH": self.transformer.config.attention_wH,
                "add_sta": self.transformer.config.attention_add_sta,
                "visual_shape": (T, H, W),
                "method": self.transformer.config.attention_method,
            }
        else:
            sparse_params = None

        return sparse_params
    def _encode_prompt_qwen(
        self,
        prompt: str | list[str],
        device: torch.device | None = None,
        max_sequence_length: int = 256,
        dtype: torch.dtype | None = None,
    ):
        """
        Encode prompt using Qwen2.5-VL text encoder.

        Wraps each prompt in the chat template, truncates over-long prompts (with a
        warning) and returns last-hidden-state embeddings with the template preamble
        sliced off, plus cumulative per-prompt token counts.

        Args:
            prompt (str | list[str]): Input prompt or list of prompts
            device (torch.device): Device to run encoding on
            max_sequence_length (int): Maximum sequence length for tokenization
            dtype (torch.dtype): Data type for embeddings

        Returns:
            tuple[torch.Tensor, torch.Tensor]: Text embeddings and cumulative sequence lengths
        """
        device = device or self._execution_device
        dtype = dtype or self.text_encoder.dtype

        full_texts = [self.prompt_template.format(p) for p in prompt]
        # Budget = template preamble + user-visible tokens.
        max_allowed_len = self.prompt_template_encode_start_idx + max_sequence_length

        # First pass without truncation, only to detect/report what would be cut.
        untruncated_ids = self.tokenizer(
            text=full_texts,
            images=None,
            videos=None,
            return_tensors="pt",
            padding="longest",
        )["input_ids"]

        if untruncated_ids.shape[-1] > max_allowed_len:
            for i, text in enumerate(full_texts):
                # Drop the preamble and the last 2 tokens — presumably the closing
                # template tokens ("<|im_end|>" etc.); TODO confirm offsets.
                tokens = untruncated_ids[i][self.prompt_template_encode_start_idx : -2]
                removed_text = self.tokenizer.decode(tokens[max_sequence_length - 2 :])
                if len(removed_text) > 0:
                    full_texts[i] = text[: -len(removed_text)]
                    logger.warning(
                        "The following part of your input was truncated because `max_sequence_length` is set to "
                        f" {max_sequence_length} tokens: {removed_text}"
                    )

        inputs = self.tokenizer(
            text=full_texts,
            images=None,
            videos=None,
            max_length=max_allowed_len,
            truncation=True,
            return_tensors="pt",
            padding=True,
        ).to(device)

        # Last hidden state, with the template preamble sliced off.
        embeds = self.text_encoder(
            input_ids=inputs["input_ids"],
            return_dict=True,
            output_hidden_states=True,
        )["hidden_states"][-1][:, self.prompt_template_encode_start_idx :]
        attention_mask = inputs["attention_mask"][:, self.prompt_template_encode_start_idx :]

        # cu_seqlens = [0, len0, len0+len1, ...] — flash-attention-style offsets.
        cu_seqlens = torch.cumsum(attention_mask.sum(1), dim=0)
        cu_seqlens = F.pad(cu_seqlens, (1, 0), value=0).to(dtype=torch.int32)

        return embeds.to(dtype), cu_seqlens
This method processes the input prompt through the CLIP model to generate pooled embeddings that capture semantic information. Args: prompt (str | list[str]): Input prompt or list of prompts device (torch.device): Device to run encoding on dtype (torch.dtype): Data type for embeddings Returns: torch.Tensor: Pooled text embeddings from CLIP """ device = device or self._execution_device dtype = dtype or self.text_encoder_2.dtype inputs = self.tokenizer_2( prompt, max_length=77, truncation=True, add_special_tokens=True, padding="max_length", return_tensors="pt", ).to(device) pooled_embed = self.text_encoder_2(**inputs)["pooler_output"] return pooled_embed.to(dtype) @staticmethod def adaptive_mean_std_normalization(source, reference): source_mean = source.mean(dim=(1, 2, 3, 4), keepdim=True) source_std = source.std(dim=(1, 2, 3, 4), keepdim=True) # magic constants - limit changes in latents clump_mean_low = 0.05 clump_mean_high = 0.1 clump_std_low = 0.1 clump_std_high = 0.25 reference_mean = torch.clamp(reference.mean(), source_mean - clump_mean_low, source_mean + clump_mean_high) reference_std = torch.clamp(reference.std(), source_std - clump_std_low, source_std + clump_std_high) # normalization normalized = (source - source_mean) / source_std normalized = normalized * reference_std + reference_mean return normalized def normalize_first_frame(self, latents, reference_frames=5, clump_values=False): latents_copy = latents.clone() samples = latents_copy if samples.shape[1] <= 1: return (latents, "Only one frame, no normalization needed") nFr = 4 first_frames = samples.clone()[:, :nFr] reference_frames_data = samples[:, nFr : nFr + min(reference_frames, samples.shape[1] - 1)] normalized_first = self.adaptive_mean_std_normalization(first_frames, reference_frames_data) if clump_values: min_val = reference_frames_data.min() max_val = reference_frames_data.max() normalized_first = torch.clamp(normalized_first, min_val, max_val) samples[:, :nFr] = normalized_first return 
samples def encode_prompt( self, prompt: str | list[str], num_videos_per_prompt: int = 1, max_sequence_length: int = 512, device: torch.device | None = None, dtype: torch.dtype | None = None, ): r""" Encodes a single prompt (positive or negative) into text encoder hidden states. This method combines embeddings from both Qwen2.5-VL and CLIP text encoders to create comprehensive text representations for video generation. Args: prompt (`str` or `list[str]`): Prompt to be encoded. num_videos_per_prompt (`int`, *optional*, defaults to 1): Number of videos to generate per prompt. max_sequence_length (`int`, *optional*, defaults to 512): Maximum sequence length for text encoding. device (`torch.device`, *optional*): Torch device. dtype (`torch.dtype`, *optional*): Torch dtype. Returns: tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - Qwen text embeddings of shape (batch_size * num_videos_per_prompt, sequence_length, embedding_dim) - CLIP pooled embeddings of shape (batch_size * num_videos_per_prompt, clip_embedding_dim) - Cumulative sequence lengths (`cu_seqlens`) for Qwen embeddings of shape (batch_size * num_videos_per_prompt + 1,) """ device = device or self._execution_device dtype = dtype or self.text_encoder.dtype if not isinstance(prompt, list): prompt = [prompt] batch_size = len(prompt) prompt = [prompt_clean(p) for p in prompt] # Encode with Qwen2.5-VL prompt_embeds_qwen, prompt_cu_seqlens = self._encode_prompt_qwen( prompt=prompt, device=device, max_sequence_length=max_sequence_length, dtype=dtype, ) # prompt_embeds_qwen shape: [batch_size, seq_len, embed_dim] # Encode with CLIP prompt_embeds_clip = self._encode_prompt_clip( prompt=prompt, device=device, dtype=dtype, ) # prompt_embeds_clip shape: [batch_size, clip_embed_dim] # Repeat embeddings for num_videos_per_prompt # Qwen embeddings: repeat sequence for each video, then reshape prompt_embeds_qwen = prompt_embeds_qwen.repeat( 1, num_videos_per_prompt, 1 ) # [batch_size, seq_len * num_videos_per_prompt, 
def check_inputs(
    self,
    prompt,
    negative_prompt,
    image,
    height,
    width,
    prompt_embeds_qwen=None,
    prompt_embeds_clip=None,
    negative_prompt_embeds_qwen=None,
    negative_prompt_embeds_clip=None,
    prompt_cu_seqlens=None,
    negative_prompt_cu_seqlens=None,
    callback_on_step_end_tensor_inputs=None,
    max_sequence_length=None,
):
    """
    Validate input parameters for the pipeline.

    Args:
        prompt: Input prompt
        negative_prompt: Negative prompt for guidance
        image: Input image for conditioning
        height: Video height
        width: Video width
        prompt_embeds_qwen: Pre-computed Qwen prompt embeddings
        prompt_embeds_clip: Pre-computed CLIP prompt embeddings
        negative_prompt_embeds_qwen: Pre-computed Qwen negative prompt embeddings
        negative_prompt_embeds_clip: Pre-computed CLIP negative prompt embeddings
        prompt_cu_seqlens: Pre-computed cumulative sequence lengths for Qwen positive prompt
        negative_prompt_cu_seqlens: Pre-computed cumulative sequence lengths for Qwen negative prompt
        callback_on_step_end_tensor_inputs: Callback tensor inputs
        max_sequence_length: Maximum allowed text sequence length (at most 1024)

    Raises:
        ValueError: If inputs are invalid
    """
    # Fix: the condition admits exactly 1024, but the old message said "less than 1024";
    # the message now matches the `> 1024` check.
    if max_sequence_length is not None and max_sequence_length > 1024:
        raise ValueError("max_sequence_length must be less than or equal to 1024")

    if image is None:
        raise ValueError("`image` must be provided for image-to-video generation")

    if height % 16 != 0 or width % 16 != 0:
        raise ValueError(f"`height` and `width` have to be divisible by 16 but are {height} and {width}.")

    if callback_on_step_end_tensor_inputs is not None and not all(
        k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
    ):
        raise ValueError(
            f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
        )

    # Check for consistency within positive prompt embeddings and sequence lengths
    if prompt_embeds_qwen is not None or prompt_embeds_clip is not None or prompt_cu_seqlens is not None:
        if prompt_embeds_qwen is None or prompt_embeds_clip is None or prompt_cu_seqlens is None:
            raise ValueError(
                "If any of `prompt_embeds_qwen`, `prompt_embeds_clip`, or `prompt_cu_seqlens` is provided, "
                "all three must be provided."
            )

    # Check for consistency within negative prompt embeddings and sequence lengths
    if (
        negative_prompt_embeds_qwen is not None
        or negative_prompt_embeds_clip is not None
        or negative_prompt_cu_seqlens is not None
    ):
        if (
            negative_prompt_embeds_qwen is None
            or negative_prompt_embeds_clip is None
            or negative_prompt_cu_seqlens is None
        ):
            raise ValueError(
                "If any of `negative_prompt_embeds_qwen`, `negative_prompt_embeds_clip`, or `negative_prompt_cu_seqlens` is provided, "
                "all three must be provided."
            )

    # Check if prompt or embeddings are provided (either prompt or all required embedding components for positive)
    if prompt is None and prompt_embeds_qwen is None:
        raise ValueError(
            "Provide either `prompt` or `prompt_embeds_qwen` (and corresponding `prompt_embeds_clip` and `prompt_cu_seqlens`). Cannot leave all undefined."
        )

    # Validate types for prompt and negative_prompt if provided
    if prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
        raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
    if negative_prompt is not None and (
        not isinstance(negative_prompt, str) and not isinstance(negative_prompt, list)
    ):
        raise ValueError(f"`negative_prompt` has to be of type `str` or `list` but is {type(negative_prompt)}")
    def prepare_latents(
        self,
        image: PipelineImageInput,
        batch_size: int,
        num_channels_latents: int = 16,
        height: int = 480,
        width: int = 832,
        num_frames: int = 81,
        dtype: torch.dtype | None = None,
        device: torch.device | None = None,
        generator: torch.Generator | list[torch.Generator] | None = None,
        latents: torch.Tensor | None = None,
    ) -> torch.Tensor:
        """
        Prepare initial latent variables for image-to-video generation.

        This method creates random noise latents for all frames except the first frame, which is replaced with the
        encoded input image.

        Args:
            image (PipelineImageInput): Input image to condition the generation on
            batch_size (int): Number of videos to generate
            num_channels_latents (int): Number of channels in latent space
            height (int): Height of generated video
            width (int): Width of generated video
            num_frames (int): Number of frames in video
            dtype (torch.dtype): Data type for latents
            device (torch.device): Device to create latents on
            generator (torch.Generator): Random number generator
            latents (torch.Tensor): Pre-existing latents to use

        Returns:
            torch.Tensor: Prepared latent tensor with first frame as encoded image
        """
        # Caller-provided latents short-circuit everything, including image encoding.
        if latents is not None:
            return latents.to(device=device, dtype=dtype)

        num_latent_frames = (num_frames - 1) // self.vae_scale_factor_temporal + 1

        # NOTE: channel-LAST latent layout [batch, frames, height, width, channels],
        # unlike the usual diffusers [B, C, F, H, W] order.
        shape = (
            batch_size,
            num_latent_frames,
            int(height) // self.vae_scale_factor_spatial,
            int(width) // self.vae_scale_factor_spatial,
            num_channels_latents,
        )

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        # Generate random noise for all frames
        latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # Encode the input image to use as first frame
        # Preprocess image
        image_tensor = self.video_processor.preprocess(image, height=height, width=width).to(device, dtype=dtype)

        # Encode image to latents using VAE
        with torch.no_grad():
            # Convert image to video format [batch, channels, 1, height, width]
            image_video = image_tensor.unsqueeze(2)  # Add temporal dimension
            image_latents = self.vae.encode(image_video).latent_dist.sample(generator=generator)

            # Normalize latents if needed
            if hasattr(self.vae.config, "scaling_factor"):
                image_latents = image_latents * self.vae.config.scaling_factor

            # Reshape to match latent dimensions [batch, frames, height, width, channels]
            image_latents = image_latents.permute(0, 2, 3, 4, 1)  # [batch, 1, H, W, C]

        # Replace first frame with encoded image
        latents[:, 0:1] = image_latents

        if self.transformer.visual_cond:
            # For visual conditioning, append a zeroed copy of the latents plus a binary
            # mask along the channel axis; only frame 0 carries the image condition.
            visual_cond = torch.zeros_like(latents)
            visual_cond_mask = torch.zeros(
                [
                    batch_size,
                    num_latent_frames,
                    int(height) // self.vae_scale_factor_spatial,
                    int(width) // self.vae_scale_factor_spatial,
                    1,
                ],
                dtype=latents.dtype,
                device=latents.device,
            )
            visual_cond_mask[:, 0:1] = 1
            visual_cond[:, 0:1] = image_latents
            # Resulting channel count is 2 * num_channels_latents + 1; downstream code slices
            # back to [:num_channels_latents] after denoising.
            latents = torch.cat([latents, visual_cond, visual_cond_mask], dim=-1)

        return latents

    @property
    def guidance_scale(self):
        """Get the current guidance scale value."""
        return self._guidance_scale

    @property
    def num_timesteps(self):
        """Get the number of denoising timesteps."""
        return self._num_timesteps

    @property
    def interrupt(self):
        """Check if generation has been interrupted."""
        return self._interrupt
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image: PipelineImageInput,
        prompt: str | list[str] = None,
        negative_prompt: str | list[str] | None = None,
        height: int = 512,
        width: int = 768,
        num_frames: int = 121,
        num_inference_steps: int = 50,
        guidance_scale: float = 5.0,
        num_videos_per_prompt: int | None = 1,
        generator: torch.Generator | list[torch.Generator] | None = None,
        latents: torch.Tensor | None = None,
        prompt_embeds_qwen: torch.Tensor | None = None,
        prompt_embeds_clip: torch.Tensor | None = None,
        negative_prompt_embeds_qwen: torch.Tensor | None = None,
        negative_prompt_embeds_clip: torch.Tensor | None = None,
        prompt_cu_seqlens: torch.Tensor | None = None,
        negative_prompt_cu_seqlens: torch.Tensor | None = None,
        output_type: str | None = "pil",
        return_dict: bool = True,
        callback_on_step_end: Callable[[int, int, None], PipelineCallback | MultiPipelineCallbacks] | None = None,
        callback_on_step_end_tensor_inputs: list[str] = ["latents"],
        max_sequence_length: int = 512,
    ):
        r"""
        The call function to the pipeline for image-to-video generation.

        Args:
            image (`PipelineImageInput`):
                The input image to condition the generation on. Must be an image, a list of images or a `torch.Tensor`.
            prompt (`str` or `list[str]`, *optional*):
                The prompt or prompts to guide the video generation. If not defined, pass `prompt_embeds` instead.
            negative_prompt (`str` or `list[str]`, *optional*):
                The prompt or prompts to avoid during video generation. If not defined, pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale` < `1`).
            height (`int`, defaults to `512`):
                The height in pixels of the generated video.
            width (`int`, defaults to `768`):
                The width in pixels of the generated video.
            num_frames (`int`, defaults to `121`):
                The number of frames in the generated video.
            num_inference_steps (`int`, defaults to `50`):
                The number of denoising steps.
            guidance_scale (`float`, defaults to `5.0`):
                Guidance scale as defined in classifier-free guidance.
            num_videos_per_prompt (`int`, *optional*, defaults to 1):
                The number of videos to generate per prompt.
            generator (`torch.Generator` or `list[torch.Generator]`, *optional*):
                A torch generator to make generation deterministic.
            latents (`torch.Tensor`, *optional*):
                Pre-generated noisy latents.
            prompt_embeds_qwen (`torch.Tensor`, *optional*):
                Pre-generated Qwen text embeddings.
            prompt_embeds_clip (`torch.Tensor`, *optional*):
                Pre-generated CLIP text embeddings.
            negative_prompt_embeds_qwen (`torch.Tensor`, *optional*):
                Pre-generated Qwen negative text embeddings.
            negative_prompt_embeds_clip (`torch.Tensor`, *optional*):
                Pre-generated CLIP negative text embeddings.
            prompt_cu_seqlens (`torch.Tensor`, *optional*):
                Pre-generated cumulative sequence lengths for Qwen positive prompt.
            negative_prompt_cu_seqlens (`torch.Tensor`, *optional*):
                Pre-generated cumulative sequence lengths for Qwen negative prompt.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated video.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`KandinskyPipelineOutput`].
            callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*):
                A function that is called at the end of each denoising step.
            callback_on_step_end_tensor_inputs (`List`, *optional*):
                The list of tensor inputs for the `callback_on_step_end` function.
            max_sequence_length (`int`, defaults to `512`):
                The maximum sequence length for text encoding.

        Examples:

        Returns:
            [`~KandinskyPipelineOutput`] or `tuple`:
                If `return_dict` is `True`, [`KandinskyPipelineOutput`] is returned, otherwise a `tuple` is returned
                where the first element is a list with the generated videos.
        """
        if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)):
            callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs

        # 1. Check inputs. Raise error if not correct
        self.check_inputs(
            prompt=prompt,
            negative_prompt=negative_prompt,
            image=image,
            height=height,
            width=width,
            prompt_embeds_qwen=prompt_embeds_qwen,
            prompt_embeds_clip=prompt_embeds_clip,
            negative_prompt_embeds_qwen=negative_prompt_embeds_qwen,
            negative_prompt_embeds_clip=negative_prompt_embeds_clip,
            prompt_cu_seqlens=prompt_cu_seqlens,
            negative_prompt_cu_seqlens=negative_prompt_cu_seqlens,
            callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs,
            max_sequence_length=max_sequence_length,
        )

        # (num_frames - 1) must be divisible by the temporal VAE stride; round down if not.
        if num_frames % self.vae_scale_factor_temporal != 1:
            logger.warning(
                f"`num_frames - 1` has to be divisible by {self.vae_scale_factor_temporal}. Rounding to the nearest number."
            )
            num_frames = num_frames // self.vae_scale_factor_temporal * self.vae_scale_factor_temporal + 1
        num_frames = max(num_frames, 1)

        self._guidance_scale = guidance_scale
        self._interrupt = False

        device = self._execution_device
        dtype = self.transformer.dtype

        # 2. Define call parameters
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
            prompt = [prompt]
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds_qwen.shape[0]

        # 3. Encode input prompt
        if prompt_embeds_qwen is None:
            prompt_embeds_qwen, prompt_embeds_clip, prompt_cu_seqlens = self.encode_prompt(
                prompt=prompt,
                num_videos_per_prompt=num_videos_per_prompt,
                max_sequence_length=max_sequence_length,
                device=device,
                dtype=dtype,
            )

        if self.guidance_scale > 1.0:
            if negative_prompt is None:
                negative_prompt = "Static, 2D cartoon, cartoon, 2d animation, paintings, images, worst quality, low quality, ugly, deformed, walking backwards"
            if isinstance(negative_prompt, str):
                negative_prompt = [negative_prompt] * len(prompt) if prompt is not None else [negative_prompt]
            elif len(negative_prompt) != len(prompt):
                raise ValueError(
                    f"`negative_prompt` must have same length as `prompt`. Got {len(negative_prompt)} vs {len(prompt)}."
                )

            if negative_prompt_embeds_qwen is None:
                negative_prompt_embeds_qwen, negative_prompt_embeds_clip, negative_prompt_cu_seqlens = (
                    self.encode_prompt(
                        prompt=negative_prompt,
                        num_videos_per_prompt=num_videos_per_prompt,
                        max_sequence_length=max_sequence_length,
                        device=device,
                        dtype=dtype,
                    )
                )

        # 4. Prepare timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        # 5. Prepare latent variables with image conditioning
        num_channels_latents = self.transformer.config.in_visual_dim
        latents = self.prepare_latents(
            image=image,
            batch_size=batch_size * num_videos_per_prompt,
            num_channels_latents=num_channels_latents,
            height=height,
            width=width,
            num_frames=num_frames,
            dtype=dtype,
            device=device,
            generator=generator,
            latents=latents,
        )

        # 6. Prepare rope positions for positional encoding
        num_latent_frames = (num_frames - 1) // self.vae_scale_factor_temporal + 1
        visual_rope_pos = [
            torch.arange(num_latent_frames, device=device),
            torch.arange(height // self.vae_scale_factor_spatial // 2, device=device),
            torch.arange(width // self.vae_scale_factor_spatial // 2, device=device),
        ]

        # Text positions run up to the longest packed sequence in the batch.
        text_rope_pos = torch.arange(prompt_cu_seqlens.diff().max().item(), device=device)

        negative_text_rope_pos = (
            torch.arange(negative_prompt_cu_seqlens.diff().max().item(), device=device)
            if negative_prompt_cu_seqlens is not None
            else None
        )

        # 7. Calculate dynamic scale factor based on resolution
        scale_factor = self._get_scale_factor(height, width)

        # 8. Sparse Params for efficient attention
        sparse_params = self.get_sparse_params(latents, device)

        # 9. Denoising loop
        num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
        self._num_timesteps = len(timesteps)

        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                if self.interrupt:
                    continue

                timestep = t.unsqueeze(0).repeat(batch_size * num_videos_per_prompt)

                # Predict noise residual
                pred_velocity = self.transformer(
                    hidden_states=latents.to(dtype),
                    encoder_hidden_states=prompt_embeds_qwen.to(dtype),
                    pooled_projections=prompt_embeds_clip.to(dtype),
                    timestep=timestep.to(dtype),
                    visual_rope_pos=visual_rope_pos,
                    text_rope_pos=text_rope_pos,
                    scale_factor=scale_factor,
                    sparse_params=sparse_params,
                    return_dict=True,
                ).sample

                if self.guidance_scale > 1.0 and negative_prompt_embeds_qwen is not None:
                    uncond_pred_velocity = self.transformer(
                        hidden_states=latents.to(dtype),
                        encoder_hidden_states=negative_prompt_embeds_qwen.to(dtype),
                        pooled_projections=negative_prompt_embeds_clip.to(dtype),
                        timestep=timestep.to(dtype),
                        visual_rope_pos=visual_rope_pos,
                        text_rope_pos=negative_text_rope_pos,
                        scale_factor=scale_factor,
                        sparse_params=sparse_params,
                        return_dict=True,
                    ).sample

                    # Classifier-free guidance combination.
                    pred_velocity = uncond_pred_velocity + guidance_scale * (pred_velocity - uncond_pred_velocity)

                # Frame 0 holds the encoded conditioning image; only frames 1: are denoised,
                # and only the first num_channels_latents channels (the rest are visual-cond).
                latents[:, 1:, :, :, :num_channels_latents] = self.scheduler.step(
                    pred_velocity[:, 1:], t, latents[:, 1:, :, :, :num_channels_latents], return_dict=False
                )[0]

                if callback_on_step_end is not None:
                    callback_kwargs = {}
                    for k in callback_on_step_end_tensor_inputs:
                        # NOTE(review): relies on every tensor-input name matching a local
                        # variable in this scope — fragile; confirm when adding new inputs.
                        callback_kwargs[k] = locals()[k]
                    callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)

                    latents = callback_outputs.pop("latents", latents)
                    prompt_embeds_qwen = callback_outputs.pop("prompt_embeds_qwen", prompt_embeds_qwen)
                    prompt_embeds_clip = callback_outputs.pop("prompt_embeds_clip", prompt_embeds_clip)
                    negative_prompt_embeds_qwen = callback_outputs.pop(
                        "negative_prompt_embeds_qwen", negative_prompt_embeds_qwen
                    )
                    negative_prompt_embeds_clip = callback_outputs.pop(
                        "negative_prompt_embeds_clip", negative_prompt_embeds_clip
                    )

                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                    progress_bar.update()

                if XLA_AVAILABLE:
                    xm.mark_step()

        # 10. Post-processing - extract main latents (drop visual-cond channels appended in prepare_latents)
        latents = latents[:, :, :, :, :num_channels_latents]

        # 11. fix mesh artifacts
        latents = self.normalize_first_frame(latents)

        # 12. Decode latents to video
        if output_type != "latent":
            latents = latents.to(self.vae.dtype)

            # Reshape channel-last latents into the [B*V, C, F, H, W] layout the VAE expects.
            video = latents.reshape(
                batch_size,
                num_videos_per_prompt,
                (num_frames - 1) // self.vae_scale_factor_temporal + 1,
                height // self.vae_scale_factor_spatial,
                width // self.vae_scale_factor_spatial,
                num_channels_latents,
            )
            video = video.permute(0, 1, 5, 2, 3, 4)  # [batch, num_videos, channels, frames, height, width]
            video = video.reshape(
                batch_size * num_videos_per_prompt,
                num_channels_latents,
                (num_frames - 1) // self.vae_scale_factor_temporal + 1,
                height // self.vae_scale_factor_spatial,
                width // self.vae_scale_factor_spatial,
            )

            # Normalize and decode through VAE
            video = video / self.vae.config.scaling_factor
            video = self.vae.decode(video).sample
            video = self.video_processor.postprocess_video(video, output_type=output_type)
        else:
            video = latents

        # Offload all models
        self.maybe_free_model_hooks()

        if not return_dict:
            return (video,)

        return KandinskyPipelineOutput(frames=video)
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/pipelines/kandinsky5/pipeline_kandinsky_i2v.py", "license": "Apache License 2.0", "lines": 893, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:src/diffusers/pipelines/kandinsky5/pipeline_kandinsky_t2i.py
# Copyright 2025 The Kandinsky Team and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import html from typing import Callable import numpy as np import regex as re import torch from torch.nn import functional as F from transformers import CLIPTextModel, CLIPTokenizer, Qwen2_5_VLForConditionalGeneration, Qwen2VLProcessor from ...callbacks import MultiPipelineCallbacks, PipelineCallback from ...image_processor import VaeImageProcessor from ...loaders import KandinskyLoraLoaderMixin from ...models import AutoencoderKL from ...models.transformers import Kandinsky5Transformer3DModel from ...schedulers import FlowMatchEulerDiscreteScheduler # Add imports for offloading and tiling from ...utils import ( is_ftfy_available, is_torch_xla_available, logging, replace_example_docstring, ) from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline from .pipeline_output import KandinskyImagePipelineOutput if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) # pylint: disable=invalid-name if is_ftfy_available(): import ftfy logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = """ Examples: ```python >>> import torch >>> from diffusers import Kandinsky5T2IPipeline >>> # Available models: >>> # kandinskylab/Kandinsky-5.0-T2I-Lite-sft-Diffusers >>> # kandinskylab/Kandinsky-5.0-T2I-Lite-pretrain-Diffusers >>> model_id 
def basic_clean(text):
    """
    Copied from https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/wan/pipeline_wan.py

    Repair mojibake via ftfy when available, unescape doubly-escaped HTML
    entities, and strip surrounding whitespace.
    """
    if is_ftfy_available():
        text = ftfy.fix_text(text)
    return html.unescape(html.unescape(text)).strip()


def whitespace_clean(text):
    """
    Copied from https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/wan/pipeline_wan.py

    Collapse every run of whitespace to a single space and trim both ends.
    """
    return re.sub(r"\s+", " ", text).strip()


def prompt_clean(text):
    """
    Copied from https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/wan/pipeline_wan.py

    Apply basic cleaning followed by whitespace normalization to a prompt.
    """
    return whitespace_clean(basic_clean(text))
        text_encoder ([`Qwen2_5_VLForConditionalGeneration`]):
            Frozen text-encoder [Qwen2.5-VL](https://huggingface.co/Qwen/Qwen2.5-VL-7B-Instruct).
        tokenizer ([`AutoProcessor`]):
            Tokenizer for Qwen2.5-VL.
        text_encoder_2 ([`CLIPTextModel`]):
            Frozen [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel),
            specifically the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
        tokenizer_2 ([`CLIPTokenizer`]):
            Tokenizer for CLIP.
        scheduler ([`FlowMatchEulerDiscreteScheduler`]):
            A scheduler to be used in combination with `transformer` to denoise the encoded image latents.
    """

    model_cpu_offload_seq = "text_encoder->text_encoder_2->transformer->vae"
    _callback_tensor_inputs = [
        "latents",
        "prompt_embeds_qwen",
        "prompt_embeds_clip",
        "negative_prompt_embeds_qwen",
        "negative_prompt_embeds_clip",
    ]

    def __init__(
        self,
        transformer: Kandinsky5Transformer3DModel,
        vae: AutoencoderKL,
        text_encoder: Qwen2_5_VLForConditionalGeneration,
        tokenizer: Qwen2VLProcessor,
        text_encoder_2: CLIPTextModel,
        tokenizer_2: CLIPTokenizer,
        scheduler: FlowMatchEulerDiscreteScheduler,
    ):
        super().__init__()

        self.register_modules(
            transformer=transformer,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            text_encoder_2=text_encoder_2,
            tokenizer_2=tokenizer_2,
            scheduler=scheduler,
        )

        # NOTE(review): the "promt" typo below is part of the tokenized template;
        # prompt_template_encode_start_idx (41) is tied to this exact string — do not
        # "fix" the spelling without re-counting the template's token length.
        self.prompt_template = "<|im_start|>system\nYou are a promt engineer. Describe the image by detailing the color, shape, size, texture, quantity, text, spatial relationships of the objects and background:<|im_end|>\n<|im_start|>user\n{}<|im_end|>"
        # Number of template-prefix tokens stripped from the encoder hidden states —
        # presumably the tokenized length of the system block above; TODO confirm.
        self.prompt_template_encode_start_idx = 41
        self.vae_scale_factor_spatial = 8
        self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor_spatial)
        # (height, width) pairs accepted by check_inputs without a warning.
        self.resolutions = [(1024, 1024), (640, 1408), (1408, 640), (768, 1280), (1280, 768), (896, 1152), (1152, 896)]

    def _encode_prompt_qwen(
        self,
        prompt: list[str],
        device: torch.device | None = None,
        max_sequence_length: int = 512,
        dtype: torch.dtype | None = None,
    ):
        """
        Encode prompt using Qwen2.5-VL text encoder.

        This method processes the input prompt through the Qwen2.5-VL model to generate text embeddings suitable for
        image generation.

        Args:
            prompt list[str]: Input list of prompts
            device (torch.device): Device to run encoding on
            max_sequence_length (int): Maximum sequence length for tokenization
            dtype (torch.dtype): Data type for embeddings

        Returns:
            tuple[torch.Tensor, torch.Tensor]: Text embeddings and cumulative sequence lengths
        """
        device = device or self._execution_device
        dtype = dtype or self.text_encoder.dtype

        full_texts = [self.prompt_template.format(p) for p in prompt]
        # Token budget = template prefix + user-visible tokens.
        max_allowed_len = self.prompt_template_encode_start_idx + max_sequence_length

        untruncated_ids = self.tokenizer(
            text=full_texts,
            images=None,
            videos=None,
            return_tensors="pt",
            padding="longest",
        )["input_ids"]

        if untruncated_ids.shape[-1] > max_allowed_len:
            for i, text in enumerate(full_texts):
                # User-prompt tokens: template prefix and the last two template tokens excluded.
                tokens = untruncated_ids[i][self.prompt_template_encode_start_idx : -2]
                removed_text = self.tokenizer.decode(tokens[max_sequence_length - 2 :])
                if len(removed_text) > 0:
                    # NOTE(review): this slices characters off the end of the *templated* text,
                    # which ends with "<|im_end|>" rather than the overflow text itself, and
                    # equates decoded-character length with token length — verify the intended
                    # truncation behavior against the tokenizer.
                    full_texts[i] = text[: -len(removed_text)]
                    logger.warning(
                        "The following part of your input was truncated because `max_sequence_length` is set to "
                        f" {max_sequence_length} tokens: {removed_text}"
                    )

        inputs = self.tokenizer(
            text=full_texts,
            images=None,
            videos=None,
            max_length=max_allowed_len,
            truncation=True,
            return_tensors="pt",
            padding=True,
        ).to(device)

        # Last hidden state with the template prefix stripped off.
        embeds = self.text_encoder(
            input_ids=inputs["input_ids"],
            return_dict=True,
            output_hidden_states=True,
        )["hidden_states"][-1][:, self.prompt_template_encode_start_idx :]
        attention_mask = inputs["attention_mask"][:, self.prompt_template_encode_start_idx :]

        # Per-sample valid lengths -> cumulative boundaries [0, l1, l1+l2, ...] for packed attention.
        cu_seqlens = torch.cumsum(attention_mask.sum(1), dim=0)
        cu_seqlens = F.pad(cu_seqlens, (1, 0), value=0).to(dtype=torch.int32)

        return embeds.to(dtype), cu_seqlens

    def _encode_prompt_clip(
        self,
        prompt: str | list[str],
        device: torch.device | None = None,
        dtype: torch.dtype | None = None,
    ):
        """
        Encode prompt using CLIP text encoder.

        This method processes the input prompt through the CLIP model to generate pooled embeddings that capture
        semantic information.

        Args:
            prompt (str | list[str]): Input prompt or list of prompts
            device (torch.device): Device to run encoding on
            dtype (torch.dtype): Data type for embeddings

        Returns:
            torch.Tensor: Pooled text embeddings from CLIP
        """
        device = device or self._execution_device
        dtype = dtype or self.text_encoder_2.dtype

        # CLIP has a fixed 77-token context window; always pad/truncate to it.
        inputs = self.tokenizer_2(
            prompt,
            max_length=77,
            truncation=True,
            add_special_tokens=True,
            padding="max_length",
            return_tensors="pt",
        ).to(device)

        pooled_embed = self.text_encoder_2(**inputs)["pooler_output"]

        return pooled_embed.to(dtype)

    def encode_prompt(
        self,
        prompt: str | list[str],
        num_images_per_prompt: int = 1,
        max_sequence_length: int = 512,
        device: torch.device | None = None,
        dtype: torch.dtype | None = None,
    ):
        r"""
        Encodes a single prompt (positive or negative) into text encoder hidden states.

        This method combines embeddings from both Qwen2.5-VL and CLIP text encoders to create comprehensive text
        representations for image generation.

        Args:
            prompt (`str` or `list[str]`):
                Prompt to be encoded.
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                Number of images to generate per prompt.
            max_sequence_length (`int`, *optional*, defaults to 512):
                Maximum sequence length for text encoding. Must be less than 1024
            device (`torch.device`, *optional*):
                Torch device.
            dtype (`torch.dtype`, *optional*):
                Torch dtype.

        Returns:
            tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
                - Qwen text embeddings of shape (batch_size * num_images_per_prompt, sequence_length, embedding_dim)
                - CLIP pooled embeddings of shape (batch_size * num_images_per_prompt, clip_embedding_dim)
                - Cumulative sequence lengths (`cu_seqlens`) for Qwen embeddings of shape (batch_size *
                  num_images_per_prompt + 1,)
        """
        device = device or self._execution_device
        dtype = dtype or self.text_encoder.dtype

        if not isinstance(prompt, list):
            prompt = [prompt]
        batch_size = len(prompt)
        prompt = [prompt_clean(p) for p in prompt]

        # Encode with Qwen2.5-VL
        prompt_embeds_qwen, prompt_cu_seqlens = self._encode_prompt_qwen(
            prompt=prompt,
            device=device,
            max_sequence_length=max_sequence_length,
            dtype=dtype,
        )
        # prompt_embeds_qwen shape: [batch_size, seq_len, embed_dim]

        # Encode with CLIP
        prompt_embeds_clip = self._encode_prompt_clip(
            prompt=prompt,
            device=device,
            dtype=dtype,
        )
        # prompt_embeds_clip shape: [batch_size, clip_embed_dim]

        # Repeat embeddings for num_images_per_prompt
        # Qwen embeddings: repeat sequence for each image, then reshape
        # (yields repeat_interleave order b0,b0,...,b1,b1,..., matching cu_seqlens below)
        prompt_embeds_qwen = prompt_embeds_qwen.repeat(
            1, num_images_per_prompt, 1
        )  # [batch_size, seq_len * num_images_per_prompt, embed_dim]
        # Reshape to [batch_size * num_images_per_prompt, seq_len, embed_dim]
        prompt_embeds_qwen = prompt_embeds_qwen.view(
            batch_size * num_images_per_prompt, -1, prompt_embeds_qwen.shape[-1]
        )

        # CLIP embeddings: repeat for each image
        # NOTE(review): for batch_size > 1 this tiles the batch (b0,b1,b0,b1,...) while the Qwen
        # rows above follow repeat_interleave order (b0,b0,b1,b1,...) — the two look misaligned;
        # confirm, and consider repeat_interleave(num_images_per_prompt, dim=0) here.
        prompt_embeds_clip = prompt_embeds_clip.repeat(
            1, num_images_per_prompt, 1
        )  # [batch_size, num_images_per_prompt, clip_embed_dim]
        # Reshape to [batch_size * num_images_per_prompt, clip_embed_dim]
        prompt_embeds_clip = prompt_embeds_clip.view(batch_size * num_images_per_prompt, -1)

        # Repeat cumulative sequence lengths for num_images_per_prompt
        # Original differences (lengths) for each prompt in the batch
        original_lengths = prompt_cu_seqlens.diff()  # [len1, len2, ...]
        # Repeat the lengths for num_images_per_prompt
        repeated_lengths = original_lengths.repeat_interleave(
            num_images_per_prompt
        )  # [len1, len1, ..., len2, len2, ...]
        # Reconstruct the cumulative lengths
        repeated_cu_seqlens = torch.cat(
            [torch.tensor([0], device=device, dtype=torch.int32), repeated_lengths.cumsum(0)]
        )

        return prompt_embeds_qwen, prompt_embeds_clip, repeated_cu_seqlens

    def check_inputs(
        self,
        prompt,
        negative_prompt,
        height,
        width,
        prompt_embeds_qwen=None,
        prompt_embeds_clip=None,
        negative_prompt_embeds_qwen=None,
        negative_prompt_embeds_clip=None,
        prompt_cu_seqlens=None,
        negative_prompt_cu_seqlens=None,
        callback_on_step_end_tensor_inputs=None,
        max_sequence_length=None,
    ):
        """
        Validate input parameters for the pipeline.

        Args:
            prompt: Input prompt
            negative_prompt: Negative prompt for guidance
            height: Image height
            width: Image width
            prompt_embeds_qwen: Pre-computed Qwen prompt embeddings
            prompt_embeds_clip: Pre-computed CLIP prompt embeddings
            negative_prompt_embeds_qwen: Pre-computed Qwen negative prompt embeddings
            negative_prompt_embeds_clip: Pre-computed CLIP negative prompt embeddings
            prompt_cu_seqlens: Pre-computed cumulative sequence lengths for Qwen positive prompt
            negative_prompt_cu_seqlens: Pre-computed cumulative sequence lengths for Qwen negative prompt
            callback_on_step_end_tensor_inputs: Callback tensor inputs

        Raises:
            ValueError: If inputs are invalid
        """
        # NOTE(review): the condition admits exactly 1024 while the message says
        # "less than 1024" — align the message or the bound.
        if max_sequence_length is not None and max_sequence_length > 1024:
            raise ValueError("max_sequence_length must be less than 1024")

        # NOTE(review): membership is tested as (width, height) although self.resolutions is
        # documented as (height, width) pairs; the list is mirror-symmetric so behavior is
        # unaffected today, but confirm the intended ordering before extending the list.
        if (width, height) not in self.resolutions:
            resolutions_str = ",".join([f"({w},{h})" for w, h in self.resolutions])
            logger.warning(
                f"`height` and `width` have to be one of {resolutions_str}, but are {height} and {width}. Dimensions will be resized accordingly"
            )

        if callback_on_step_end_tensor_inputs is not None and not all(
            k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
        ):
            raise ValueError(
                f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
            )

        # Check for consistency within positive prompt embeddings and sequence lengths
        if prompt_embeds_qwen is not None or prompt_embeds_clip is not None or prompt_cu_seqlens is not None:
            if prompt_embeds_qwen is None or prompt_embeds_clip is None or prompt_cu_seqlens is None:
                raise ValueError(
                    "If any of `prompt_embeds_qwen`, `prompt_embeds_clip`, or `prompt_cu_seqlens` is provided, "
                    "all three must be provided."
                )

        # Check for consistency within negative prompt embeddings and sequence lengths
        if (
            negative_prompt_embeds_qwen is not None
            or negative_prompt_embeds_clip is not None
            or negative_prompt_cu_seqlens is not None
        ):
            if (
                negative_prompt_embeds_qwen is None
                or negative_prompt_embeds_clip is None
                or negative_prompt_cu_seqlens is None
            ):
                raise ValueError(
                    "If any of `negative_prompt_embeds_qwen`, `negative_prompt_embeds_clip`, or `negative_prompt_cu_seqlens` is provided, "
                    "all three must be provided."
                )

        # Check if prompt or embeddings are provided (either prompt or all required embedding components for positive)
        if prompt is None and prompt_embeds_qwen is None:
            raise ValueError(
                "Provide either `prompt` or `prompt_embeds_qwen` (and corresponding `prompt_embeds_clip` and `prompt_cu_seqlens`). Cannot leave all undefined."
) # Validate types for prompt and negative_prompt if provided if prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if negative_prompt is not None and ( not isinstance(negative_prompt, str) and not isinstance(negative_prompt, list) ): raise ValueError(f"`negative_prompt` has to be of type `str` or `list` but is {type(negative_prompt)}") def prepare_latents( self, batch_size: int, num_channels_latents: int = 16, height: int = 1024, width: int = 1024, dtype: torch.dtype | None = None, device: torch.device | None = None, generator: torch.Generator | list[torch.Generator] | None = None, latents: torch.Tensor | None = None, ) -> torch.Tensor: """ Prepare initial latent variables for text-to-image generation. This method creates random noise latents Args: batch_size (int): Number of images to generate num_channels_latents (int): Number of channels in latent space height (int): Height of generated image width (int): Width of generated image dtype (torch.dtype): Data type for latents device (torch.device): Device to create latents on generator (torch.Generator): Random number generator latents (torch.Tensor): Pre-existing latents to use Returns: torch.Tensor: Prepared latent tensor """ if latents is not None: return latents.to(device=device, dtype=dtype) shape = ( batch_size, 1, int(height) // self.vae_scale_factor_spatial, int(width) // self.vae_scale_factor_spatial, num_channels_latents, ) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
) # Generate random noise latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) return latents @property def guidance_scale(self): """Get the current guidance scale value.""" return self._guidance_scale @property def num_timesteps(self): """Get the number of denoising timesteps.""" return self._num_timesteps @property def interrupt(self): """Check if generation has been interrupted.""" return self._interrupt @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, prompt: str | list[str] = None, negative_prompt: str | list[str] | None = None, height: int = 1024, width: int = 1024, num_inference_steps: int = 50, guidance_scale: float = 3.5, num_images_per_prompt: int | None = 1, generator: torch.Generator | list[torch.Generator] | None = None, latents: torch.Tensor | None = None, prompt_embeds_qwen: torch.Tensor | None = None, prompt_embeds_clip: torch.Tensor | None = None, negative_prompt_embeds_qwen: torch.Tensor | None = None, negative_prompt_embeds_clip: torch.Tensor | None = None, prompt_cu_seqlens: torch.Tensor | None = None, negative_prompt_cu_seqlens: torch.Tensor | None = None, output_type: str | None = "pil", return_dict: bool = True, callback_on_step_end: Callable[[int, int, None], PipelineCallback | MultiPipelineCallbacks] | None = None, callback_on_step_end_tensor_inputs: list[str] = ["latents"], max_sequence_length: int = 512, ): r""" The call function to the pipeline for text-to-image generation. Args: prompt (`str` or `list[str]`, *optional*): The prompt or prompts to guide the image generation. If not defined, pass `prompt_embeds` instead. negative_prompt (`str` or `list[str]`, *optional*): The prompt or prompts to avoid during image generation. If not defined, pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale` < `1`). height (`int`, defaults to `1024`): The height in pixels of the generated image. 
width (`int`, defaults to `1024`): The width in pixels of the generated image. num_inference_steps (`int`, defaults to `50`): The number of denoising steps. guidance_scale (`float`, defaults to `5.0`): Guidance scale as defined in classifier-free guidance. num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. generator (`torch.Generator` or `list[torch.Generator]`, *optional*): A torch generator to make generation deterministic. latents (`torch.Tensor`, *optional*): Pre-generated noisy latents. prompt_embeds_qwen (`torch.Tensor`, *optional*): Pre-generated Qwen text embeddings. prompt_embeds_clip (`torch.Tensor`, *optional*): Pre-generated CLIP text embeddings. negative_prompt_embeds_qwen (`torch.Tensor`, *optional*): Pre-generated Qwen negative text embeddings. negative_prompt_embeds_clip (`torch.Tensor`, *optional*): Pre-generated CLIP negative text embeddings. prompt_cu_seqlens (`torch.Tensor`, *optional*): Pre-generated cumulative sequence lengths for Qwen positive prompt. negative_prompt_cu_seqlens (`torch.Tensor`, *optional*): Pre-generated cumulative sequence lengths for Qwen negative prompt. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generated image. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`KandinskyImagePipelineOutput`]. callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*): A function that is called at the end of each denoising step. callback_on_step_end_tensor_inputs (`List`, *optional*): The list of tensor inputs for the `callback_on_step_end` function. max_sequence_length (`int`, defaults to `512`): The maximum sequence length for text encoding. Examples: Returns: [`~KandinskyImagePipelineOutput`] or `tuple`: If `return_dict` is `True`, [`KandinskyImagePipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated images. 
""" if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs self.check_inputs( prompt=prompt, negative_prompt=negative_prompt, height=height, width=width, prompt_embeds_qwen=prompt_embeds_qwen, prompt_embeds_clip=prompt_embeds_clip, negative_prompt_embeds_qwen=negative_prompt_embeds_qwen, negative_prompt_embeds_clip=negative_prompt_embeds_clip, prompt_cu_seqlens=prompt_cu_seqlens, negative_prompt_cu_seqlens=negative_prompt_cu_seqlens, callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, max_sequence_length=max_sequence_length, ) if (width, height) not in self.resolutions: width, height = self.resolutions[ np.argmin([abs((i[0] / i[1]) - (width / height)) for i in self.resolutions]) ] self._guidance_scale = guidance_scale self._interrupt = False device = self._execution_device dtype = self.transformer.dtype # 2. Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 prompt = [prompt] elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds_qwen.shape[0] # 3. Encode input prompt if prompt_embeds_qwen is None: prompt_embeds_qwen, prompt_embeds_clip, prompt_cu_seqlens = self.encode_prompt( prompt=prompt, num_images_per_prompt=num_images_per_prompt, max_sequence_length=max_sequence_length, device=device, dtype=dtype, ) if self.guidance_scale > 1.0: if negative_prompt is None: negative_prompt = "" if isinstance(negative_prompt, str): negative_prompt = [negative_prompt] * len(prompt) if prompt is not None else [negative_prompt] elif len(negative_prompt) != len(prompt): raise ValueError( f"`negative_prompt` must have same length as `prompt`. Got {len(negative_prompt)} vs {len(prompt)}." 
) if negative_prompt_embeds_qwen is None: negative_prompt_embeds_qwen, negative_prompt_embeds_clip, negative_prompt_cu_seqlens = ( self.encode_prompt( prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, max_sequence_length=max_sequence_length, device=device, dtype=dtype, ) ) # 4. Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps # 5. Prepare latent variables num_channels_latents = self.transformer.config.in_visual_dim latents = self.prepare_latents( batch_size=batch_size * num_images_per_prompt, num_channels_latents=num_channels_latents, height=height, width=width, dtype=dtype, device=device, generator=generator, latents=latents, ) # 6. Prepare rope positions for positional encoding visual_rope_pos = [ torch.arange(1, device=device), torch.arange(height // self.vae_scale_factor_spatial // 2, device=device), torch.arange(width // self.vae_scale_factor_spatial // 2, device=device), ] text_rope_pos = torch.arange(prompt_cu_seqlens.diff().max().item(), device=device) negative_text_rope_pos = ( torch.arange(negative_prompt_cu_seqlens.diff().max().item(), device=device) if negative_prompt_cu_seqlens is not None else None ) # 7. Calculate dynamic scale factor based on resolution scale_factor = [1.0, 1.0, 1.0] # 8. Sparse Params for efficient attention sparse_params = None # 9. 
Denoising loop num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order self._num_timesteps = len(timesteps) with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): if self.interrupt: continue timestep = t.unsqueeze(0).repeat(batch_size * num_images_per_prompt) # Predict noise residual pred_velocity = self.transformer( hidden_states=latents.to(dtype), encoder_hidden_states=prompt_embeds_qwen.to(dtype), pooled_projections=prompt_embeds_clip.to(dtype), timestep=timestep.to(dtype), visual_rope_pos=visual_rope_pos, text_rope_pos=text_rope_pos, scale_factor=scale_factor, sparse_params=sparse_params, return_dict=True, ).sample if self.guidance_scale > 1.0 and negative_prompt_embeds_qwen is not None: uncond_pred_velocity = self.transformer( hidden_states=latents.to(dtype), encoder_hidden_states=negative_prompt_embeds_qwen.to(dtype), pooled_projections=negative_prompt_embeds_clip.to(dtype), timestep=timestep.to(dtype), visual_rope_pos=visual_rope_pos, text_rope_pos=negative_text_rope_pos, scale_factor=scale_factor, sparse_params=sparse_params, return_dict=True, ).sample pred_velocity = uncond_pred_velocity + guidance_scale * (pred_velocity - uncond_pred_velocity) latents = self.scheduler.step(pred_velocity[:, :], t, latents, return_dict=False)[0] if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop("latents", latents) prompt_embeds_qwen = callback_outputs.pop("prompt_embeds_qwen", prompt_embeds_qwen) prompt_embeds_clip = callback_outputs.pop("prompt_embeds_clip", prompt_embeds_clip) negative_prompt_embeds_qwen = callback_outputs.pop( "negative_prompt_embeds_qwen", negative_prompt_embeds_qwen ) negative_prompt_embeds_clip = callback_outputs.pop( "negative_prompt_embeds_clip", negative_prompt_embeds_clip ) if i == len(timesteps) 
- 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if XLA_AVAILABLE: xm.mark_step() # 9. Post-processing - extract main latents latents = latents[:, :, :, :, :num_channels_latents] # 10. Decode latents to image if output_type != "latent": latents = latents.to(self.vae.dtype) # Reshape and normalize latents latents = latents.reshape( batch_size, num_images_per_prompt, 1, height // self.vae_scale_factor_spatial, width // self.vae_scale_factor_spatial, num_channels_latents, ) latents = latents.permute(0, 1, 5, 2, 3, 4) # [batch, num_images, channels, 1, height, width] latents = latents.reshape( batch_size * num_images_per_prompt, num_channels_latents, height // self.vae_scale_factor_spatial, width // self.vae_scale_factor_spatial, ) # Normalize and decode through VAE latents = latents / self.vae.config.scaling_factor image = self.vae.decode(latents).sample image = self.image_processor.postprocess(image, output_type=output_type) else: image = latents # Offload all models self.maybe_free_model_hooks() if not return_dict: return (image,) return KandinskyImagePipelineOutput(image=image)
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/pipelines/kandinsky5/pipeline_kandinsky_t2i.py", "license": "Apache License 2.0", "lines": 697, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:tests/pipelines/kandinsky5/test_kandinsky5_i2i.py
# Copyright 2025 The Kandinsky Team and The HuggingFace Team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import torch from PIL import Image from transformers import ( AutoProcessor, CLIPTextConfig, CLIPTextModel, CLIPTokenizer, Qwen2_5_VLConfig, Qwen2_5_VLForConditionalGeneration, ) from diffusers import ( AutoencoderKL, FlowMatchEulerDiscreteScheduler, Kandinsky5I2IPipeline, Kandinsky5Transformer3DModel, ) from diffusers.utils.testing_utils import enable_full_determinism from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class Kandinsky5I2IPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = Kandinsky5I2IPipeline batch_params = ["prompt", "negative_prompt"] params = frozenset(["image", "prompt", "height", "width", "num_inference_steps", "guidance_scale"]) required_optional_params = { "num_inference_steps", "generator", "latents", "return_dict", "callback_on_step_end", "callback_on_step_end_tensor_inputs", "max_sequence_length", } test_xformers_attention = False supports_optional_components = True supports_dduf = False test_attention_slicing = False def get_dummy_components(self): torch.manual_seed(0) vae = AutoencoderKL( act_fn="silu", block_out_channels=[32, 64, 64, 64], down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"], force_upcast=True, in_channels=3, latent_channels=16, layers_per_block=1, mid_block_add_attention=False, norm_num_groups=32, 
out_channels=3, sample_size=64, scaling_factor=0.3611, shift_factor=0.1159, up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], use_post_quant_conv=False, use_quant_conv=False, ) scheduler = FlowMatchEulerDiscreteScheduler(shift=7.0) qwen_hidden_size = 32 torch.manual_seed(0) qwen_config = Qwen2_5_VLConfig( text_config={ "hidden_size": qwen_hidden_size, "intermediate_size": qwen_hidden_size, "num_hidden_layers": 2, "num_attention_heads": 2, "num_key_value_heads": 2, "rope_scaling": { "mrope_section": [2, 2, 4], "rope_type": "default", "type": "default", }, "rope_theta": 1000000.0, }, vision_config={ "depth": 2, "hidden_size": qwen_hidden_size, "intermediate_size": qwen_hidden_size, "num_heads": 2, "out_hidden_size": qwen_hidden_size, }, hidden_size=qwen_hidden_size, vocab_size=152064, vision_end_token_id=151653, vision_start_token_id=151652, vision_token_id=151654, ) text_encoder = Qwen2_5_VLForConditionalGeneration(qwen_config) tokenizer = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-Qwen2VLForConditionalGeneration") clip_hidden_size = 16 torch.manual_seed(0) clip_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=clip_hidden_size, intermediate_size=16, layer_norm_eps=1e-05, num_attention_heads=2, num_hidden_layers=2, pad_token_id=1, vocab_size=1000, projection_dim=clip_hidden_size, ) text_encoder_2 = CLIPTextModel(clip_config) tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") torch.manual_seed(0) transformer = Kandinsky5Transformer3DModel( in_visual_dim=16, in_text_dim=qwen_hidden_size, in_text_dim2=clip_hidden_size, time_dim=16, out_visual_dim=16, patch_size=(1, 2, 2), model_dim=16, ff_dim=32, num_text_blocks=1, num_visual_blocks=2, axes_dims=(1, 1, 2), visual_cond=True, attention_type="regular", ) return { "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "text_encoder_2": text_encoder_2, "tokenizer_2": tokenizer_2, "transformer": 
transformer, "scheduler": scheduler, } def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) image = Image.new("RGB", (64, 64), color="red") return { "image": image, "prompt": "a red square", "height": 64, "width": 64, "num_inference_steps": 2, "guidance_scale": 4.0, "generator": generator, "output_type": "pt", "max_sequence_length": 8, } def test_inference(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.resolutions = [(64, 64)] pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) output = pipe(**inputs) image = output.image self.assertEqual(image.shape, (1, 3, 64, 64)) @unittest.skip("TODO: Test does not work") def test_encode_prompt_works_in_isolation(self): pass @unittest.skip("TODO: revisit, Batch isnot yet supported in this pipeline") def test_num_images_per_prompt(self): pass @unittest.skip("TODO: revisit, Batch isnot yet supported in this pipeline") def test_inference_batch_single_identical(self): pass @unittest.skip("TODO: revisit, Batch isnot yet supported in this pipeline") def test_inference_batch_consistent(self): pass @unittest.skip("TODO: revisit, not working") def test_float16_inference(self): pass
{ "repo_id": "huggingface/diffusers", "file_path": "tests/pipelines/kandinsky5/test_kandinsky5_i2i.py", "license": "Apache License 2.0", "lines": 186, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/diffusers:tests/pipelines/kandinsky5/test_kandinsky5_i2v.py
# Copyright 2025 The Kandinsky Team and The HuggingFace Team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import torch from PIL import Image from transformers import ( AutoProcessor, CLIPTextConfig, CLIPTextModel, CLIPTokenizer, Qwen2_5_VLConfig, Qwen2_5_VLForConditionalGeneration, ) from diffusers import ( AutoencoderKLHunyuanVideo, FlowMatchEulerDiscreteScheduler, Kandinsky5I2VPipeline, Kandinsky5Transformer3DModel, ) from diffusers.utils.testing_utils import enable_full_determinism from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class Kandinsky5I2VPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = Kandinsky5I2VPipeline batch_params = ["prompt", "negative_prompt"] params = frozenset(["image", "prompt", "height", "width", "num_frames", "num_inference_steps", "guidance_scale"]) required_optional_params = { "num_inference_steps", "generator", "latents", "return_dict", "callback_on_step_end", "callback_on_step_end_tensor_inputs", "max_sequence_length", } test_xformers_attention = False supports_optional_components = True supports_dduf = False test_attention_slicing = False def get_dummy_components(self): torch.manual_seed(0) vae = AutoencoderKLHunyuanVideo( act_fn="silu", block_out_channels=[32, 64, 64], down_block_types=[ "HunyuanVideoDownBlock3D", "HunyuanVideoDownBlock3D", "HunyuanVideoDownBlock3D", ], in_channels=3, latent_channels=16, layers_per_block=1, mid_block_add_attention=False, 
norm_num_groups=32, out_channels=3, scaling_factor=0.476986, spatial_compression_ratio=8, temporal_compression_ratio=4, up_block_types=[ "HunyuanVideoUpBlock3D", "HunyuanVideoUpBlock3D", "HunyuanVideoUpBlock3D", ], ) scheduler = FlowMatchEulerDiscreteScheduler(shift=7.0) qwen_hidden_size = 32 torch.manual_seed(0) qwen_config = Qwen2_5_VLConfig( text_config={ "hidden_size": qwen_hidden_size, "intermediate_size": qwen_hidden_size, "num_hidden_layers": 2, "num_attention_heads": 2, "num_key_value_heads": 2, "rope_scaling": { "mrope_section": [2, 2, 4], "rope_type": "default", "type": "default", }, "rope_theta": 1000000.0, }, vision_config={ "depth": 2, "hidden_size": qwen_hidden_size, "intermediate_size": qwen_hidden_size, "num_heads": 2, "out_hidden_size": qwen_hidden_size, }, hidden_size=qwen_hidden_size, vocab_size=152064, vision_end_token_id=151653, vision_start_token_id=151652, vision_token_id=151654, ) text_encoder = Qwen2_5_VLForConditionalGeneration(qwen_config) tokenizer = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-Qwen2VLForConditionalGeneration") clip_hidden_size = 16 torch.manual_seed(0) clip_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=clip_hidden_size, intermediate_size=16, layer_norm_eps=1e-05, num_attention_heads=2, num_hidden_layers=2, pad_token_id=1, vocab_size=1000, projection_dim=clip_hidden_size, ) text_encoder_2 = CLIPTextModel(clip_config) tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") torch.manual_seed(0) transformer = Kandinsky5Transformer3DModel( in_visual_dim=16, in_text_dim=qwen_hidden_size, in_text_dim2=clip_hidden_size, time_dim=16, out_visual_dim=16, patch_size=(1, 2, 2), model_dim=16, ff_dim=32, num_text_blocks=1, num_visual_blocks=2, axes_dims=(1, 1, 2), visual_cond=True, attention_type="regular", ) return { "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "text_encoder_2": text_encoder_2, "tokenizer_2": tokenizer_2, "transformer": 
transformer, "scheduler": scheduler, } def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) image = Image.new("RGB", (32, 32), color="red") return { "image": image, "prompt": "a red square", "height": 32, "width": 32, "num_frames": 17, "num_inference_steps": 2, "guidance_scale": 4.0, "generator": generator, "output_type": "pt", "max_sequence_length": 8, } def test_inference(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) output = pipe(**inputs) video = output.frames[0] # 17 frames, RGB, 32×32 self.assertEqual(video.shape, (17, 3, 32, 32)) @unittest.skip("TODO:Test does not work") def test_encode_prompt_works_in_isolation(self): pass @unittest.skip("TODO: revisit") def test_callback_inputs(self): pass @unittest.skip("TODO: revisit") def test_inference_batch_single_identical(self): pass
{ "repo_id": "huggingface/diffusers", "file_path": "tests/pipelines/kandinsky5/test_kandinsky5_i2v.py", "license": "Apache License 2.0", "lines": 186, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/diffusers:tests/pipelines/kandinsky5/test_kandinsky5_t2i.py
# Copyright 2025 The Kandinsky Team and The HuggingFace Team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import torch from transformers import ( AutoProcessor, CLIPTextConfig, CLIPTextModel, CLIPTokenizer, Qwen2_5_VLConfig, Qwen2_5_VLForConditionalGeneration, ) from diffusers import ( AutoencoderKL, FlowMatchEulerDiscreteScheduler, Kandinsky5T2IPipeline, Kandinsky5Transformer3DModel, ) from diffusers.utils.testing_utils import enable_full_determinism from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class Kandinsky5T2IPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = Kandinsky5T2IPipeline batch_params = ["prompt", "negative_prompt"] params = frozenset(["prompt", "height", "width", "num_inference_steps", "guidance_scale"]) required_optional_params = { "num_inference_steps", "generator", "latents", "return_dict", "callback_on_step_end", "callback_on_step_end_tensor_inputs", "max_sequence_length", } test_xformers_attention = False supports_optional_components = True supports_dduf = False test_attention_slicing = False def get_dummy_components(self): torch.manual_seed(0) vae = AutoencoderKL( act_fn="silu", block_out_channels=[32, 64], down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], force_upcast=True, in_channels=3, latent_channels=16, layers_per_block=1, mid_block_add_attention=False, norm_num_groups=32, out_channels=3, sample_size=128, scaling_factor=0.3611, shift_factor=0.1159, 
up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], use_post_quant_conv=False, use_quant_conv=False, ) scheduler = FlowMatchEulerDiscreteScheduler(shift=7.0) qwen_hidden_size = 32 torch.manual_seed(0) qwen_config = Qwen2_5_VLConfig( text_config={ "hidden_size": qwen_hidden_size, "intermediate_size": qwen_hidden_size, "num_hidden_layers": 2, "num_attention_heads": 2, "num_key_value_heads": 2, "rope_scaling": { "mrope_section": [2, 2, 4], "rope_type": "default", "type": "default", }, "rope_theta": 1000000.0, }, vision_config={ "depth": 2, "hidden_size": qwen_hidden_size, "intermediate_size": qwen_hidden_size, "num_heads": 2, "out_hidden_size": qwen_hidden_size, }, hidden_size=qwen_hidden_size, vocab_size=152064, vision_end_token_id=151653, vision_start_token_id=151652, vision_token_id=151654, ) text_encoder = Qwen2_5_VLForConditionalGeneration(qwen_config) tokenizer = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-Qwen2VLForConditionalGeneration") clip_hidden_size = 16 torch.manual_seed(0) clip_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=clip_hidden_size, intermediate_size=16, layer_norm_eps=1e-05, num_attention_heads=2, num_hidden_layers=2, pad_token_id=1, vocab_size=1000, projection_dim=clip_hidden_size, ) text_encoder_2 = CLIPTextModel(clip_config) tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") torch.manual_seed(0) transformer = Kandinsky5Transformer3DModel( in_visual_dim=16, in_text_dim=qwen_hidden_size, in_text_dim2=clip_hidden_size, time_dim=16, out_visual_dim=16, patch_size=(1, 2, 2), model_dim=16, ff_dim=32, num_text_blocks=1, num_visual_blocks=2, axes_dims=(1, 1, 2), visual_cond=False, attention_type="regular", ) return { "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "text_encoder_2": text_encoder_2, "tokenizer_2": tokenizer_2, "transformer": transformer, "scheduler": scheduler, } def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): 
generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) return { "prompt": "a red square", "height": 64, "width": 64, "num_inference_steps": 2, "guidance_scale": 4.0, "generator": generator, "output_type": "pt", "max_sequence_length": 8, } def test_inference(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.resolutions = [(64, 64)] pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) output = pipe(**inputs) image = output.image self.assertEqual(image.shape, (1, 3, 16, 16)) def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=5e-3) @unittest.skip("Test not supported") def test_attention_slicing_forward_pass(self): pass @unittest.skip("Only SDPA or NABLA (flex)") def test_xformers_memory_efficient_attention(self): pass @unittest.skip("All encoders are needed") def test_encode_prompt_works_in_isolation(self): pass @unittest.skip("Meant for eiter FP32 or BF16 inference") def test_float16_inference(self): pass
{ "repo_id": "huggingface/diffusers", "file_path": "tests/pipelines/kandinsky5/test_kandinsky5_t2i.py", "license": "Apache License 2.0", "lines": 182, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/diffusers:scripts/convert_ovis_image_to_diffusers.py
import argparse
from contextlib import nullcontext

import safetensors.torch
import torch
from accelerate import init_empty_weights
from huggingface_hub import hf_hub_download

from diffusers import OvisImageTransformer2DModel
from diffusers.utils.import_utils import is_accelerate_available


"""
# Transformer

python scripts/convert_ovis_image_to_diffusers.py \
--original_state_dict_repo_id "AIDC-AI/Ovis-Image-7B" \
--filename "ovis_image.safetensors"
--output_path "ovis-image" \
--transformer
"""

# Materialize the model skeleton without allocating real weight memory when
# accelerate is available; the converted state dict is loaded in afterwards.
CTX = init_empty_weights if is_accelerate_available() else nullcontext

parser = argparse.ArgumentParser()
parser.add_argument("--original_state_dict_repo_id", default=None, type=str)
parser.add_argument("--filename", default="ovis_image.safetensors", type=str)
parser.add_argument("--checkpoint_path", default=None, type=str)
parser.add_argument("--in_channels", type=int, default=64)
parser.add_argument("--out_channels", type=int, default=None)
parser.add_argument("--transformer", action="store_true")
parser.add_argument("--output_path", type=str)
parser.add_argument("--dtype", type=str, default="bf16")

args = parser.parse_args()
dtype = torch.bfloat16 if args.dtype == "bf16" else torch.float32


def load_original_checkpoint(args):
    """Load the original Ovis-Image safetensors state dict.

    Downloads ``args.filename`` from ``args.original_state_dict_repo_id`` on the
    Hub when a repo id is given, otherwise reads the local ``args.checkpoint_path``.

    Raises:
        ValueError: if neither a repo id nor a local checkpoint path is provided.
    """
    if args.original_state_dict_repo_id is not None:
        ckpt_path = hf_hub_download(repo_id=args.original_state_dict_repo_id, filename=args.filename)
    elif args.checkpoint_path is not None:
        ckpt_path = args.checkpoint_path
    else:
        raise ValueError("Please provide either `original_state_dict_repo_id` or a local `checkpoint_path`.")

    original_state_dict = safetensors.torch.load_file(ckpt_path)
    return original_state_dict


# in SD3 original implementation of AdaLayerNormContinuous, it split linear projection output into shift, scale;
# while in diffusers it split into scale, shift.
# Here we swap the linear projection weights in order to be able to use diffusers implementation
def swap_scale_shift(weight):
    """Swap the (shift, scale) halves of an AdaLN projection into (scale, shift) order."""
    shift, scale = weight.chunk(2, dim=0)
    new_weight = torch.cat([scale, shift], dim=0)
    return new_weight


def convert_ovis_image_transformer_checkpoint_to_diffusers(
    original_state_dict, num_layers, num_single_layers, inner_dim, mlp_ratio=4.0
):
    """Map the original Ovis-Image transformer state dict onto diffusers parameter names.

    Entries are ``pop``-ed from ``original_state_dict`` as they are converted, so any
    keys left in it afterwards were not handled (useful for spotting leftovers).

    Args:
        original_state_dict: the raw checkpoint state dict (consumed in place).
        num_layers: number of dual-stream (`double_blocks`) transformer blocks.
        num_single_layers: number of single-stream (`single_blocks`) transformer blocks.
        inner_dim: hidden size of the transformer.
        mlp_ratio: MLP expansion ratio of the single-stream blocks.

    Returns:
        dict: a state dict loadable into `OvisImageTransformer2DModel`.
    """
    converted_state_dict = {}

    ## time_text_embed.timestep_embedder <- time_in
    converted_state_dict["timestep_embedder.linear_1.weight"] = original_state_dict.pop("time_in.in_layer.weight")
    converted_state_dict["timestep_embedder.linear_1.bias"] = original_state_dict.pop("time_in.in_layer.bias")
    converted_state_dict["timestep_embedder.linear_2.weight"] = original_state_dict.pop("time_in.out_layer.weight")
    converted_state_dict["timestep_embedder.linear_2.bias"] = original_state_dict.pop("time_in.out_layer.bias")

    # context_embedder
    converted_state_dict["context_embedder_norm.weight"] = original_state_dict.pop("semantic_txt_norm.weight")
    converted_state_dict["context_embedder.weight"] = original_state_dict.pop("semantic_txt_in.weight")
    converted_state_dict["context_embedder.bias"] = original_state_dict.pop("semantic_txt_in.bias")

    # x_embedder
    converted_state_dict["x_embedder.weight"] = original_state_dict.pop("img_in.weight")
    converted_state_dict["x_embedder.bias"] = original_state_dict.pop("img_in.bias")

    # double transformer blocks
    for i in range(num_layers):
        block_prefix = f"transformer_blocks.{i}."

        # norms: AdaLN modulation projections for the image and text streams.
        converted_state_dict[f"{block_prefix}norm1.linear.weight"] = original_state_dict.pop(
            f"double_blocks.{i}.img_mod.lin.weight"
        )
        converted_state_dict[f"{block_prefix}norm1.linear.bias"] = original_state_dict.pop(
            f"double_blocks.{i}.img_mod.lin.bias"
        )
        converted_state_dict[f"{block_prefix}norm1_context.linear.weight"] = original_state_dict.pop(
            f"double_blocks.{i}.txt_mod.lin.weight"
        )
        converted_state_dict[f"{block_prefix}norm1_context.linear.bias"] = original_state_dict.pop(
            f"double_blocks.{i}.txt_mod.lin.bias"
        )

        # Q, K, V: the original checkpoint fuses them into one projection; split in thirds.
        sample_q, sample_k, sample_v = torch.chunk(
            original_state_dict.pop(f"double_blocks.{i}.img_attn.qkv.weight"), 3, dim=0
        )
        context_q, context_k, context_v = torch.chunk(
            original_state_dict.pop(f"double_blocks.{i}.txt_attn.qkv.weight"), 3, dim=0
        )
        sample_q_bias, sample_k_bias, sample_v_bias = torch.chunk(
            original_state_dict.pop(f"double_blocks.{i}.img_attn.qkv.bias"), 3, dim=0
        )
        context_q_bias, context_k_bias, context_v_bias = torch.chunk(
            original_state_dict.pop(f"double_blocks.{i}.txt_attn.qkv.bias"), 3, dim=0
        )
        # NOTE: the single-element `torch.cat([...])` wrappers in the original converter
        # were no-ops; the chunked tensors are assigned directly.
        converted_state_dict[f"{block_prefix}attn.to_q.weight"] = sample_q
        converted_state_dict[f"{block_prefix}attn.to_q.bias"] = sample_q_bias
        converted_state_dict[f"{block_prefix}attn.to_k.weight"] = sample_k
        converted_state_dict[f"{block_prefix}attn.to_k.bias"] = sample_k_bias
        converted_state_dict[f"{block_prefix}attn.to_v.weight"] = sample_v
        converted_state_dict[f"{block_prefix}attn.to_v.bias"] = sample_v_bias
        converted_state_dict[f"{block_prefix}attn.add_q_proj.weight"] = context_q
        converted_state_dict[f"{block_prefix}attn.add_q_proj.bias"] = context_q_bias
        converted_state_dict[f"{block_prefix}attn.add_k_proj.weight"] = context_k
        converted_state_dict[f"{block_prefix}attn.add_k_proj.bias"] = context_k_bias
        converted_state_dict[f"{block_prefix}attn.add_v_proj.weight"] = context_v
        converted_state_dict[f"{block_prefix}attn.add_v_proj.bias"] = context_v_bias

        # qk_norm
        converted_state_dict[f"{block_prefix}attn.norm_q.weight"] = original_state_dict.pop(
            f"double_blocks.{i}.img_attn.norm.query_norm.weight"
        )
        converted_state_dict[f"{block_prefix}attn.norm_k.weight"] = original_state_dict.pop(
            f"double_blocks.{i}.img_attn.norm.key_norm.weight"
        )
        converted_state_dict[f"{block_prefix}attn.norm_added_q.weight"] = original_state_dict.pop(
            f"double_blocks.{i}.txt_attn.norm.query_norm.weight"
        )
        converted_state_dict[f"{block_prefix}attn.norm_added_k.weight"] = original_state_dict.pop(
            f"double_blocks.{i}.txt_attn.norm.key_norm.weight"
        )

        # ff img_mlp: diffusers SwiGLU FeedForward stores up- and gate-projections concatenated.
        converted_state_dict[f"{block_prefix}ff.net.0.proj.weight"] = torch.cat(
            [
                original_state_dict.pop(f"double_blocks.{i}.img_mlp.up_proj.weight"),
                original_state_dict.pop(f"double_blocks.{i}.img_mlp.gate_proj.weight"),
            ],
            dim=0,
        )
        converted_state_dict[f"{block_prefix}ff.net.0.proj.bias"] = torch.cat(
            [
                original_state_dict.pop(f"double_blocks.{i}.img_mlp.up_proj.bias"),
                original_state_dict.pop(f"double_blocks.{i}.img_mlp.gate_proj.bias"),
            ],
            dim=0,
        )
        converted_state_dict[f"{block_prefix}ff.net.2.weight"] = original_state_dict.pop(
            f"double_blocks.{i}.img_mlp.down_proj.weight"
        )
        converted_state_dict[f"{block_prefix}ff.net.2.bias"] = original_state_dict.pop(
            f"double_blocks.{i}.img_mlp.down_proj.bias"
        )
        converted_state_dict[f"{block_prefix}ff_context.net.0.proj.weight"] = torch.cat(
            [
                original_state_dict.pop(f"double_blocks.{i}.txt_mlp.up_proj.weight"),
                original_state_dict.pop(f"double_blocks.{i}.txt_mlp.gate_proj.weight"),
            ],
            dim=0,
        )
        converted_state_dict[f"{block_prefix}ff_context.net.0.proj.bias"] = torch.cat(
            [
                original_state_dict.pop(f"double_blocks.{i}.txt_mlp.up_proj.bias"),
                original_state_dict.pop(f"double_blocks.{i}.txt_mlp.gate_proj.bias"),
            ],
            dim=0,
        )
        converted_state_dict[f"{block_prefix}ff_context.net.2.weight"] = original_state_dict.pop(
            f"double_blocks.{i}.txt_mlp.down_proj.weight"
        )
        converted_state_dict[f"{block_prefix}ff_context.net.2.bias"] = original_state_dict.pop(
            f"double_blocks.{i}.txt_mlp.down_proj.bias"
        )

        # output projections.
        converted_state_dict[f"{block_prefix}attn.to_out.0.weight"] = original_state_dict.pop(
            f"double_blocks.{i}.img_attn.proj.weight"
        )
        converted_state_dict[f"{block_prefix}attn.to_out.0.bias"] = original_state_dict.pop(
            f"double_blocks.{i}.img_attn.proj.bias"
        )
        converted_state_dict[f"{block_prefix}attn.to_add_out.weight"] = original_state_dict.pop(
            f"double_blocks.{i}.txt_attn.proj.weight"
        )
        converted_state_dict[f"{block_prefix}attn.to_add_out.bias"] = original_state_dict.pop(
            f"double_blocks.{i}.txt_attn.proj.bias"
        )

    # single transformer blocks
    # Loop-invariant split sizes hoisted out of the loop (Q, K, V, fused MLP).
    mlp_hidden_dim = int(inner_dim * mlp_ratio)
    split_size = (inner_dim, inner_dim, inner_dim, mlp_hidden_dim * 2)
    for i in range(num_single_layers):
        block_prefix = f"single_transformer_blocks.{i}."

        # norm.linear <- single_blocks.{i}.modulation.lin
        converted_state_dict[f"{block_prefix}norm.linear.weight"] = original_state_dict.pop(
            f"single_blocks.{i}.modulation.lin.weight"
        )
        converted_state_dict[f"{block_prefix}norm.linear.bias"] = original_state_dict.pop(
            f"single_blocks.{i}.modulation.lin.bias"
        )

        # Q, K, V, mlp are fused in `linear1`; split them apart.
        q, k, v, mlp = torch.split(original_state_dict.pop(f"single_blocks.{i}.linear1.weight"), split_size, dim=0)
        q_bias, k_bias, v_bias, mlp_bias = torch.split(
            original_state_dict.pop(f"single_blocks.{i}.linear1.bias"), split_size, dim=0
        )
        converted_state_dict[f"{block_prefix}attn.to_q.weight"] = q
        converted_state_dict[f"{block_prefix}attn.to_q.bias"] = q_bias
        converted_state_dict[f"{block_prefix}attn.to_k.weight"] = k
        converted_state_dict[f"{block_prefix}attn.to_k.bias"] = k_bias
        converted_state_dict[f"{block_prefix}attn.to_v.weight"] = v
        converted_state_dict[f"{block_prefix}attn.to_v.bias"] = v_bias
        converted_state_dict[f"{block_prefix}proj_mlp.weight"] = mlp
        converted_state_dict[f"{block_prefix}proj_mlp.bias"] = mlp_bias

        # qk norm
        converted_state_dict[f"{block_prefix}attn.norm_q.weight"] = original_state_dict.pop(
            f"single_blocks.{i}.norm.query_norm.weight"
        )
        converted_state_dict[f"{block_prefix}attn.norm_k.weight"] = original_state_dict.pop(
            f"single_blocks.{i}.norm.key_norm.weight"
        )

        # output projections.
        converted_state_dict[f"{block_prefix}proj_out.weight"] = original_state_dict.pop(
            f"single_blocks.{i}.linear2.weight"
        )
        converted_state_dict[f"{block_prefix}proj_out.bias"] = original_state_dict.pop(
            f"single_blocks.{i}.linear2.bias"
        )

    # Final layer; AdaLN modulation halves are reordered (see swap_scale_shift).
    converted_state_dict["proj_out.weight"] = original_state_dict.pop("final_layer.linear.weight")
    converted_state_dict["proj_out.bias"] = original_state_dict.pop("final_layer.linear.bias")
    converted_state_dict["norm_out.linear.weight"] = swap_scale_shift(
        original_state_dict.pop("final_layer.adaLN_modulation.1.weight")
    )
    converted_state_dict["norm_out.linear.bias"] = swap_scale_shift(
        original_state_dict.pop("final_layer.adaLN_modulation.1.bias")
    )

    return converted_state_dict


def main(args):
    """Convert the requested components and save them in diffusers layout under `args.output_path`."""
    original_ckpt = load_original_checkpoint(args)

    if args.transformer:
        # Ovis-Image-7B architecture hyper-parameters.
        num_layers = 6
        num_single_layers = 27
        inner_dim = 3072
        mlp_ratio = 4.0
        converted_transformer_state_dict = convert_ovis_image_transformer_checkpoint_to_diffusers(
            original_ckpt, num_layers, num_single_layers, inner_dim, mlp_ratio=mlp_ratio
        )
        transformer = OvisImageTransformer2DModel(in_channels=args.in_channels, out_channels=args.out_channels)
        transformer.load_state_dict(converted_transformer_state_dict, strict=True)

        print("Saving Ovis-Image Transformer in Diffusers format.")
        transformer.to(dtype).save_pretrained(f"{args.output_path}/transformer")


if __name__ == "__main__":
    main(args)
{ "repo_id": "huggingface/diffusers", "file_path": "scripts/convert_ovis_image_to_diffusers.py", "license": "Apache License 2.0", "lines": 233, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
huggingface/diffusers:src/diffusers/models/transformers/transformer_ovis_image.py
# Copyright 2025 Alibaba Ovis-Image Team and The HuggingFace. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import inspect
from typing import Any

import torch
import torch.nn as nn
import torch.nn.functional as F

from ...configuration_utils import ConfigMixin, register_to_config
from ...loaders import FromOriginalModelMixin, PeftAdapterMixin
from ...utils import logging
from ...utils.torch_utils import maybe_allow_in_graph
from ..attention import AttentionModuleMixin, FeedForward
from ..attention_dispatch import dispatch_attention_fn
from ..cache_utils import CacheMixin
from ..embeddings import TimestepEmbedding, Timesteps, apply_rotary_emb, get_1d_rotary_pos_embed
from ..modeling_outputs import Transformer2DModelOutput
from ..modeling_utils import ModelMixin
from ..normalization import AdaLayerNormContinuous, AdaLayerNormZero, AdaLayerNormZeroSingle


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def _get_projections(attn: "OvisImageAttention", hidden_states, encoder_hidden_states=None):
    """Run the unfused Q/K/V projections of `attn` on the sample stream and, when an
    encoder sequence and added-KV projections are both present, on the text stream.

    Returns a 6-tuple `(q, k, v, enc_q, enc_k, enc_v)`; the encoder entries are
    `None` when no encoder projections were computed.
    """
    query, key, value = (
        attn.to_q(hidden_states),
        attn.to_k(hidden_states),
        attn.to_v(hidden_states),
    )

    if encoder_hidden_states is None or attn.added_kv_proj_dim is None:
        encoder_query = encoder_key = encoder_value = None
    else:
        encoder_query = attn.add_q_proj(encoder_hidden_states)
        encoder_key = attn.add_k_proj(encoder_hidden_states)
        encoder_value = attn.add_v_proj(encoder_hidden_states)

    return query, key, value, encoder_query, encoder_key, encoder_value
def _get_fused_projections(attn: "OvisImageAttention", hidden_states, encoder_hidden_states=None): query, key, value = attn.to_qkv(hidden_states).chunk(3, dim=-1) encoder_query = encoder_key = encoder_value = (None,) if encoder_hidden_states is not None and hasattr(attn, "to_added_qkv"): encoder_query, encoder_key, encoder_value = attn.to_added_qkv(encoder_hidden_states).chunk(3, dim=-1) return query, key, value, encoder_query, encoder_key, encoder_value def _get_qkv_projections(attn: "OvisImageAttention", hidden_states, encoder_hidden_states=None): if attn.fused_projections: return _get_fused_projections(attn, hidden_states, encoder_hidden_states) return _get_projections(attn, hidden_states, encoder_hidden_states) class OvisImageAttnProcessor: _attention_backend = None _parallel_config = None def __init__(self): if not hasattr(F, "scaled_dot_product_attention"): raise ImportError(f"{self.__class__.__name__} requires PyTorch 2.0. Please upgrade your pytorch version.") def __call__( self, attn: "OvisImageAttention", hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor = None, attention_mask: torch.Tensor | None = None, image_rotary_emb: torch.Tensor | None = None, ) -> torch.Tensor: query, key, value, encoder_query, encoder_key, encoder_value = _get_qkv_projections( attn, hidden_states, encoder_hidden_states ) query = query.unflatten(-1, (attn.heads, -1)) key = key.unflatten(-1, (attn.heads, -1)) value = value.unflatten(-1, (attn.heads, -1)) query = attn.norm_q(query) key = attn.norm_k(key) if attn.added_kv_proj_dim is not None: encoder_query = encoder_query.unflatten(-1, (attn.heads, -1)) encoder_key = encoder_key.unflatten(-1, (attn.heads, -1)) encoder_value = encoder_value.unflatten(-1, (attn.heads, -1)) encoder_query = attn.norm_added_q(encoder_query) encoder_key = attn.norm_added_k(encoder_key) query = torch.cat([encoder_query, query], dim=1) key = torch.cat([encoder_key, key], dim=1) value = torch.cat([encoder_value, value], dim=1) if 
image_rotary_emb is not None: query = apply_rotary_emb(query, image_rotary_emb, sequence_dim=1) key = apply_rotary_emb(key, image_rotary_emb, sequence_dim=1) hidden_states = dispatch_attention_fn( query, key, value, attn_mask=attention_mask, backend=self._attention_backend, parallel_config=self._parallel_config, ) hidden_states = hidden_states.flatten(2, 3) hidden_states = hidden_states.to(query.dtype) if encoder_hidden_states is not None: encoder_hidden_states, hidden_states = hidden_states.split_with_sizes( [encoder_hidden_states.shape[1], hidden_states.shape[1] - encoder_hidden_states.shape[1]], dim=1 ) hidden_states = attn.to_out[0](hidden_states) hidden_states = attn.to_out[1](hidden_states) encoder_hidden_states = attn.to_add_out(encoder_hidden_states) return hidden_states, encoder_hidden_states else: return hidden_states class OvisImageAttention(torch.nn.Module, AttentionModuleMixin): _default_processor_cls = OvisImageAttnProcessor _available_processors = [ OvisImageAttnProcessor, ] def __init__( self, query_dim: int, heads: int = 8, dim_head: int = 64, dropout: float = 0.0, bias: bool = False, added_kv_proj_dim: int | None = None, added_proj_bias: bool | None = True, out_bias: bool = True, eps: float = 1e-5, out_dim: int = None, context_pre_only: bool | None = None, pre_only: bool = False, elementwise_affine: bool = True, processor=None, ): super().__init__() self.head_dim = dim_head self.inner_dim = out_dim if out_dim is not None else dim_head * heads self.query_dim = query_dim self.use_bias = bias self.dropout = dropout self.out_dim = out_dim if out_dim is not None else query_dim self.context_pre_only = context_pre_only self.pre_only = pre_only self.heads = out_dim // dim_head if out_dim is not None else heads self.added_kv_proj_dim = added_kv_proj_dim self.added_proj_bias = added_proj_bias self.norm_q = torch.nn.RMSNorm(dim_head, eps=eps, elementwise_affine=elementwise_affine) self.norm_k = torch.nn.RMSNorm(dim_head, eps=eps, 
elementwise_affine=elementwise_affine) self.to_q = torch.nn.Linear(query_dim, self.inner_dim, bias=bias) self.to_k = torch.nn.Linear(query_dim, self.inner_dim, bias=bias) self.to_v = torch.nn.Linear(query_dim, self.inner_dim, bias=bias) if not self.pre_only: self.to_out = torch.nn.ModuleList([]) self.to_out.append(torch.nn.Linear(self.inner_dim, self.out_dim, bias=out_bias)) self.to_out.append(torch.nn.Dropout(dropout)) if added_kv_proj_dim is not None: self.norm_added_q = torch.nn.RMSNorm(dim_head, eps=eps) self.norm_added_k = torch.nn.RMSNorm(dim_head, eps=eps) self.add_q_proj = torch.nn.Linear(added_kv_proj_dim, self.inner_dim, bias=added_proj_bias) self.add_k_proj = torch.nn.Linear(added_kv_proj_dim, self.inner_dim, bias=added_proj_bias) self.add_v_proj = torch.nn.Linear(added_kv_proj_dim, self.inner_dim, bias=added_proj_bias) self.to_add_out = torch.nn.Linear(self.inner_dim, query_dim, bias=out_bias) if processor is None: processor = self._default_processor_cls() self.set_processor(processor) def forward( self, hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor | None = None, attention_mask: torch.Tensor | None = None, image_rotary_emb: torch.Tensor | None = None, **kwargs, ) -> torch.Tensor: attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) quiet_attn_parameters = {"ip_adapter_masks", "ip_hidden_states"} unused_kwargs = [k for k, _ in kwargs.items() if k not in attn_parameters and k not in quiet_attn_parameters] if len(unused_kwargs) > 0: logger.warning( f"joint_attention_kwargs {unused_kwargs} are not expected by {self.processor.__class__.__name__} and will be ignored." 
) kwargs = {k: w for k, w in kwargs.items() if k in attn_parameters} return self.processor(self, hidden_states, encoder_hidden_states, attention_mask, image_rotary_emb, **kwargs) @maybe_allow_in_graph class OvisImageSingleTransformerBlock(nn.Module): def __init__(self, dim: int, num_attention_heads: int, attention_head_dim: int, mlp_ratio: float = 4.0): super().__init__() self.mlp_hidden_dim = int(dim * mlp_ratio) self.norm = AdaLayerNormZeroSingle(dim) self.proj_mlp = nn.Linear(dim, self.mlp_hidden_dim * 2) self.act_mlp = nn.SiLU() self.proj_out = nn.Linear(dim + self.mlp_hidden_dim, dim) self.attn = OvisImageAttention( query_dim=dim, dim_head=attention_head_dim, heads=num_attention_heads, out_dim=dim, bias=True, processor=OvisImageAttnProcessor(), eps=1e-6, pre_only=True, ) def forward( self, hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor, temb: torch.Tensor, image_rotary_emb: tuple[torch.Tensor, torch.Tensor] | None = None, joint_attention_kwargs: dict[str, Any] | None = None, ) -> tuple[torch.Tensor, torch.Tensor]: text_seq_len = encoder_hidden_states.shape[1] hidden_states = torch.cat([encoder_hidden_states, hidden_states], dim=1) residual = hidden_states norm_hidden_states, gate = self.norm(hidden_states, emb=temb) mlp_hidden_states, mlp_hidden_gate = torch.split( self.proj_mlp(norm_hidden_states), [self.mlp_hidden_dim, self.mlp_hidden_dim], dim=-1 ) mlp_hidden_states = self.act_mlp(mlp_hidden_gate) * mlp_hidden_states joint_attention_kwargs = joint_attention_kwargs or {} attn_output = self.attn( hidden_states=norm_hidden_states, image_rotary_emb=image_rotary_emb, **joint_attention_kwargs, ) hidden_states = torch.cat([attn_output, mlp_hidden_states], dim=2) gate = gate.unsqueeze(1) hidden_states = gate * self.proj_out(hidden_states) hidden_states = residual + hidden_states if hidden_states.dtype == torch.float16: hidden_states = hidden_states.clip(-65504, 65504) encoder_hidden_states, hidden_states = hidden_states[:, :text_seq_len], 
hidden_states[:, text_seq_len:] return encoder_hidden_states, hidden_states @maybe_allow_in_graph class OvisImageTransformerBlock(nn.Module): def __init__( self, dim: int, num_attention_heads: int, attention_head_dim: int, qk_norm: str = "rms_norm", eps: float = 1e-6 ): super().__init__() self.norm1 = AdaLayerNormZero(dim) self.norm1_context = AdaLayerNormZero(dim) self.attn = OvisImageAttention( query_dim=dim, added_kv_proj_dim=dim, dim_head=attention_head_dim, heads=num_attention_heads, out_dim=dim, context_pre_only=False, bias=True, processor=OvisImageAttnProcessor(), eps=eps, ) self.norm2 = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-6) self.ff = FeedForward(dim=dim, dim_out=dim, activation_fn="swiglu") self.norm2_context = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-6) self.ff_context = FeedForward(dim=dim, dim_out=dim, activation_fn="swiglu") def forward( self, hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor, temb: torch.Tensor, image_rotary_emb: tuple[torch.Tensor, torch.Tensor] | None = None, joint_attention_kwargs: dict[str, Any] | None = None, ) -> tuple[torch.Tensor, torch.Tensor]: norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( encoder_hidden_states, emb=temb ) joint_attention_kwargs = joint_attention_kwargs or {} # Attention. attention_outputs = self.attn( hidden_states=norm_hidden_states, encoder_hidden_states=norm_encoder_hidden_states, image_rotary_emb=image_rotary_emb, **joint_attention_kwargs, ) if len(attention_outputs) == 2: attn_output, context_attn_output = attention_outputs elif len(attention_outputs) == 3: attn_output, context_attn_output, ip_attn_output = attention_outputs # Process attention outputs for the `hidden_states`. 
attn_output = gate_msa.unsqueeze(1) * attn_output hidden_states = hidden_states + attn_output norm_hidden_states = self.norm2(hidden_states) norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None] ff_output = self.ff(norm_hidden_states) ff_output = gate_mlp.unsqueeze(1) * ff_output hidden_states = hidden_states + ff_output if len(attention_outputs) == 3: hidden_states = hidden_states + ip_attn_output # Process attention outputs for the `encoder_hidden_states`. context_attn_output = c_gate_msa.unsqueeze(1) * context_attn_output encoder_hidden_states = encoder_hidden_states + context_attn_output norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) norm_encoder_hidden_states = norm_encoder_hidden_states * (1 + c_scale_mlp[:, None]) + c_shift_mlp[:, None] context_ff_output = self.ff_context(norm_encoder_hidden_states) encoder_hidden_states = encoder_hidden_states + c_gate_mlp.unsqueeze(1) * context_ff_output if encoder_hidden_states.dtype == torch.float16: encoder_hidden_states = encoder_hidden_states.clip(-65504, 65504) return encoder_hidden_states, hidden_states class OvisImagePosEmbed(nn.Module): def __init__(self, theta: int, axes_dim: list[int]): super().__init__() self.theta = theta self.axes_dim = axes_dim def forward(self, ids: torch.Tensor) -> torch.Tensor: n_axes = ids.shape[-1] cos_out = [] sin_out = [] pos = ids.float() is_mps = ids.device.type == "mps" is_npu = ids.device.type == "npu" freqs_dtype = torch.float32 if (is_mps or is_npu) else torch.float64 for i in range(n_axes): cos, sin = get_1d_rotary_pos_embed( self.axes_dim[i], pos[:, i], theta=self.theta, repeat_interleave_real=True, use_real=True, freqs_dtype=freqs_dtype, ) cos_out.append(cos) sin_out.append(sin) freqs_cos = torch.cat(cos_out, dim=-1).to(ids.device) freqs_sin = torch.cat(sin_out, dim=-1).to(ids.device) return freqs_cos, freqs_sin class OvisImageTransformer2DModel( ModelMixin, ConfigMixin, PeftAdapterMixin, FromOriginalModelMixin, 
CacheMixin, ): """ The Transformer model introduced in Ovis-Image. Reference: https://github.com/AIDC-AI/Ovis-Image Args: patch_size (`int`, defaults to `1`): Patch size to turn the input data into small patches. in_channels (`int`, defaults to `64`): The number of channels in the input. out_channels (`int`, *optional*, defaults to `None`): The number of channels in the output. If not specified, it defaults to `in_channels`. num_layers (`int`, defaults to `6`): The number of layers of dual stream DiT blocks to use. num_single_layers (`int`, defaults to `27`): The number of layers of single stream DiT blocks to use. attention_head_dim (`int`, defaults to `128`): The number of dimensions to use for each attention head. num_attention_heads (`int`, defaults to `24`): The number of attention heads to use. joint_attention_dim (`int`, defaults to `2048`): The number of dimensions to use for the joint attention (embedding/channel dimension of `encoder_hidden_states`). axes_dims_rope (`tuple[int]`, defaults to `(16, 56, 56)`): The dimensions to use for the rotary positional embeddings. 
""" _supports_gradient_checkpointing = True _no_split_modules = ["OvisImageTransformerBlock", "OvisImageSingleTransformerBlock"] _skip_layerwise_casting_patterns = ["pos_embed", "norm"] _repeated_blocks = ["OvisImageTransformerBlock", "OvisImageSingleTransformerBlock"] @register_to_config def __init__( self, patch_size: int = 1, in_channels: int = 64, out_channels: int | None = 64, num_layers: int = 6, num_single_layers: int = 27, attention_head_dim: int = 128, num_attention_heads: int = 24, joint_attention_dim: int = 2048, axes_dims_rope: tuple[int, int, int] = (16, 56, 56), ): super().__init__() self.out_channels = out_channels or in_channels self.inner_dim = num_attention_heads * attention_head_dim self.pos_embed = OvisImagePosEmbed(theta=10000, axes_dim=axes_dims_rope) self.time_proj = Timesteps(num_channels=256, flip_sin_to_cos=True, downscale_freq_shift=0) self.timestep_embedder = TimestepEmbedding(in_channels=256, time_embed_dim=self.inner_dim) self.context_embedder_norm = nn.RMSNorm(joint_attention_dim, eps=1e-6) self.context_embedder = nn.Linear(joint_attention_dim, self.inner_dim) self.x_embedder = nn.Linear(in_channels, self.inner_dim) self.transformer_blocks = nn.ModuleList( [ OvisImageTransformerBlock( dim=self.inner_dim, num_attention_heads=num_attention_heads, attention_head_dim=attention_head_dim, ) for _ in range(num_layers) ] ) self.single_transformer_blocks = nn.ModuleList( [ OvisImageSingleTransformerBlock( dim=self.inner_dim, num_attention_heads=num_attention_heads, attention_head_dim=attention_head_dim, ) for _ in range(num_single_layers) ] ) self.norm_out = AdaLayerNormContinuous(self.inner_dim, self.inner_dim, elementwise_affine=False, eps=1e-6) self.proj_out = nn.Linear(self.inner_dim, patch_size * patch_size * self.out_channels, bias=True) self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor = None, timestep: torch.LongTensor = None, img_ids: torch.Tensor = None, txt_ids: 
torch.Tensor = None, return_dict: bool = True, ) -> torch.Tensor | Transformer2DModelOutput: """ The [`OvisImageTransformer2DModel`] forward method. Args: hidden_states (`torch.Tensor` of shape `(batch_size, image_sequence_length, in_channels)`): Input `hidden_states`. encoder_hidden_states (`torch.Tensor` of shape `(batch_size, text_sequence_length, joint_attention_dim)`): Conditional embeddings (embeddings computed from the input conditions such as prompts) to use. timestep (`torch.LongTensor`): Used to indicate denoising step. img_ids: (`torch.Tensor`): The position ids for image tokens. txt_ids (`torch.Tensor`): The position ids for text tokens. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~models.transformer_2d.Transformer2DModelOutput`] instead of a plain tuple. Returns: If `return_dict` is True, an [`~models.transformer_2d.Transformer2DModelOutput`] is returned, otherwise a `tuple` where the first element is the sample tensor. """ hidden_states = self.x_embedder(hidden_states) timestep = timestep.to(hidden_states.dtype) * 1000 timesteps_proj = self.time_proj(timestep) temb = self.timestep_embedder(timesteps_proj.to(dtype=hidden_states.dtype)) encoder_hidden_states = self.context_embedder_norm(encoder_hidden_states) encoder_hidden_states = self.context_embedder(encoder_hidden_states) if txt_ids.ndim == 3: logger.warning( "Passing `txt_ids` 3d torch.Tensor is deprecated." "Please remove the batch dimension and pass it as a 2d torch Tensor" ) txt_ids = txt_ids[0] if img_ids.ndim == 3: logger.warning( "Passing `img_ids` 3d torch.Tensor is deprecated." 
"Please remove the batch dimension and pass it as a 2d torch Tensor" ) img_ids = img_ids[0] ids = torch.cat((txt_ids, img_ids), dim=0) image_rotary_emb = self.pos_embed(ids) for index_block, block in enumerate(self.transformer_blocks): if torch.is_grad_enabled() and self.gradient_checkpointing: encoder_hidden_states, hidden_states = self._gradient_checkpointing_func( block, hidden_states, encoder_hidden_states, temb, image_rotary_emb, ) else: encoder_hidden_states, hidden_states = block( hidden_states=hidden_states, encoder_hidden_states=encoder_hidden_states, temb=temb, image_rotary_emb=image_rotary_emb, ) for index_block, block in enumerate(self.single_transformer_blocks): if torch.is_grad_enabled() and self.gradient_checkpointing: encoder_hidden_states, hidden_states = self._gradient_checkpointing_func( block, hidden_states, encoder_hidden_states, temb, image_rotary_emb, ) else: encoder_hidden_states, hidden_states = block( hidden_states=hidden_states, encoder_hidden_states=encoder_hidden_states, temb=temb, image_rotary_emb=image_rotary_emb, ) hidden_states = self.norm_out(hidden_states, temb) output = self.proj_out(hidden_states) if not return_dict: return (output,) return Transformer2DModelOutput(sample=output)
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/models/transformers/transformer_ovis_image.py", "license": "Apache License 2.0", "lines": 483, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:src/diffusers/pipelines/ovis_image/pipeline_output.py
# Copyright 2025 Alibaba Ovis-Image Team and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from dataclasses import dataclass

import numpy as np
import PIL.Image

from diffusers.utils import BaseOutput


@dataclass
class OvisImagePipelineOutput(BaseOutput):
    """
    Output class for Ovis-Image pipelines.

    Args:
        images (`list[PIL.Image.Image]` or `np.ndarray`)
            List of denoised PIL images of length `batch_size` or numpy array of shape `(batch_size, height, width,
            num_channels)`. PIL images or numpy array present the denoised images of the diffusion pipeline.
    """

    # Fixed: the original `list[PIL.Image.Image, np.ndarray]` was not a valid type
    # expression; the output is either a list of PIL images OR a single numpy array.
    images: list[PIL.Image.Image] | np.ndarray
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/pipelines/ovis_image/pipeline_output.py", "license": "Apache License 2.0", "lines": 27, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:src/diffusers/pipelines/ovis_image/pipeline_ovis_image.py
# Copyright 2025 Alibaba Ovis-Image Team and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect from typing import Any, Callable import numpy as np import torch from transformers import Qwen2TokenizerFast, Qwen3Model from ...image_processor import VaeImageProcessor from ...models import AutoencoderKL, OvisImageTransformer2DModel from ...schedulers import FlowMatchEulerDiscreteScheduler from ...utils import is_torch_xla_available, logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline from .pipeline_output import OvisImagePipelineOutput if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py >>> import torch >>> from diffusers import OvisImagePipeline >>> pipe = OvisImagePipeline.from_pretrained("AIDC-AI/Ovis-Image-7B", torch_dtype=torch.bfloat16) >>> pipe.to("cuda") >>> prompt = 'A creative 3D artistic render where the text "OVIS-IMAGE" is written in a bold, expressive handwritten brush style using thick, wet oil paint. The paint is a mix of vibrant rainbow colors (red, blue, yellow) swirling together like toothpaste or impasto art. You can see the ridges of the brush bristles and the glossy, wet texture of the paint. The background is a clean artist\'s canvas. 
Dynamic lighting creates soft shadows behind the floating paint strokes. Colorful, expressive, tactile texture, 4k detail.' >>> image = pipe(prompt, negative_prompt="", num_inference_steps=50, guidance_scale=5.0).images[0] >>> image.save("ovis_image.png") ``` """ def calculate_shift( image_seq_len, base_seq_len: int = 256, max_seq_len: int = 4096, base_shift: float = 0.5, max_shift: float = 1.15, ): m = (max_shift - base_shift) / (max_seq_len - base_seq_len) b = base_shift - m * base_seq_len mu = image_seq_len * m + b return mu # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps def retrieve_timesteps( scheduler, num_inference_steps: int | None = None, device: str | torch.device | None = None, timesteps: list[int] | None = None, sigmas: list[float] | None = None, **kwargs, ): r""" Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. Args: scheduler (`SchedulerMixin`): The scheduler to get timesteps from. num_inference_steps (`int`): The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` must be `None`. device (`str` or `torch.device`, *optional*): The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. timesteps (`list[int]`, *optional*): Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, `num_inference_steps` and `sigmas` must be `None`. sigmas (`list[float]`, *optional*): Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, `num_inference_steps` and `timesteps` must be `None`. Returns: `tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the second element is the number of inference steps. 
""" if timesteps is not None and sigmas is not None: raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") if timesteps is not None: accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" timestep schedules. Please check whether you are using the correct scheduler." ) scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif sigmas is not None: accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accept_sigmas: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" sigmas schedules. Please check whether you are using the correct scheduler." ) scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return timesteps, num_inference_steps class OvisImagePipeline( DiffusionPipeline, ): r""" The Ovis-Image pipeline for text-to-image generation. Reference: https://github.com/AIDC-AI/Ovis-Image Args: transformer ([`OvisImageTransformer2DModel`]): Conditional Transformer (MMDiT) architecture to denoise the encoded image latents. scheduler ([`FlowMatchEulerDiscreteScheduler`]): A scheduler to be used in combination with `transformer` to denoise the encoded image latents. vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. text_encoder ([`Qwen3Model`]): Text encoder of class [Qwen3Model](https://huggingface.co/docs/transformers/en/model_doc/qwen3#transformers.Qwen3Model). 
tokenizer (`Qwen2TokenizerFast`): Tokenizer of class [Qwen2TokenizerFast](https://huggingface.co/docs/transformers/en/model_doc/qwen2#transformers.Qwen2TokenizerFast). """ model_cpu_offload_seq = "text_encoder->transformer->vae" _optional_components = [] _callback_tensor_inputs = ["latents", "prompt_embeds"] def __init__( self, scheduler: FlowMatchEulerDiscreteScheduler, vae: AutoencoderKL, text_encoder: Qwen3Model, tokenizer: Qwen2TokenizerFast, transformer: OvisImageTransformer2DModel, ): super().__init__() self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, transformer=transformer, scheduler=scheduler, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8 # Ovis-Image latents are turned into 2x2 patches and packed. This means the latent width and height has to be divisible # by the patch size. So the vae scale factor is multiplied by the patch size to account for this self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor * 2) self.system_prompt = "Describe the image by detailing the color, quantity, text, shape, size, texture, spatial relationships of the objects and background: " self.user_prompt_begin_id = 28 self.tokenizer_max_length = 256 + self.user_prompt_begin_id self.default_sample_size = 128 def _get_messages( self, prompt: str | list[str] = None, ): prompt = [prompt] if isinstance(prompt, str) else prompt messages = [] for each_prompt in prompt: message = [ { "role": "user", "content": self.system_prompt + each_prompt, } ] message = self.tokenizer.apply_chat_template( message, tokenize=False, add_generation_prompt=True, enable_thinking=False ) messages.append(message) return messages def _get_ovis_prompt_embeds( self, prompt: str | list[str] = None, num_images_per_prompt: int = 1, device: torch.device | None = None, dtype: torch.dtype | None = None, ): device = device or self._execution_device dtype = dtype or self.text_encoder.dtype 
messages = self._get_messages(prompt) batch_size = len(messages) tokens = self.tokenizer( messages, padding="max_length", truncation=True, max_length=self.tokenizer_max_length, return_tensors="pt", add_special_tokens=False, ) input_ids = tokens.input_ids.to(device) attention_mask = tokens.attention_mask.to(device) outputs = self.text_encoder( input_ids=input_ids, attention_mask=attention_mask, ) prompt_embeds = outputs.last_hidden_state prompt_embeds = prompt_embeds * attention_mask[..., None] prompt_embeds = prompt_embeds[:, self.user_prompt_begin_id :, :] _, seq_len, _ = prompt_embeds.shape # duplicate text embeddings and attention mask for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) return prompt_embeds def encode_prompt( self, prompt: str | list[str], device: torch.device | None = None, num_images_per_prompt: int = 1, prompt_embeds: torch.FloatTensor | None = None, ): r""" Args: prompt (`str`, *optional*): prompt to be encoded device: (`torch.device`): torch device num_images_per_prompt (`int`): number of images that should be generated per prompt prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. 
""" device = device or self._execution_device if prompt_embeds is None: prompt_embeds = self._get_ovis_prompt_embeds( prompt=prompt, device=device, num_images_per_prompt=num_images_per_prompt, ) dtype = self.text_encoder.dtype if self.text_encoder is not None else self.transformer.dtype text_ids = torch.zeros(prompt_embeds.shape[1], 3) text_ids[..., 1] = text_ids[..., 1] + torch.arange(prompt_embeds.shape[1])[None, :] text_ids[..., 2] = text_ids[..., 2] + torch.arange(prompt_embeds.shape[1])[None, :] text_ids = text_ids.to(device=device, dtype=dtype) return prompt_embeds, text_ids def check_inputs( self, prompt, height, width, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, callback_on_step_end_tensor_inputs=None, max_sequence_length=None, ): if height % (self.vae_scale_factor * 2) != 0 or width % (self.vae_scale_factor * 2) != 0: logger.warning( f"`height` and `width` have to be divisible by {self.vae_scale_factor * 2} but are {height} and {width}. Dimensions will be resized accordingly" ) if callback_on_step_end_tensor_inputs is not None and not all( k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs ): raise ValueError( f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" ) if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." 
) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) if max_sequence_length is not None and max_sequence_length > 256: raise ValueError(f"`max_sequence_length` cannot be greater than 256 but is {max_sequence_length}") @staticmethod def _prepare_latent_image_ids(batch_size, height, width, device, dtype): latent_image_ids = torch.zeros(height, width, 3) latent_image_ids[..., 1] = latent_image_ids[..., 1] + torch.arange(height)[:, None] latent_image_ids[..., 2] = latent_image_ids[..., 2] + torch.arange(width)[None, :] latent_image_id_height, latent_image_id_width, latent_image_id_channels = latent_image_ids.shape latent_image_ids = latent_image_ids.reshape( latent_image_id_height * latent_image_id_width, latent_image_id_channels ) return latent_image_ids.to(device=device, dtype=dtype) @staticmethod def _pack_latents(latents, batch_size, num_channels_latents, height, width): latents = latents.view(batch_size, num_channels_latents, height // 2, 2, width // 2, 2) latents = latents.permute(0, 2, 4, 1, 3, 5) latents = latents.reshape(batch_size, (height // 2) * (width // 2), num_channels_latents * 4) return latents @staticmethod def _unpack_latents(latents, height, width, vae_scale_factor): batch_size, num_patches, channels = latents.shape # VAE applies 8x compression on images but we must also account for packing which requires # latent height and width to be divisible by 2. 
height = 2 * (int(height) // (vae_scale_factor * 2)) width = 2 * (int(width) // (vae_scale_factor * 2)) latents = latents.view(batch_size, height // 2, width // 2, channels // 4, 2, 2) latents = latents.permute(0, 3, 1, 4, 2, 5) latents = latents.reshape(batch_size, channels // (2 * 2), height, width) return latents def prepare_latents( self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None, ): # VAE applies 8x compression on images but we must also account for packing which requires # latent height and width to be divisible by 2. height = 2 * (int(height) // (self.vae_scale_factor * 2)) width = 2 * (int(width) // (self.vae_scale_factor * 2)) shape = (batch_size, num_channels_latents, height, width) if latents is not None: latent_image_ids = self._prepare_latent_image_ids(batch_size, height // 2, width // 2, device, dtype) return latents.to(device=device, dtype=dtype), latent_image_ids if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
) latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) latents = self._pack_latents(latents, batch_size, num_channels_latents, height, width) latent_image_ids = self._prepare_latent_image_ids(batch_size, height // 2, width // 2, device, dtype) return latents, latent_image_ids @property def guidance_scale(self): return self._guidance_scale @property def joint_attention_kwargs(self): return self._joint_attention_kwargs @property def num_timesteps(self): return self._num_timesteps @property def current_timestep(self): return self._current_timestep @property def interrupt(self): return self._interrupt @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, prompt: str | list[str] = None, negative_prompt: str | list[str] = "", guidance_scale: float = 5.0, height: int | None = None, width: int | None = None, num_inference_steps: int = 50, sigmas: list[float] | None = None, num_images_per_prompt: int | None = 1, generator: torch.Generator | list[torch.Generator] | None = None, latents: torch.FloatTensor | None = None, prompt_embeds: torch.FloatTensor | None = None, negative_prompt_embeds: torch.FloatTensor | None = None, output_type: str | None = "pil", return_dict: bool = True, joint_attention_kwargs: dict[str, Any] | None = None, callback_on_step_end: Callable[[int, int], None] | None = None, callback_on_step_end_tensor_inputs: list[str] = ["latents"], max_sequence_length: int = 256, ): r""" Function invoked when calling the pipeline for generation. Args: prompt (`str` or `list[str]`, *optional*): The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. instead. negative_prompt (`str` or `list[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is not greater than `1`). 
guidance_scale (`float`, *optional*, defaults to 1.0): True classifier-free guidance (guidance scale) is enabled when `guidance_scale` > 1 and `negative_prompt` is provided. height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): The height in pixels of the generated image. This is set to 1024 by default for the best results. width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): The width in pixels of the generated image. This is set to 1024 by default for the best results. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. sigmas (`list[float]`, *optional*): Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed will be used. num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. generator (`torch.Generator` or `list[torch.Generator]`, *optional*): One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative text embeddings. 
Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.flux.FluxPipelineOutput`] instead of a plain tuple. joint_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). callback_on_step_end (`Callable`, *optional*): A function that calls at the end of each denoising steps during the inference. The function is called with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by `callback_on_step_end_tensor_inputs`. callback_on_step_end_tensor_inputs (`List`, *optional*): The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the `._callback_tensor_inputs` attribute of your pipeline class. max_sequence_length (`int` defaults to 512): Maximum sequence length to use with the `prompt`. Examples: Returns: [`~pipelines.ovis_image.OvisImagePipelineOutput`] or `tuple`: [`~pipelines.ovis_image.OvisImagePipelineOutput`] if `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is a list with the generated images. 
""" height = height or self.default_sample_size * self.vae_scale_factor width = width or self.default_sample_size * self.vae_scale_factor # 1. Check inputs. Raise error if not correct self.check_inputs( prompt, height, width, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, max_sequence_length=max_sequence_length, ) self._joint_attention_kwargs = joint_attention_kwargs self._current_timestep = None self._interrupt = False # 2. Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device do_classifier_free_guidance = guidance_scale > 1 ( prompt_embeds, text_ids, ) = self.encode_prompt( prompt=prompt, prompt_embeds=prompt_embeds, device=device, num_images_per_prompt=num_images_per_prompt, ) if do_classifier_free_guidance: ( negative_prompt_embeds, negative_text_ids, ) = self.encode_prompt( prompt=negative_prompt, prompt_embeds=negative_prompt_embeds, device=device, num_images_per_prompt=num_images_per_prompt, ) # 4. Prepare latent variables num_channels_latents = self.transformer.config.in_channels // 4 latents, latent_image_ids = self.prepare_latents( batch_size * num_images_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents, ) # 5. 
Prepare timesteps sigmas = np.linspace(1.0, 1 / num_inference_steps, num_inference_steps) if sigmas is None else sigmas if hasattr(self.scheduler.config, "use_flow_sigmas") and self.scheduler.config.use_flow_sigmas: sigmas = None image_seq_len = latents.shape[1] mu = calculate_shift( image_seq_len, self.scheduler.config.get("base_image_seq_len", 256), self.scheduler.config.get("max_image_seq_len", 4096), self.scheduler.config.get("base_shift", 0.5), self.scheduler.config.get("max_shift", 1.15), ) timesteps, num_inference_steps = retrieve_timesteps( self.scheduler, num_inference_steps, device, sigmas=sigmas, mu=mu, ) num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) self._num_timesteps = len(timesteps) if self.joint_attention_kwargs is None: self._joint_attention_kwargs = {} # 6. Denoising loop # We set the index here to remove DtoH sync, helpful especially during compilation. # Check out more details here: https://github.com/huggingface/diffusers/pull/11696 self.scheduler.set_begin_index(0) with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): if self.interrupt: continue self._current_timestep = t # broadcast to batch dimension in a way that's compatible with ONNX/Core ML timestep = t.expand(latents.shape[0]).to(latents.dtype) with self.transformer.cache_context("cond"): noise_pred = self.transformer( hidden_states=latents, timestep=timestep / 1000, encoder_hidden_states=prompt_embeds, txt_ids=text_ids, img_ids=latent_image_ids, return_dict=False, )[0] if do_classifier_free_guidance: with self.transformer.cache_context("uncond"): neg_noise_pred = self.transformer( hidden_states=latents, timestep=timestep / 1000, encoder_hidden_states=negative_prompt_embeds, txt_ids=negative_text_ids, img_ids=latent_image_ids, return_dict=False, )[0] noise_pred = neg_noise_pred + guidance_scale * (noise_pred - neg_noise_pred) # compute the previous noisy sample x_t -> x_t-1 latents_dtype = 
latents.dtype latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0] if latents.dtype != latents_dtype: if torch.backends.mps.is_available(): # some platforms (eg. apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272 latents = latents.to(latents_dtype) if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop("latents", latents) prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if XLA_AVAILABLE: xm.mark_step() self._current_timestep = None if output_type == "latent": image = latents else: latents = self._unpack_latents(latents, height, width, self.vae_scale_factor) latents = (latents / self.vae.config.scaling_factor) + self.vae.config.shift_factor image = self.vae.decode(latents, return_dict=False)[0] image = self.image_processor.postprocess(image, output_type=output_type) # Offload all models self.maybe_free_model_hooks() if not return_dict: return (image,) return OvisImagePipelineOutput(images=image)
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/pipelines/ovis_image/pipeline_ovis_image.py", "license": "Apache License 2.0", "lines": 579, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:tests/lora/test_lora_layers_z_image.py
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys import unittest import numpy as np import torch from transformers import Qwen2Tokenizer, Qwen3Config, Qwen3Model from diffusers import AutoencoderKL, FlowMatchEulerDiscreteScheduler, ZImagePipeline, ZImageTransformer2DModel from ..testing_utils import floats_tensor, is_peft_available, require_peft_backend, skip_mps, torch_device if is_peft_available(): from peft import LoraConfig sys.path.append(".") from .utils import PeftLoraLoaderMixinTests, check_if_lora_correctly_set # noqa: E402 @require_peft_backend class ZImageLoRATests(unittest.TestCase, PeftLoraLoaderMixinTests): pipeline_class = ZImagePipeline scheduler_cls = FlowMatchEulerDiscreteScheduler scheduler_kwargs = {} transformer_kwargs = { "all_patch_size": (2,), "all_f_patch_size": (1,), "in_channels": 16, "dim": 32, "n_layers": 2, "n_refiner_layers": 1, "n_heads": 2, "n_kv_heads": 2, "norm_eps": 1e-5, "qk_norm": True, "cap_feat_dim": 16, "rope_theta": 256.0, "t_scale": 1000.0, "axes_dims": [8, 4, 4], "axes_lens": [256, 32, 32], } transformer_cls = ZImageTransformer2DModel vae_kwargs = { "in_channels": 3, "out_channels": 3, "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"], "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"], "block_out_channels": [32, 64], "layers_per_block": 1, "latent_channels": 16, "norm_num_groups": 32, "sample_size": 32, "scaling_factor": 0.3611, "shift_factor": 0.1159, } 
vae_cls = AutoencoderKL tokenizer_cls, tokenizer_id = Qwen2Tokenizer, "hf-internal-testing/tiny-random-Qwen2VLForConditionalGeneration" text_encoder_cls, text_encoder_id = Qwen3Model, None # Will be created inline denoiser_target_modules = ["to_q", "to_k", "to_v", "to_out.0"] supports_text_encoder_loras = False @property def output_shape(self): return (1, 32, 32, 3) def get_dummy_inputs(self, with_generator=True): batch_size = 1 sequence_length = 10 num_channels = 4 sizes = (32, 32) generator = torch.manual_seed(0) noise = floats_tensor((batch_size, num_channels) + sizes) input_ids = torch.randint(1, sequence_length, size=(batch_size, sequence_length), generator=generator) pipeline_inputs = { "prompt": "A painting of a squirrel eating a burger", "num_inference_steps": 4, "guidance_scale": 0.0, "height": 32, "width": 32, "max_sequence_length": 16, "output_type": "np", } if with_generator: pipeline_inputs.update({"generator": generator}) return noise, input_ids, pipeline_inputs def get_dummy_components(self, scheduler_cls=None, use_dora=False, lora_alpha=None): # Override to create Qwen3Model inline since it doesn't have a pretrained tiny model torch.manual_seed(0) config = Qwen3Config( hidden_size=16, intermediate_size=16, num_hidden_layers=2, num_attention_heads=2, num_key_value_heads=2, vocab_size=151936, max_position_embeddings=512, ) text_encoder = Qwen3Model(config) tokenizer = Qwen2Tokenizer.from_pretrained(self.tokenizer_id) transformer = self.transformer_cls(**self.transformer_kwargs) # `x_pad_token` and `cap_pad_token` are initialized with `torch.empty`. # This can cause NaN data values in our testing environment. Fixating them # helps prevent that issue. 
with torch.no_grad(): transformer.x_pad_token.copy_(torch.ones_like(transformer.x_pad_token.data)) transformer.cap_pad_token.copy_(torch.ones_like(transformer.cap_pad_token.data)) vae = self.vae_cls(**self.vae_kwargs) if scheduler_cls is None: scheduler_cls = self.scheduler_cls scheduler = scheduler_cls(**self.scheduler_kwargs) rank = 4 lora_alpha = rank if lora_alpha is None else lora_alpha text_lora_config = LoraConfig( r=rank, lora_alpha=lora_alpha, target_modules=["q_proj", "k_proj", "v_proj", "o_proj"], init_lora_weights=False, use_dora=use_dora, ) denoiser_lora_config = LoraConfig( r=rank, lora_alpha=lora_alpha, target_modules=self.denoiser_target_modules, init_lora_weights=False, use_dora=use_dora, ) pipeline_components = { "transformer": transformer, "vae": vae, "scheduler": scheduler, "text_encoder": text_encoder, "tokenizer": tokenizer, } return pipeline_components, text_lora_config, denoiser_lora_config def test_correct_lora_configs_with_different_ranks(self): components, _, denoiser_lora_config = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) _, _, inputs = self.get_dummy_inputs(with_generator=False) original_output = pipe(**inputs, generator=torch.manual_seed(0))[0] pipe.transformer.add_adapter(denoiser_lora_config, "adapter-1") lora_output_same_rank = pipe(**inputs, generator=torch.manual_seed(0))[0] pipe.transformer.delete_adapters("adapter-1") denoiser = pipe.unet if self.unet_kwargs is not None else pipe.transformer for name, _ in denoiser.named_modules(): if "to_k" in name and "attention" in name and "lora" not in name: module_name_to_rank_update = name.replace(".base_layer.", ".") break # change the rank_pattern updated_rank = denoiser_lora_config.r * 2 denoiser_lora_config.rank_pattern = {module_name_to_rank_update: updated_rank} pipe.transformer.add_adapter(denoiser_lora_config, "adapter-1") updated_rank_pattern = 
pipe.transformer.peft_config["adapter-1"].rank_pattern self.assertTrue(updated_rank_pattern == {module_name_to_rank_update: updated_rank}) lora_output_diff_rank = pipe(**inputs, generator=torch.manual_seed(0))[0] self.assertTrue(not np.allclose(original_output, lora_output_same_rank, atol=1e-3, rtol=1e-3)) self.assertTrue(not np.allclose(lora_output_diff_rank, lora_output_same_rank, atol=1e-3, rtol=1e-3)) pipe.transformer.delete_adapters("adapter-1") # similarly change the alpha_pattern updated_alpha = denoiser_lora_config.lora_alpha * 2 denoiser_lora_config.alpha_pattern = {module_name_to_rank_update: updated_alpha} pipe.transformer.add_adapter(denoiser_lora_config, "adapter-1") self.assertTrue( pipe.transformer.peft_config["adapter-1"].alpha_pattern == {module_name_to_rank_update: updated_alpha} ) lora_output_diff_alpha = pipe(**inputs, generator=torch.manual_seed(0))[0] self.assertTrue(not np.allclose(original_output, lora_output_diff_alpha, atol=1e-3, rtol=1e-3)) self.assertTrue(not np.allclose(lora_output_diff_alpha, lora_output_same_rank, atol=1e-3, rtol=1e-3)) @skip_mps def test_lora_fuse_nan(self): components, _, denoiser_lora_config = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) _, _, inputs = self.get_dummy_inputs(with_generator=False) denoiser = pipe.transformer if self.unet_kwargs is None else pipe.unet denoiser.add_adapter(denoiser_lora_config, "adapter-1") self.assertTrue(check_if_lora_correctly_set(denoiser), "Lora not correctly set in denoiser.") # corrupt one LoRA weight with `inf` values with torch.no_grad(): possible_tower_names = ["noise_refiner"] filtered_tower_names = [ tower_name for tower_name in possible_tower_names if hasattr(pipe.transformer, tower_name) ] for tower_name in filtered_tower_names: transformer_tower = getattr(pipe.transformer, tower_name) transformer_tower[0].attention.to_q.lora_A["adapter-1"].weight += float("inf") # with 
`safe_fusing=True` we should see an Error with self.assertRaises(ValueError): pipe.fuse_lora(components=self.pipeline_class._lora_loadable_modules, safe_fusing=True) # without we should not see an error, but every image will be black pipe.fuse_lora(components=self.pipeline_class._lora_loadable_modules, safe_fusing=False) out = pipe(**inputs)[0] self.assertTrue(np.isnan(out).all()) def test_lora_scale_kwargs_match_fusion(self): super().test_lora_scale_kwargs_match_fusion(5e-2, 5e-2) @unittest.skip("Needs to be debugged.") def test_set_adapters_match_attention_kwargs(self): super().test_set_adapters_match_attention_kwargs() @unittest.skip("Needs to be debugged.") def test_simple_inference_with_text_denoiser_lora_and_scale(self): super().test_simple_inference_with_text_denoiser_lora_and_scale() @unittest.skip("Not supported in ZImage.") def test_simple_inference_with_text_denoiser_block_scale(self): pass @unittest.skip("Not supported in ZImage.") def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self): pass @unittest.skip("Not supported in ZImage.") def test_modify_padding_mode(self): pass
{ "repo_id": "huggingface/diffusers", "file_path": "tests/lora/test_lora_layers_z_image.py", "license": "Apache License 2.0", "lines": 216, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/diffusers:src/diffusers/pipelines/flux2/system_messages.py
# docstyle-ignore """ These system prompts come from: https://github.com/black-forest-labs/flux2/blob/5a5d316b1b42f6b59a8c9194b77c8256be848432/src/flux2/system_messages.py#L54 """ # docstyle-ignore SYSTEM_MESSAGE = """You are an AI that reasons about image descriptions. You give structured responses focusing on object relationships, object attribution and actions without speculation.""" # docstyle-ignore SYSTEM_MESSAGE_UPSAMPLING_T2I = """You are an expert prompt engineer for FLUX.2 by Black Forest Labs. Rewrite user prompts to be more descriptive while strictly preserving their core subject and intent. Guidelines: 1. Structure: Keep structured inputs structured (enhance within fields). Convert natural language to detailed paragraphs. 2. Details: Add concrete visual specifics - form, scale, textures, materials, lighting (quality, direction, color), shadows, spatial relationships, and environmental context. 3. Text in Images: Put ALL text in quotation marks, matching the prompt's language. Always provide explicit quoted text for objects that would contain text in reality (signs, labels, screens, etc.) - without it, the model generates gibberish. Output only the revised prompt and nothing else.""" # docstyle-ignore SYSTEM_MESSAGE_UPSAMPLING_I2I = """You are FLUX.2 by Black Forest Labs, an image-editing expert. You convert editing requests into one concise instruction (50-80 words, ~30 for brief requests). Rules: - Single instruction only, no commentary - Use clear, analytical language (avoid "whimsical," "cascading," etc.) - Specify what changes AND what stays the same (face, lighting, composition) - Reference actual image elements - Turn negatives into positives ("don't change X" → "keep X") - Make abstractions concrete ("futuristic" → "glowing cyan neon, metallic panels") - Keep content PG-13 Output only the final instruction in plain text and nothing else."""
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/pipelines/flux2/system_messages.py", "license": "Apache License 2.0", "lines": 26, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
documentation
huggingface/diffusers:src/diffusers/models/autoencoders/autoencoder_kl_hunyuanvideo15.py
# Copyright 2025 The Hunyuan Team and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.checkpoint from ...configuration_utils import ConfigMixin, register_to_config from ...utils import logging from ...utils.accelerate_utils import apply_forward_hook from ..activations import get_activation from ..modeling_outputs import AutoencoderKLOutput from ..modeling_utils import ModelMixin from .vae import AutoencoderMixin, DecoderOutput, DiagonalGaussianDistribution logger = logging.get_logger(__name__) # pylint: disable=invalid-name class HunyuanVideo15CausalConv3d(nn.Module): def __init__( self, in_channels: int, out_channels: int, kernel_size: int | tuple[int, int, int] = 3, stride: int | tuple[int, int, int] = 1, padding: int | tuple[int, int, int] = 0, dilation: int | tuple[int, int, int] = 1, bias: bool = True, pad_mode: str = "replicate", ) -> None: super().__init__() kernel_size = (kernel_size, kernel_size, kernel_size) if isinstance(kernel_size, int) else kernel_size self.pad_mode = pad_mode self.time_causal_padding = ( kernel_size[0] // 2, kernel_size[0] // 2, kernel_size[1] // 2, kernel_size[1] // 2, kernel_size[2] - 1, 0, ) self.conv = nn.Conv3d(in_channels, out_channels, kernel_size, stride, padding, dilation, bias=bias) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = F.pad(hidden_states, 
self.time_causal_padding, mode=self.pad_mode) return self.conv(hidden_states) class HunyuanVideo15RMS_norm(nn.Module): r""" A custom RMS normalization layer. Args: dim (int): The number of dimensions to normalize over. channel_first (bool, optional): Whether the input tensor has channels as the first dimension. Default is True. images (bool, optional): Whether the input represents image data. Default is True. bias (bool, optional): Whether to include a learnable bias term. Default is False. """ def __init__(self, dim: int, channel_first: bool = True, images: bool = True, bias: bool = False) -> None: super().__init__() broadcastable_dims = (1, 1, 1) if not images else (1, 1) shape = (dim, *broadcastable_dims) if channel_first else (dim,) self.channel_first = channel_first self.scale = dim**0.5 self.gamma = nn.Parameter(torch.ones(shape)) self.bias = nn.Parameter(torch.zeros(shape)) if bias else 0.0 def forward(self, x): return F.normalize(x, dim=(1 if self.channel_first else -1)) * self.scale * self.gamma + self.bias class HunyuanVideo15AttnBlock(nn.Module): def __init__(self, in_channels: int): super().__init__() self.in_channels = in_channels self.norm = HunyuanVideo15RMS_norm(in_channels, images=False) self.to_q = nn.Conv3d(in_channels, in_channels, kernel_size=1) self.to_k = nn.Conv3d(in_channels, in_channels, kernel_size=1) self.to_v = nn.Conv3d(in_channels, in_channels, kernel_size=1) self.proj_out = nn.Conv3d(in_channels, in_channels, kernel_size=1) @staticmethod def prepare_causal_attention_mask(n_frame: int, n_hw: int, dtype, device, batch_size: int = None): """Prepare a causal attention mask for 3D videos. Args: n_frame (int): Number of frames (temporal length). n_hw (int): Product of height and width. dtype: Desired mask dtype. device: Device for the mask. batch_size (int, optional): If set, expands for batch. Returns: torch.Tensor: Causal attention mask. 
""" seq_len = n_frame * n_hw mask = torch.full((seq_len, seq_len), float("-inf"), dtype=dtype, device=device) for i in range(seq_len): i_frame = i // n_hw mask[i, : (i_frame + 1) * n_hw] = 0 if batch_size is not None: mask = mask.unsqueeze(0).expand(batch_size, -1, -1) return mask def forward(self, x: torch.Tensor) -> torch.Tensor: identity = x x = self.norm(x) query = self.to_q(x) key = self.to_k(x) value = self.to_v(x) batch_size, channels, frames, height, width = query.shape query = query.reshape(batch_size, channels, frames * height * width).permute(0, 2, 1).unsqueeze(1).contiguous() key = key.reshape(batch_size, channels, frames * height * width).permute(0, 2, 1).unsqueeze(1).contiguous() value = value.reshape(batch_size, channels, frames * height * width).permute(0, 2, 1).unsqueeze(1).contiguous() attention_mask = self.prepare_causal_attention_mask( frames, height * width, query.dtype, query.device, batch_size=batch_size ) x = nn.functional.scaled_dot_product_attention(query, key, value, attn_mask=attention_mask) # batch_size, 1, frames * height * width, channels x = x.squeeze(1).reshape(batch_size, frames, height, width, channels).permute(0, 4, 1, 2, 3) x = self.proj_out(x) return x + identity class HunyuanVideo15Upsample(nn.Module): def __init__(self, in_channels: int, out_channels: int, add_temporal_upsample: bool = True): super().__init__() factor = 2 * 2 * 2 if add_temporal_upsample else 1 * 2 * 2 self.conv = HunyuanVideo15CausalConv3d(in_channels, out_channels * factor, kernel_size=3) self.add_temporal_upsample = add_temporal_upsample self.repeats = factor * out_channels // in_channels @staticmethod def _dcae_upsample_rearrange(tensor, r1=1, r2=2, r3=2): """ Convert (b, r1*r2*r3*c, f, h, w) -> (b, c, r1*f, r2*h, r3*w) Args: tensor: Input tensor of shape (b, r1*r2*r3*c, f, h, w) r1: temporal upsampling factor r2: height upsampling factor r3: width upsampling factor """ b, packed_c, f, h, w = tensor.shape factor = r1 * r2 * r3 c = packed_c // factor 
tensor = tensor.view(b, r1, r2, r3, c, f, h, w) tensor = tensor.permute(0, 4, 5, 1, 6, 2, 7, 3) return tensor.reshape(b, c, f * r1, h * r2, w * r3) def forward(self, x: torch.Tensor): r1 = 2 if self.add_temporal_upsample else 1 h = self.conv(x) if self.add_temporal_upsample: h_first = h[:, :, :1, :, :] h_first = self._dcae_upsample_rearrange(h_first, r1=1, r2=2, r3=2) h_first = h_first[:, : h_first.shape[1] // 2] h_next = h[:, :, 1:, :, :] h_next = self._dcae_upsample_rearrange(h_next, r1=r1, r2=2, r3=2) h = torch.cat([h_first, h_next], dim=2) # shortcut computation x_first = x[:, :, :1, :, :] x_first = self._dcae_upsample_rearrange(x_first, r1=1, r2=2, r3=2) x_first = x_first.repeat_interleave(repeats=self.repeats // 2, dim=1) x_next = x[:, :, 1:, :, :] x_next = self._dcae_upsample_rearrange(x_next, r1=r1, r2=2, r3=2) x_next = x_next.repeat_interleave(repeats=self.repeats, dim=1) shortcut = torch.cat([x_first, x_next], dim=2) else: h = self._dcae_upsample_rearrange(h, r1=r1, r2=2, r3=2) shortcut = x.repeat_interleave(repeats=self.repeats, dim=1) shortcut = self._dcae_upsample_rearrange(shortcut, r1=r1, r2=2, r3=2) return h + shortcut class HunyuanVideo15Downsample(nn.Module): def __init__(self, in_channels: int, out_channels: int, add_temporal_downsample: bool = True): super().__init__() factor = 2 * 2 * 2 if add_temporal_downsample else 1 * 2 * 2 self.conv = HunyuanVideo15CausalConv3d(in_channels, out_channels // factor, kernel_size=3) self.add_temporal_downsample = add_temporal_downsample self.group_size = factor * in_channels // out_channels @staticmethod def _dcae_downsample_rearrange(tensor, r1=1, r2=2, r3=2): """ Convert (b, c, r1*f, r2*h, r3*w) -> (b, r1*r2*r3*c, f, h, w) This packs spatial/temporal dimensions into channels (opposite of upsample) """ b, c, packed_f, packed_h, packed_w = tensor.shape f, h, w = packed_f // r1, packed_h // r2, packed_w // r3 tensor = tensor.view(b, c, f, r1, h, r2, w, r3) tensor = tensor.permute(0, 3, 5, 7, 1, 2, 4, 6) return 
tensor.reshape(b, r1 * r2 * r3 * c, f, h, w) def forward(self, x: torch.Tensor): r1 = 2 if self.add_temporal_downsample else 1 h = self.conv(x) if self.add_temporal_downsample: h_first = h[:, :, :1, :, :] h_first = self._dcae_downsample_rearrange(h_first, r1=1, r2=2, r3=2) h_first = torch.cat([h_first, h_first], dim=1) h_next = h[:, :, 1:, :, :] h_next = self._dcae_downsample_rearrange(h_next, r1=r1, r2=2, r3=2) h = torch.cat([h_first, h_next], dim=2) # shortcut computation x_first = x[:, :, :1, :, :] x_first = self._dcae_downsample_rearrange(x_first, r1=1, r2=2, r3=2) B, C, T, H, W = x_first.shape x_first = x_first.view(B, h.shape[1], self.group_size // 2, T, H, W).mean(dim=2) x_next = x[:, :, 1:, :, :] x_next = self._dcae_downsample_rearrange(x_next, r1=r1, r2=2, r3=2) B, C, T, H, W = x_next.shape x_next = x_next.view(B, h.shape[1], self.group_size, T, H, W).mean(dim=2) shortcut = torch.cat([x_first, x_next], dim=2) else: h = self._dcae_downsample_rearrange(h, r1=r1, r2=2, r3=2) shortcut = self._dcae_downsample_rearrange(x, r1=r1, r2=2, r3=2) B, C, T, H, W = shortcut.shape shortcut = shortcut.view(B, h.shape[1], self.group_size, T, H, W).mean(dim=2) return h + shortcut class HunyuanVideo15ResnetBlock(nn.Module): def __init__( self, in_channels: int, out_channels: int | None = None, non_linearity: str = "swish", ) -> None: super().__init__() out_channels = out_channels or in_channels self.nonlinearity = get_activation(non_linearity) self.norm1 = HunyuanVideo15RMS_norm(in_channels, images=False) self.conv1 = HunyuanVideo15CausalConv3d(in_channels, out_channels, kernel_size=3) self.norm2 = HunyuanVideo15RMS_norm(out_channels, images=False) self.conv2 = HunyuanVideo15CausalConv3d(out_channels, out_channels, kernel_size=3) self.conv_shortcut = None if in_channels != out_channels: self.conv_shortcut = nn.Conv3d(in_channels, out_channels, kernel_size=1, stride=1, padding=0) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: residual = hidden_states 
hidden_states = self.norm1(hidden_states) hidden_states = self.nonlinearity(hidden_states) hidden_states = self.conv1(hidden_states) hidden_states = self.norm2(hidden_states) hidden_states = self.nonlinearity(hidden_states) hidden_states = self.conv2(hidden_states) if self.conv_shortcut is not None: residual = self.conv_shortcut(residual) return hidden_states + residual class HunyuanVideo15MidBlock(nn.Module): def __init__( self, in_channels: int, num_layers: int = 1, add_attention: bool = True, ) -> None: super().__init__() self.add_attention = add_attention # There is always at least one resnet resnets = [ HunyuanVideo15ResnetBlock( in_channels=in_channels, out_channels=in_channels, ) ] attentions = [] for _ in range(num_layers): if self.add_attention: attentions.append(HunyuanVideo15AttnBlock(in_channels)) else: attentions.append(None) resnets.append( HunyuanVideo15ResnetBlock( in_channels=in_channels, out_channels=in_channels, ) ) self.attentions = nn.ModuleList(attentions) self.resnets = nn.ModuleList(resnets) self.gradient_checkpointing = False def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.resnets[0](hidden_states) for attn, resnet in zip(self.attentions, self.resnets[1:]): if attn is not None: hidden_states = attn(hidden_states) hidden_states = resnet(hidden_states) return hidden_states class HunyuanVideo15DownBlock3D(nn.Module): def __init__( self, in_channels: int, out_channels: int, num_layers: int = 1, downsample_out_channels: int | None = None, add_temporal_downsample: int = True, ) -> None: super().__init__() resnets = [] for i in range(num_layers): in_channels = in_channels if i == 0 else out_channels resnets.append( HunyuanVideo15ResnetBlock( in_channels=in_channels, out_channels=out_channels, ) ) self.resnets = nn.ModuleList(resnets) if downsample_out_channels is not None: self.downsamplers = nn.ModuleList( [ HunyuanVideo15Downsample( out_channels, out_channels=downsample_out_channels, 
add_temporal_downsample=add_temporal_downsample, ) ] ) else: self.downsamplers = None self.gradient_checkpointing = False def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: for resnet in self.resnets: hidden_states = resnet(hidden_states) if self.downsamplers is not None: for downsampler in self.downsamplers: hidden_states = downsampler(hidden_states) return hidden_states class HunyuanVideo15UpBlock3D(nn.Module): def __init__( self, in_channels: int, out_channels: int, num_layers: int = 1, upsample_out_channels: int | None = None, add_temporal_upsample: bool = True, ) -> None: super().__init__() resnets = [] for i in range(num_layers): input_channels = in_channels if i == 0 else out_channels resnets.append( HunyuanVideo15ResnetBlock( in_channels=input_channels, out_channels=out_channels, ) ) self.resnets = nn.ModuleList(resnets) if upsample_out_channels is not None: self.upsamplers = nn.ModuleList( [ HunyuanVideo15Upsample( out_channels, out_channels=upsample_out_channels, add_temporal_upsample=add_temporal_upsample, ) ] ) else: self.upsamplers = None self.gradient_checkpointing = False def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: if torch.is_grad_enabled() and self.gradient_checkpointing: for resnet in self.resnets: hidden_states = self._gradient_checkpointing_func(resnet, hidden_states) else: for resnet in self.resnets: hidden_states = resnet(hidden_states) if self.upsamplers is not None: for upsampler in self.upsamplers: hidden_states = upsampler(hidden_states) return hidden_states class HunyuanVideo15Encoder3D(nn.Module): r""" 3D vae encoder for HunyuanImageRefiner. """ def __init__( self, in_channels: int = 3, out_channels: int = 64, block_out_channels: tuple[int, ...] 
= (128, 256, 512, 1024, 1024), layers_per_block: int = 2, temporal_compression_ratio: int = 4, spatial_compression_ratio: int = 16, downsample_match_channel: bool = True, ) -> None: super().__init__() self.in_channels = in_channels self.out_channels = out_channels self.group_size = block_out_channels[-1] // self.out_channels self.conv_in = HunyuanVideo15CausalConv3d(in_channels, block_out_channels[0], kernel_size=3) self.mid_block = None self.down_blocks = nn.ModuleList([]) input_channel = block_out_channels[0] for i in range(len(block_out_channels)): add_spatial_downsample = i < np.log2(spatial_compression_ratio) output_channel = block_out_channels[i] if not add_spatial_downsample: down_block = HunyuanVideo15DownBlock3D( num_layers=layers_per_block, in_channels=input_channel, out_channels=output_channel, downsample_out_channels=None, add_temporal_downsample=False, ) input_channel = output_channel else: add_temporal_downsample = i >= np.log2(spatial_compression_ratio // temporal_compression_ratio) downsample_out_channels = block_out_channels[i + 1] if downsample_match_channel else output_channel down_block = HunyuanVideo15DownBlock3D( num_layers=layers_per_block, in_channels=input_channel, out_channels=output_channel, downsample_out_channels=downsample_out_channels, add_temporal_downsample=add_temporal_downsample, ) input_channel = downsample_out_channels self.down_blocks.append(down_block) self.mid_block = HunyuanVideo15MidBlock(in_channels=block_out_channels[-1]) self.norm_out = HunyuanVideo15RMS_norm(block_out_channels[-1], images=False) self.conv_act = nn.SiLU() self.conv_out = HunyuanVideo15CausalConv3d(block_out_channels[-1], out_channels, kernel_size=3) self.gradient_checkpointing = False def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.conv_in(hidden_states) if torch.is_grad_enabled() and self.gradient_checkpointing: for down_block in self.down_blocks: hidden_states = self._gradient_checkpointing_func(down_block, 
hidden_states) hidden_states = self._gradient_checkpointing_func(self.mid_block, hidden_states) else: for down_block in self.down_blocks: hidden_states = down_block(hidden_states) hidden_states = self.mid_block(hidden_states) batch_size, _, frame, height, width = hidden_states.shape short_cut = hidden_states.view(batch_size, -1, self.group_size, frame, height, width).mean(dim=2) hidden_states = self.norm_out(hidden_states) hidden_states = self.conv_act(hidden_states) hidden_states = self.conv_out(hidden_states) hidden_states += short_cut return hidden_states class HunyuanVideo15Decoder3D(nn.Module): r""" Causal decoder for 3D video-like data used for HunyuanImage-1.5 Refiner. """ def __init__( self, in_channels: int = 32, out_channels: int = 3, block_out_channels: tuple[int, ...] = (1024, 1024, 512, 256, 128), layers_per_block: int = 2, spatial_compression_ratio: int = 16, temporal_compression_ratio: int = 4, upsample_match_channel: bool = True, ): super().__init__() self.layers_per_block = layers_per_block self.in_channels = in_channels self.out_channels = out_channels self.repeat = block_out_channels[0] // self.in_channels self.conv_in = HunyuanVideo15CausalConv3d(self.in_channels, block_out_channels[0], kernel_size=3) self.up_blocks = nn.ModuleList([]) # mid self.mid_block = HunyuanVideo15MidBlock(in_channels=block_out_channels[0]) # up input_channel = block_out_channels[0] for i in range(len(block_out_channels)): output_channel = block_out_channels[i] add_spatial_upsample = i < np.log2(spatial_compression_ratio) add_temporal_upsample = i < np.log2(temporal_compression_ratio) if add_spatial_upsample or add_temporal_upsample: upsample_out_channels = block_out_channels[i + 1] if upsample_match_channel else output_channel up_block = HunyuanVideo15UpBlock3D( num_layers=self.layers_per_block + 1, in_channels=input_channel, out_channels=output_channel, upsample_out_channels=upsample_out_channels, add_temporal_upsample=add_temporal_upsample, ) input_channel = 
upsample_out_channels else: up_block = HunyuanVideo15UpBlock3D( num_layers=self.layers_per_block + 1, in_channels=input_channel, out_channels=output_channel, upsample_out_channels=None, add_temporal_upsample=False, ) input_channel = output_channel self.up_blocks.append(up_block) # out self.norm_out = HunyuanVideo15RMS_norm(block_out_channels[-1], images=False) self.conv_act = nn.SiLU() self.conv_out = HunyuanVideo15CausalConv3d(block_out_channels[-1], out_channels, kernel_size=3) self.gradient_checkpointing = False def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.conv_in(hidden_states) + hidden_states.repeat_interleave(repeats=self.repeat, dim=1) if torch.is_grad_enabled() and self.gradient_checkpointing: hidden_states = self._gradient_checkpointing_func(self.mid_block, hidden_states) for up_block in self.up_blocks: hidden_states = self._gradient_checkpointing_func(up_block, hidden_states) else: hidden_states = self.mid_block(hidden_states) for up_block in self.up_blocks: hidden_states = up_block(hidden_states) # post-process hidden_states = self.norm_out(hidden_states) hidden_states = self.conv_act(hidden_states) hidden_states = self.conv_out(hidden_states) return hidden_states class AutoencoderKLHunyuanVideo15(ModelMixin, AutoencoderMixin, ConfigMixin): r""" A VAE model with KL loss for encoding videos into latents and decoding latent representations into videos. Used for HunyuanVideo-1.5. This model inherits from [`ModelMixin`]. Check the superclass documentation for it's generic methods implemented for all models (such as downloading or saving). 
""" _supports_gradient_checkpointing = True @register_to_config def __init__( self, in_channels: int = 3, out_channels: int = 3, latent_channels: int = 32, block_out_channels: tuple[int] = (128, 256, 512, 1024, 1024), layers_per_block: int = 2, spatial_compression_ratio: int = 16, temporal_compression_ratio: int = 4, downsample_match_channel: bool = True, upsample_match_channel: bool = True, scaling_factor: float = 1.03682, ) -> None: super().__init__() self.encoder = HunyuanVideo15Encoder3D( in_channels=in_channels, out_channels=latent_channels * 2, block_out_channels=block_out_channels, layers_per_block=layers_per_block, temporal_compression_ratio=temporal_compression_ratio, spatial_compression_ratio=spatial_compression_ratio, downsample_match_channel=downsample_match_channel, ) self.decoder = HunyuanVideo15Decoder3D( in_channels=latent_channels, out_channels=out_channels, block_out_channels=list(reversed(block_out_channels)), layers_per_block=layers_per_block, temporal_compression_ratio=temporal_compression_ratio, spatial_compression_ratio=spatial_compression_ratio, upsample_match_channel=upsample_match_channel, ) self.spatial_compression_ratio = spatial_compression_ratio self.temporal_compression_ratio = temporal_compression_ratio # When decoding a batch of video latents at a time, one can save memory by slicing across the batch dimension # to perform decoding of a single video latent at a time. self.use_slicing = False # When decoding spatially large video latents, the memory requirement is very high. By breaking the video latent # frames spatially into smaller tiles and performing multiple forward passes for decoding, and then blending the # intermediate tiles together, the memory requirement can be lowered. 
self.use_tiling = False # The minimal tile height and width for spatial tiling to be used self.tile_sample_min_height = 256 self.tile_sample_min_width = 256 # The minimal tile height and width in latent space self.tile_latent_min_height = self.tile_sample_min_height // spatial_compression_ratio self.tile_latent_min_width = self.tile_sample_min_width // spatial_compression_ratio self.tile_overlap_factor = 0.25 def enable_tiling( self, tile_sample_min_height: int | None = None, tile_sample_min_width: int | None = None, tile_latent_min_height: int | None = None, tile_latent_min_width: int | None = None, tile_overlap_factor: float | None = None, ) -> None: r""" Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow processing larger images. Args: tile_sample_min_height (`int`, *optional*): The minimum height required for a sample to be separated into tiles across the height dimension. tile_sample_min_width (`int`, *optional*): The minimum width required for a sample to be separated into tiles across the width dimension. tile_latent_min_height (`int`, *optional*): The minimum height required for a latent to be separated into tiles across the height dimension. tile_latent_min_width (`int`, *optional*): The minimum width required for a latent to be separated into tiles across the width dimension. 
""" self.use_tiling = True self.tile_sample_min_height = tile_sample_min_height or self.tile_sample_min_height self.tile_sample_min_width = tile_sample_min_width or self.tile_sample_min_width self.tile_latent_min_height = tile_latent_min_height or self.tile_latent_min_height self.tile_latent_min_width = tile_latent_min_width or self.tile_latent_min_width self.tile_overlap_factor = tile_overlap_factor or self.tile_overlap_factor def _encode(self, x: torch.Tensor) -> torch.Tensor: _, _, _, height, width = x.shape if self.use_tiling and (width > self.tile_sample_min_width or height > self.tile_sample_min_height): return self.tiled_encode(x) x = self.encoder(x) return x @apply_forward_hook def encode( self, x: torch.Tensor, return_dict: bool = True ) -> AutoencoderKLOutput | tuple[DiagonalGaussianDistribution]: r""" Encode a batch of images into latents. Args: x (`torch.Tensor`): Input batch of images. return_dict (`bool`, *optional*, defaults to `True`): Whether to return a [`~models.autoencoder_kl.AutoencoderKLOutput`] instead of a plain tuple. Returns: The latent representations of the encoded videos. If `return_dict` is True, a [`~models.autoencoder_kl.AutoencoderKLOutput`] is returned, otherwise a plain `tuple` is returned. """ if self.use_slicing and x.shape[0] > 1: encoded_slices = [self._encode(x_slice) for x_slice in x.split(1)] h = torch.cat(encoded_slices) else: h = self._encode(x) posterior = DiagonalGaussianDistribution(h) if not return_dict: return (posterior,) return AutoencoderKLOutput(latent_dist=posterior) def _decode(self, z: torch.Tensor) -> torch.Tensor: _, _, _, height, width = z.shape if self.use_tiling and (width > self.tile_latent_min_width or height > self.tile_latent_min_height): return self.tiled_decode(z) dec = self.decoder(z) return dec @apply_forward_hook def decode(self, z: torch.Tensor, return_dict: bool = True) -> DecoderOutput | torch.Tensor: r""" Decode a batch of images. Args: z (`torch.Tensor`): Input batch of latent vectors. 
return_dict (`bool`, *optional*, defaults to `True`): Whether to return a [`~models.vae.DecoderOutput`] instead of a plain tuple. Returns: [`~models.vae.DecoderOutput`] or `tuple`: If return_dict is True, a [`~models.vae.DecoderOutput`] is returned, otherwise a plain `tuple` is returned. """ if self.use_slicing and z.shape[0] > 1: decoded_slices = [self._decode(z_slice) for z_slice in z.split(1)] decoded = torch.cat(decoded_slices) else: decoded = self._decode(z) if not return_dict: return (decoded,) return DecoderOutput(sample=decoded) def blend_v(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor: blend_extent = min(a.shape[-2], b.shape[-2], blend_extent) for y in range(blend_extent): b[:, :, :, y, :] = a[:, :, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, :, y, :] * ( y / blend_extent ) return b def blend_h(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor: blend_extent = min(a.shape[-1], b.shape[-1], blend_extent) for x in range(blend_extent): b[:, :, :, :, x] = a[:, :, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, :, x] * ( x / blend_extent ) return b def blend_t(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor: blend_extent = min(a.shape[-3], b.shape[-3], blend_extent) for x in range(blend_extent): b[:, :, x, :, :] = a[:, :, -blend_extent + x, :, :] * (1 - x / blend_extent) + b[:, :, x, :, :] * ( x / blend_extent ) return b def tiled_encode(self, x: torch.Tensor) -> torch.Tensor: r"""Encode a batch of images using a tiled encoder. Args: x (`torch.Tensor`): Input batch of videos. Returns: `torch.Tensor`: The latent representation of the encoded videos. 
""" _, _, _, height, width = x.shape overlap_height = int(self.tile_sample_min_height * (1 - self.tile_overlap_factor)) # 256 * (1 - 0.25) = 192 overlap_width = int(self.tile_sample_min_width * (1 - self.tile_overlap_factor)) # 256 * (1 - 0.25) = 192 blend_height = int(self.tile_latent_min_height * self.tile_overlap_factor) # 8 * 0.25 = 2 blend_width = int(self.tile_latent_min_width * self.tile_overlap_factor) # 8 * 0.25 = 2 row_limit_height = self.tile_latent_min_height - blend_height # 8 - 2 = 6 row_limit_width = self.tile_latent_min_width - blend_width # 8 - 2 = 6 rows = [] for i in range(0, height, overlap_height): row = [] for j in range(0, width, overlap_width): tile = x[ :, :, :, i : i + self.tile_sample_min_height, j : j + self.tile_sample_min_width, ] tile = self.encoder(tile) row.append(tile) rows.append(row) result_rows = [] for i, row in enumerate(rows): result_row = [] for j, tile in enumerate(row): if i > 0: tile = self.blend_v(rows[i - 1][j], tile, blend_height) if j > 0: tile = self.blend_h(row[j - 1], tile, blend_width) result_row.append(tile[:, :, :, :row_limit_height, :row_limit_width]) result_rows.append(torch.cat(result_row, dim=-1)) moments = torch.cat(result_rows, dim=-2) return moments def tiled_decode(self, z: torch.Tensor) -> torch.Tensor: r""" Decode a batch of images using a tiled decoder. Args: z (`torch.Tensor`): Input batch of latent vectors. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~models.vae.DecoderOutput`] instead of a plain tuple. Returns: [`~models.vae.DecoderOutput`] or `tuple`: If return_dict is True, a [`~models.vae.DecoderOutput`] is returned, otherwise a plain `tuple` is returned. 
""" _, _, _, height, width = z.shape overlap_height = int(self.tile_latent_min_height * (1 - self.tile_overlap_factor)) # 8 * (1 - 0.25) = 6 overlap_width = int(self.tile_latent_min_width * (1 - self.tile_overlap_factor)) # 8 * (1 - 0.25) = 6 blend_height = int(self.tile_sample_min_height * self.tile_overlap_factor) # 256 * 0.25 = 64 blend_width = int(self.tile_sample_min_width * self.tile_overlap_factor) # 256 * 0.25 = 64 row_limit_height = self.tile_sample_min_height - blend_height # 256 - 64 = 192 row_limit_width = self.tile_sample_min_width - blend_width # 256 - 64 = 192 rows = [] for i in range(0, height, overlap_height): row = [] for j in range(0, width, overlap_width): tile = z[ :, :, :, i : i + self.tile_latent_min_height, j : j + self.tile_latent_min_width, ] decoded = self.decoder(tile) row.append(decoded) rows.append(row) result_rows = [] for i, row in enumerate(rows): result_row = [] for j, tile in enumerate(row): if i > 0: tile = self.blend_v(rows[i - 1][j], tile, blend_height) if j > 0: tile = self.blend_h(row[j - 1], tile, blend_width) result_row.append(tile[:, :, :, :row_limit_height, :row_limit_width]) result_rows.append(torch.cat(result_row, dim=-1)) dec = torch.cat(result_rows, dim=-2) return dec def forward( self, sample: torch.Tensor, sample_posterior: bool = False, return_dict: bool = True, generator: torch.Generator | None = None, ) -> DecoderOutput | torch.Tensor: r""" Args: sample (`torch.Tensor`): Input sample. sample_posterior (`bool`, *optional*, defaults to `False`): Whether to sample from the posterior. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`DecoderOutput`] instead of a plain tuple. """ x = sample posterior = self.encode(x).latent_dist if sample_posterior: z = posterior.sample(generator=generator) else: z = posterior.mode() dec = self.decode(z, return_dict=return_dict) return dec
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/models/autoencoders/autoencoder_kl_hunyuanvideo15.py", "license": "Apache License 2.0", "lines": 768, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:src/diffusers/models/transformers/transformer_hunyuan_video15.py
# Copyright 2025 The Hunyuan Team and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Any import torch import torch.nn as nn import torch.nn.functional as F from diffusers.loaders import FromOriginalModelMixin from ...configuration_utils import ConfigMixin, register_to_config from ...loaders import PeftAdapterMixin from ...utils import apply_lora_scale, logging from ..attention import AttentionMixin, FeedForward from ..attention_dispatch import dispatch_attention_fn from ..attention_processor import Attention from ..cache_utils import CacheMixin from ..embeddings import ( CombinedTimestepTextProjEmbeddings, TimestepEmbedding, Timesteps, get_1d_rotary_pos_embed, ) from ..modeling_outputs import Transformer2DModelOutput from ..modeling_utils import ModelMixin from ..normalization import AdaLayerNormContinuous, AdaLayerNormZero logger = logging.get_logger(__name__) # pylint: disable=invalid-name class HunyuanVideo15AttnProcessor2_0: _attention_backend = None _parallel_config = None def __init__(self): if not hasattr(F, "scaled_dot_product_attention"): raise ImportError( "HunyuanVideo15AttnProcessor2_0 requires PyTorch 2.0. To use it, please upgrade PyTorch to 2.0." ) def __call__( self, attn: Attention, hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor | None = None, attention_mask: torch.Tensor | None = None, image_rotary_emb: torch.Tensor | None = None, ) -> torch.Tensor: # 1. 
QKV projections query = attn.to_q(hidden_states) key = attn.to_k(hidden_states) value = attn.to_v(hidden_states) query = query.unflatten(2, (attn.heads, -1)) key = key.unflatten(2, (attn.heads, -1)) value = value.unflatten(2, (attn.heads, -1)) # 2. QK normalization query = attn.norm_q(query) key = attn.norm_k(key) # 3. Rotational positional embeddings applied to latent stream if image_rotary_emb is not None: from ..embeddings import apply_rotary_emb query = apply_rotary_emb(query, image_rotary_emb, sequence_dim=1) key = apply_rotary_emb(key, image_rotary_emb, sequence_dim=1) # 4. Encoder condition QKV projection and normalization if encoder_hidden_states is not None: encoder_query = attn.add_q_proj(encoder_hidden_states) encoder_key = attn.add_k_proj(encoder_hidden_states) encoder_value = attn.add_v_proj(encoder_hidden_states) encoder_query = encoder_query.unflatten(2, (attn.heads, -1)) encoder_key = encoder_key.unflatten(2, (attn.heads, -1)) encoder_value = encoder_value.unflatten(2, (attn.heads, -1)) if attn.norm_added_q is not None: encoder_query = attn.norm_added_q(encoder_query) if attn.norm_added_k is not None: encoder_key = attn.norm_added_k(encoder_key) query = torch.cat([query, encoder_query], dim=1) key = torch.cat([key, encoder_key], dim=1) value = torch.cat([value, encoder_value], dim=1) batch_size, seq_len, heads, dim = query.shape attention_mask = F.pad(attention_mask, (seq_len - attention_mask.shape[1], 0), value=True) attention_mask = attention_mask.bool() self_attn_mask_1 = attention_mask.view(batch_size, 1, 1, seq_len).repeat(1, 1, seq_len, 1) self_attn_mask_2 = self_attn_mask_1.transpose(2, 3) attention_mask = (self_attn_mask_1 & self_attn_mask_2).bool() # 5. 
Attention hidden_states = dispatch_attention_fn( query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False, backend=self._attention_backend, parallel_config=self._parallel_config, ) hidden_states = hidden_states.flatten(2, 3) hidden_states = hidden_states.to(query.dtype) # 6. Output projection if encoder_hidden_states is not None: hidden_states, encoder_hidden_states = ( hidden_states[:, : -encoder_hidden_states.shape[1]], hidden_states[:, -encoder_hidden_states.shape[1] :], ) if getattr(attn, "to_out", None) is not None: hidden_states = attn.to_out[0](hidden_states) hidden_states = attn.to_out[1](hidden_states) if getattr(attn, "to_add_out", None) is not None: encoder_hidden_states = attn.to_add_out(encoder_hidden_states) return hidden_states, encoder_hidden_states class HunyuanVideo15PatchEmbed(nn.Module): def __init__( self, patch_size: int | tuple[int, int, int] = 16, in_chans: int = 3, embed_dim: int = 768, ) -> None: super().__init__() patch_size = (patch_size, patch_size, patch_size) if isinstance(patch_size, int) else patch_size self.proj = nn.Conv3d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.proj(hidden_states) hidden_states = hidden_states.flatten(2).transpose(1, 2) # BCFHW -> BNC return hidden_states class HunyuanVideo15AdaNorm(nn.Module): def __init__(self, in_features: int, out_features: int | None = None) -> None: super().__init__() out_features = out_features or 2 * in_features self.linear = nn.Linear(in_features, out_features) self.nonlinearity = nn.SiLU() def forward( self, temb: torch.Tensor ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: temb = self.linear(self.nonlinearity(temb)) gate_msa, gate_mlp = temb.chunk(2, dim=1) gate_msa, gate_mlp = gate_msa.unsqueeze(1), gate_mlp.unsqueeze(1) return gate_msa, gate_mlp class HunyuanVideo15TimeEmbedding(nn.Module): r""" Time embedding for 
HunyuanVideo 1.5. Supports standard timestep embedding and optional reference timestep embedding for MeanFlow-based super-resolution models. Args: embedding_dim (`int`): The dimension of the output embedding. """ def __init__(self, embedding_dim: int, use_meanflow: bool = False): super().__init__() self.time_proj = Timesteps(num_channels=256, flip_sin_to_cos=True, downscale_freq_shift=0) self.timestep_embedder = TimestepEmbedding(in_channels=256, time_embed_dim=embedding_dim) self.use_meanflow = use_meanflow self.time_proj_r = None self.timestep_embedder_r = None if use_meanflow: self.time_proj_r = Timesteps(num_channels=256, flip_sin_to_cos=True, downscale_freq_shift=0) self.timestep_embedder_r = TimestepEmbedding(in_channels=256, time_embed_dim=embedding_dim) def forward( self, timestep: torch.Tensor, timestep_r: torch.Tensor | None = None, ) -> torch.Tensor: timesteps_proj = self.time_proj(timestep) timesteps_emb = self.timestep_embedder(timesteps_proj.to(dtype=timestep.dtype)) if timestep_r is not None: timesteps_proj_r = self.time_proj_r(timestep_r) timesteps_emb_r = self.timestep_embedder_r(timesteps_proj_r.to(dtype=timestep.dtype)) timesteps_emb = timesteps_emb + timesteps_emb_r return timesteps_emb class HunyuanVideo15IndividualTokenRefinerBlock(nn.Module): def __init__( self, num_attention_heads: int, attention_head_dim: int, mlp_width_ratio: str = 4.0, mlp_drop_rate: float = 0.0, attention_bias: bool = True, ) -> None: super().__init__() hidden_size = num_attention_heads * attention_head_dim self.norm1 = nn.LayerNorm(hidden_size, elementwise_affine=True, eps=1e-6) self.attn = Attention( query_dim=hidden_size, cross_attention_dim=None, heads=num_attention_heads, dim_head=attention_head_dim, bias=attention_bias, ) self.norm2 = nn.LayerNorm(hidden_size, elementwise_affine=True, eps=1e-6) self.ff = FeedForward(hidden_size, mult=mlp_width_ratio, activation_fn="linear-silu", dropout=mlp_drop_rate) self.norm_out = HunyuanVideo15AdaNorm(hidden_size, 2 * 
hidden_size) def forward( self, hidden_states: torch.Tensor, temb: torch.Tensor, attention_mask: torch.Tensor | None = None, ) -> torch.Tensor: norm_hidden_states = self.norm1(hidden_states) attn_output = self.attn( hidden_states=norm_hidden_states, encoder_hidden_states=None, attention_mask=attention_mask, ) gate_msa, gate_mlp = self.norm_out(temb) hidden_states = hidden_states + attn_output * gate_msa ff_output = self.ff(self.norm2(hidden_states)) hidden_states = hidden_states + ff_output * gate_mlp return hidden_states class HunyuanVideo15IndividualTokenRefiner(nn.Module): def __init__( self, num_attention_heads: int, attention_head_dim: int, num_layers: int, mlp_width_ratio: float = 4.0, mlp_drop_rate: float = 0.0, attention_bias: bool = True, ) -> None: super().__init__() self.refiner_blocks = nn.ModuleList( [ HunyuanVideo15IndividualTokenRefinerBlock( num_attention_heads=num_attention_heads, attention_head_dim=attention_head_dim, mlp_width_ratio=mlp_width_ratio, mlp_drop_rate=mlp_drop_rate, attention_bias=attention_bias, ) for _ in range(num_layers) ] ) def forward( self, hidden_states: torch.Tensor, temb: torch.Tensor, attention_mask: torch.Tensor | None = None, ) -> None: self_attn_mask = None if attention_mask is not None: batch_size = attention_mask.shape[0] seq_len = attention_mask.shape[1] attention_mask = attention_mask.to(hidden_states.device).bool() self_attn_mask_1 = attention_mask.view(batch_size, 1, 1, seq_len).repeat(1, 1, seq_len, 1) self_attn_mask_2 = self_attn_mask_1.transpose(2, 3) self_attn_mask = (self_attn_mask_1 & self_attn_mask_2).bool() for block in self.refiner_blocks: hidden_states = block(hidden_states, temb, self_attn_mask) return hidden_states class HunyuanVideo15TokenRefiner(nn.Module): def __init__( self, in_channels: int, num_attention_heads: int, attention_head_dim: int, num_layers: int, mlp_ratio: float = 4.0, mlp_drop_rate: float = 0.0, attention_bias: bool = True, ) -> None: super().__init__() hidden_size = 
num_attention_heads * attention_head_dim self.time_text_embed = CombinedTimestepTextProjEmbeddings( embedding_dim=hidden_size, pooled_projection_dim=in_channels ) self.proj_in = nn.Linear(in_channels, hidden_size, bias=True) self.token_refiner = HunyuanVideo15IndividualTokenRefiner( num_attention_heads=num_attention_heads, attention_head_dim=attention_head_dim, num_layers=num_layers, mlp_width_ratio=mlp_ratio, mlp_drop_rate=mlp_drop_rate, attention_bias=attention_bias, ) def forward( self, hidden_states: torch.Tensor, timestep: torch.LongTensor, attention_mask: torch.LongTensor | None = None, ) -> torch.Tensor: if attention_mask is None: pooled_projections = hidden_states.mean(dim=1) else: original_dtype = hidden_states.dtype mask_float = attention_mask.float().unsqueeze(-1) pooled_projections = (hidden_states * mask_float).sum(dim=1) / mask_float.sum(dim=1) pooled_projections = pooled_projections.to(original_dtype) temb = self.time_text_embed(timestep, pooled_projections) hidden_states = self.proj_in(hidden_states) hidden_states = self.token_refiner(hidden_states, temb, attention_mask) return hidden_states class HunyuanVideo15RotaryPosEmbed(nn.Module): def __init__(self, patch_size: int, patch_size_t: int, rope_dim: list[int], theta: float = 256.0) -> None: super().__init__() self.patch_size = patch_size self.patch_size_t = patch_size_t self.rope_dim = rope_dim self.theta = theta def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: batch_size, num_channels, num_frames, height, width = hidden_states.shape rope_sizes = [num_frames // self.patch_size_t, height // self.patch_size, width // self.patch_size] axes_grids = [] for i in range(len(rope_sizes)): # Note: The following line diverges from original behaviour. We create the grid on the device, whereas # original implementation creates it on CPU and then moves it to device. This results in numerical # differences in layerwise debugging outputs, but visually it is the same. 
grid = torch.arange(0, rope_sizes[i], device=hidden_states.device, dtype=torch.float32) axes_grids.append(grid) grid = torch.meshgrid(*axes_grids, indexing="ij") # [W, H, T] grid = torch.stack(grid, dim=0) # [3, W, H, T] freqs = [] for i in range(3): freq = get_1d_rotary_pos_embed(self.rope_dim[i], grid[i].reshape(-1), self.theta, use_real=True) freqs.append(freq) freqs_cos = torch.cat([f[0] for f in freqs], dim=1) # (W * H * T, D / 2) freqs_sin = torch.cat([f[1] for f in freqs], dim=1) # (W * H * T, D / 2) return freqs_cos, freqs_sin class HunyuanVideo15ByT5TextProjection(nn.Module): def __init__(self, in_features: int, hidden_size: int, out_features: int): super().__init__() self.norm = nn.LayerNorm(in_features) self.linear_1 = nn.Linear(in_features, hidden_size) self.linear_2 = nn.Linear(hidden_size, hidden_size) self.linear_3 = nn.Linear(hidden_size, out_features) self.act_fn = nn.GELU() def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.norm(encoder_hidden_states) hidden_states = self.linear_1(hidden_states) hidden_states = self.act_fn(hidden_states) hidden_states = self.linear_2(hidden_states) hidden_states = self.act_fn(hidden_states) hidden_states = self.linear_3(hidden_states) return hidden_states class HunyuanVideo15ImageProjection(nn.Module): def __init__(self, in_channels: int, hidden_size: int): super().__init__() self.norm_in = nn.LayerNorm(in_channels) self.linear_1 = nn.Linear(in_channels, in_channels) self.act_fn = nn.GELU() self.linear_2 = nn.Linear(in_channels, hidden_size) self.norm_out = nn.LayerNorm(hidden_size) def forward(self, image_embeds: torch.Tensor) -> torch.Tensor: hidden_states = self.norm_in(image_embeds) hidden_states = self.linear_1(hidden_states) hidden_states = self.act_fn(hidden_states) hidden_states = self.linear_2(hidden_states) hidden_states = self.norm_out(hidden_states) return hidden_states class HunyuanVideo15TransformerBlock(nn.Module): def __init__( self, num_attention_heads: 
int, attention_head_dim: int, mlp_ratio: float, qk_norm: str = "rms_norm", ) -> None: super().__init__() hidden_size = num_attention_heads * attention_head_dim self.norm1 = AdaLayerNormZero(hidden_size, norm_type="layer_norm") self.norm1_context = AdaLayerNormZero(hidden_size, norm_type="layer_norm") self.attn = Attention( query_dim=hidden_size, cross_attention_dim=None, added_kv_proj_dim=hidden_size, dim_head=attention_head_dim, heads=num_attention_heads, out_dim=hidden_size, context_pre_only=False, bias=True, processor=HunyuanVideo15AttnProcessor2_0(), qk_norm=qk_norm, eps=1e-6, ) self.norm2 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6) self.ff = FeedForward(hidden_size, mult=mlp_ratio, activation_fn="gelu-approximate") self.norm2_context = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6) self.ff_context = FeedForward(hidden_size, mult=mlp_ratio, activation_fn="gelu-approximate") def forward( self, hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor, temb: torch.Tensor, attention_mask: torch.Tensor | None = None, freqs_cis: tuple[torch.Tensor, torch.Tensor] | None = None, *args, **kwargs, ) -> tuple[torch.Tensor, torch.Tensor]: # 1. Input normalization norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( encoder_hidden_states, emb=temb ) # 2. Joint attention attn_output, context_attn_output = self.attn( hidden_states=norm_hidden_states, encoder_hidden_states=norm_encoder_hidden_states, attention_mask=attention_mask, image_rotary_emb=freqs_cis, ) # 3. 
Modulation and residual connection hidden_states = hidden_states + attn_output * gate_msa.unsqueeze(1) encoder_hidden_states = encoder_hidden_states + context_attn_output * c_gate_msa.unsqueeze(1) norm_hidden_states = self.norm2(hidden_states) norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None] norm_encoder_hidden_states = norm_encoder_hidden_states * (1 + c_scale_mlp[:, None]) + c_shift_mlp[:, None] # 4. Feed-forward ff_output = self.ff(norm_hidden_states) context_ff_output = self.ff_context(norm_encoder_hidden_states) hidden_states = hidden_states + gate_mlp.unsqueeze(1) * ff_output encoder_hidden_states = encoder_hidden_states + c_gate_mlp.unsqueeze(1) * context_ff_output return hidden_states, encoder_hidden_states class HunyuanVideo15Transformer3DModel( ModelMixin, ConfigMixin, PeftAdapterMixin, FromOriginalModelMixin, CacheMixin, AttentionMixin ): r""" A Transformer model for video-like data used in [HunyuanVideo1.5](https://huggingface.co/tencent/HunyuanVideo1.5). Args: in_channels (`int`, defaults to `16`): The number of channels in the input. out_channels (`int`, defaults to `16`): The number of channels in the output. num_attention_heads (`int`, defaults to `24`): The number of heads to use for multi-head attention. attention_head_dim (`int`, defaults to `128`): The number of channels in each head. num_layers (`int`, defaults to `20`): The number of layers of dual-stream blocks to use. num_refiner_layers (`int`, defaults to `2`): The number of layers of refiner blocks to use. mlp_ratio (`float`, defaults to `4.0`): The ratio of the hidden layer size to the input size in the feedforward network. patch_size (`int`, defaults to `2`): The size of the spatial patches to use in the patch embedding layer. patch_size_t (`int`, defaults to `1`): The size of the tmeporal patches to use in the patch embedding layer. 
qk_norm (`str`, defaults to `rms_norm`): The normalization to use for the query and key projections in the attention layers. guidance_embeds (`bool`, defaults to `True`): Whether to use guidance embeddings in the model. text_embed_dim (`int`, defaults to `4096`): Input dimension of text embeddings from the text encoder. pooled_projection_dim (`int`, defaults to `768`): The dimension of the pooled projection of the text embeddings. rope_theta (`float`, defaults to `256.0`): The value of theta to use in the RoPE layer. rope_axes_dim (`tuple[int]`, defaults to `(16, 56, 56)`): The dimensions of the axes to use in the RoPE layer. """ _supports_gradient_checkpointing = True _skip_layerwise_casting_patterns = ["x_embedder", "context_embedder", "norm"] _no_split_modules = [ "HunyuanVideo15TransformerBlock", "HunyuanVideo15PatchEmbed", "HunyuanVideo15TokenRefiner", ] _repeated_blocks = [ "HunyuanVideo15TransformerBlock", "HunyuanVideo15PatchEmbed", "HunyuanVideo15TokenRefiner", ] @register_to_config def __init__( self, in_channels: int = 65, out_channels: int = 32, num_attention_heads: int = 16, attention_head_dim: int = 128, num_layers: int = 54, num_refiner_layers: int = 2, mlp_ratio: float = 4.0, patch_size: int = 1, patch_size_t: int = 1, qk_norm: str = "rms_norm", text_embed_dim: int = 3584, text_embed_2_dim: int = 1472, image_embed_dim: int = 1152, rope_theta: float = 256.0, rope_axes_dim: tuple[int, ...] = (16, 56, 56), # YiYi Notes: config based on target_size_config https://github.com/yiyixuxu/hy15/blob/main/hyvideo/pipelines/hunyuan_video_pipeline.py#L205 target_size: int = 640, # did not name sample_size since it is in pixel spaces task_type: str = "i2v", use_meanflow: bool = False, ) -> None: super().__init__() inner_dim = num_attention_heads * attention_head_dim out_channels = out_channels or in_channels # 1. 
Latent and condition embedders self.x_embedder = HunyuanVideo15PatchEmbed((patch_size_t, patch_size, patch_size), in_channels, inner_dim) self.image_embedder = HunyuanVideo15ImageProjection(image_embed_dim, inner_dim) self.context_embedder = HunyuanVideo15TokenRefiner( text_embed_dim, num_attention_heads, attention_head_dim, num_layers=num_refiner_layers ) self.context_embedder_2 = HunyuanVideo15ByT5TextProjection(text_embed_2_dim, 2048, inner_dim) self.time_embed = HunyuanVideo15TimeEmbedding(inner_dim, use_meanflow=use_meanflow) self.cond_type_embed = nn.Embedding(3, inner_dim) # 2. RoPE self.rope = HunyuanVideo15RotaryPosEmbed(patch_size, patch_size_t, rope_axes_dim, rope_theta) # 3. Dual stream transformer blocks self.transformer_blocks = nn.ModuleList( [ HunyuanVideo15TransformerBlock( num_attention_heads, attention_head_dim, mlp_ratio=mlp_ratio, qk_norm=qk_norm ) for _ in range(num_layers) ] ) # 5. Output projection self.norm_out = AdaLayerNormContinuous(inner_dim, inner_dim, elementwise_affine=False, eps=1e-6) self.proj_out = nn.Linear(inner_dim, patch_size_t * patch_size * patch_size * out_channels) self.gradient_checkpointing = False @apply_lora_scale("attention_kwargs") def forward( self, hidden_states: torch.Tensor, timestep: torch.LongTensor, encoder_hidden_states: torch.Tensor, encoder_attention_mask: torch.Tensor, timestep_r: torch.LongTensor | None = None, encoder_hidden_states_2: torch.Tensor | None = None, encoder_attention_mask_2: torch.Tensor | None = None, image_embeds: torch.Tensor | None = None, attention_kwargs: dict[str, Any] | None = None, return_dict: bool = True, ) -> tuple[torch.Tensor] | Transformer2DModelOutput: batch_size, num_channels, num_frames, height, width = hidden_states.shape p_t, p_h, p_w = self.config.patch_size_t, self.config.patch_size, self.config.patch_size post_patch_num_frames = num_frames // p_t post_patch_height = height // p_h post_patch_width = width // p_w # 1. RoPE image_rotary_emb = self.rope(hidden_states) # 2. 
Conditional embeddings temb = self.time_embed(timestep, timestep_r=timestep_r) hidden_states = self.x_embedder(hidden_states) # qwen text embedding encoder_hidden_states = self.context_embedder(encoder_hidden_states, timestep, encoder_attention_mask) encoder_hidden_states_cond_emb = self.cond_type_embed( torch.zeros_like(encoder_hidden_states[:, :, 0], dtype=torch.long) ) encoder_hidden_states = encoder_hidden_states + encoder_hidden_states_cond_emb # byt5 text embedding encoder_hidden_states_2 = self.context_embedder_2(encoder_hidden_states_2) encoder_hidden_states_2_cond_emb = self.cond_type_embed( torch.ones_like(encoder_hidden_states_2[:, :, 0], dtype=torch.long) ) encoder_hidden_states_2 = encoder_hidden_states_2 + encoder_hidden_states_2_cond_emb # image embed encoder_hidden_states_3 = self.image_embedder(image_embeds) is_t2v = torch.all(image_embeds == 0) if is_t2v: encoder_hidden_states_3 = encoder_hidden_states_3 * 0.0 encoder_attention_mask_3 = torch.zeros( (batch_size, encoder_hidden_states_3.shape[1]), dtype=encoder_attention_mask.dtype, device=encoder_attention_mask.device, ) else: encoder_attention_mask_3 = torch.ones( (batch_size, encoder_hidden_states_3.shape[1]), dtype=encoder_attention_mask.dtype, device=encoder_attention_mask.device, ) encoder_hidden_states_3_cond_emb = self.cond_type_embed( 2 * torch.ones_like( encoder_hidden_states_3[:, :, 0], dtype=torch.long, ) ) encoder_hidden_states_3 = encoder_hidden_states_3 + encoder_hidden_states_3_cond_emb # reorder and combine text tokens: combine valid tokens first, then padding encoder_attention_mask = encoder_attention_mask.bool() encoder_attention_mask_2 = encoder_attention_mask_2.bool() encoder_attention_mask_3 = encoder_attention_mask_3.bool() new_encoder_hidden_states = [] new_encoder_attention_mask = [] for text, text_mask, text_2, text_mask_2, image, image_mask in zip( encoder_hidden_states, encoder_attention_mask, encoder_hidden_states_2, encoder_attention_mask_2, encoder_hidden_states_3, 
encoder_attention_mask_3, ): # Concatenate: [valid_image, valid_byt5, valid_mllm, invalid_image, invalid_byt5, invalid_mllm] new_encoder_hidden_states.append( torch.cat( [ image[image_mask], # valid image text_2[text_mask_2], # valid byt5 text[text_mask], # valid mllm image[~image_mask], # invalid image torch.zeros_like(text_2[~text_mask_2]), # invalid byt5 (zeroed) torch.zeros_like(text[~text_mask]), # invalid mllm (zeroed) ], dim=0, ) ) # Apply same reordering to attention masks new_encoder_attention_mask.append( torch.cat( [ image_mask[image_mask], text_mask_2[text_mask_2], text_mask[text_mask], image_mask[~image_mask], text_mask_2[~text_mask_2], text_mask[~text_mask], ], dim=0, ) ) encoder_hidden_states = torch.stack(new_encoder_hidden_states) encoder_attention_mask = torch.stack(new_encoder_attention_mask) # 4. Transformer blocks if torch.is_grad_enabled() and self.gradient_checkpointing: for block in self.transformer_blocks: hidden_states, encoder_hidden_states = self._gradient_checkpointing_func( block, hidden_states, encoder_hidden_states, temb, encoder_attention_mask, image_rotary_emb, ) else: for block in self.transformer_blocks: hidden_states, encoder_hidden_states = block( hidden_states, encoder_hidden_states, temb, encoder_attention_mask, image_rotary_emb, ) # 5. Output projection hidden_states = self.norm_out(hidden_states, temb) hidden_states = self.proj_out(hidden_states) hidden_states = hidden_states.reshape( batch_size, post_patch_num_frames, post_patch_height, post_patch_width, -1, p_t, p_h, p_w ) hidden_states = hidden_states.permute(0, 4, 1, 5, 2, 6, 3, 7) hidden_states = hidden_states.flatten(6, 7).flatten(4, 5).flatten(2, 3) if not return_dict: return (hidden_states,) return Transformer2DModelOutput(sample=hidden_states)
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/models/transformers/transformer_hunyuan_video15.py", "license": "Apache License 2.0", "lines": 646, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:src/diffusers/pipelines/hunyuan_video1_5/image_processor.py
# Copyright 2025 The HunyuanVideo Team and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np from ...configuration_utils import register_to_config from ...video_processor import VideoProcessor # copied from https://github.com/Tencent-Hunyuan/HunyuanVideo-1.5/blob/main/hyvideo/utils/data_utils.py#L20 def generate_crop_size_list(base_size=256, patch_size=16, max_ratio=4.0): num_patches = round((base_size / patch_size) ** 2) assert max_ratio >= 1.0 crop_size_list = [] wp, hp = num_patches, 1 while wp > 0: if max(wp, hp) / min(wp, hp) <= max_ratio: crop_size_list.append((wp * patch_size, hp * patch_size)) if (hp + 1) * wp <= num_patches: hp += 1 else: wp -= 1 return crop_size_list # copied from https://github.com/Tencent-Hunyuan/HunyuanVideo-1.5/blob/main/hyvideo/utils/data_utils.py#L38 def get_closest_ratio(height: float, width: float, ratios: list, buckets: list): """ Get the closest ratio in the buckets. 
Args: height (float): video height width (float): video width ratios (list): video aspect ratio buckets (list): buckets generated by `generate_crop_size_list` Returns: the closest size in the buckets and the corresponding ratio """ aspect_ratio = float(height) / float(width) diff_ratios = ratios - aspect_ratio if aspect_ratio >= 1: indices = [(index, x) for index, x in enumerate(diff_ratios) if x <= 0] else: indices = [(index, x) for index, x in enumerate(diff_ratios) if x >= 0] closest_ratio_id = min(indices, key=lambda pair: abs(pair[1]))[0] closest_size = buckets[closest_ratio_id] closest_ratio = ratios[closest_ratio_id] return closest_size, closest_ratio class HunyuanVideo15ImageProcessor(VideoProcessor): r""" Image/video processor to preproces/postprocess the reference image/generatedvideo for the HunyuanVideo1.5 model. Args: do_resize (`bool`, *optional*, defaults to `True`): Whether to downscale the image's (height, width) dimensions to multiples of `vae_scale_factor`. Can accept `height` and `width` arguments from [`image_processor.VaeImageProcessor.preprocess`] method. vae_scale_factor (`int`, *optional*, defaults to `16`): VAE (spatial) scale factor. If `do_resize` is `True`, the image is automatically resized to multiples of this factor. vae_latent_channels (`int`, *optional*, defaults to `32`): VAE latent channels. do_convert_rgb (`bool`, *optional*, defaults to `True`): Whether to convert the image to RGB. 
""" @register_to_config def __init__( self, do_resize: bool = True, vae_scale_factor: int = 16, vae_latent_channels: int = 32, do_convert_rgb: bool = True, ): super().__init__( do_resize=do_resize, vae_scale_factor=vae_scale_factor, vae_latent_channels=vae_latent_channels, do_convert_rgb=do_convert_rgb, ) def calculate_default_height_width(self, height: int, width: int, target_size: int): crop_size_list = generate_crop_size_list(base_size=target_size, patch_size=self.config.vae_scale_factor) aspect_ratios = np.array([round(float(h) / float(w), 5) for h, w in crop_size_list]) height, width = get_closest_ratio(height, width, aspect_ratios, crop_size_list)[0] return height, width
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/pipelines/hunyuan_video1_5/image_processor.py", "license": "Apache License 2.0", "lines": 86, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:src/diffusers/pipelines/hunyuan_video1_5/pipeline_hunyuan_video1_5.py
# Copyright 2025 The HunyuanVideo Team and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import re from typing import Any import numpy as np import torch from transformers import ByT5Tokenizer, Qwen2_5_VLTextModel, Qwen2Tokenizer, T5EncoderModel from ...guiders import ClassifierFreeGuidance from ...models import AutoencoderKLHunyuanVideo15, HunyuanVideo15Transformer3DModel from ...schedulers import FlowMatchEulerDiscreteScheduler from ...utils import is_torch_xla_available, logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline from .image_processor import HunyuanVideo15ImageProcessor from .pipeline_output import HunyuanVideo15PipelineOutput if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```python >>> import torch >>> from diffusers import HunyuanVideo15Pipeline >>> from diffusers.utils import export_to_video >>> model_id = "hunyuanvideo-community/HunyuanVideo-1.5-480p_t2v" >>> pipe = HunyuanVideo15Pipeline.from_pretrained(model_id, torch_dtype=torch.float16) >>> pipe.vae.enable_tiling() >>> pipe.to("cuda") >>> output = pipe( ... prompt="A cat walks on the grass, realistic", ... num_inference_steps=50, ... 
).frames[0] >>> export_to_video(output, "output.mp4", fps=15) ``` """ def format_text_input(prompt: list[str], system_message: str) -> list[dict[str, Any]]: """ Apply text to template. Args: prompt (list[str]): Input text. system_message (str): System message. Returns: list[dict[str, Any]]: List of chat conversation. """ template = [ [{"role": "system", "content": system_message}, {"role": "user", "content": p if p else " "}] for p in prompt ] return template def extract_glyph_texts(prompt: str) -> list[str]: """ Extract glyph texts from prompt using regex pattern. Args: prompt: Input prompt string Returns: List of extracted glyph texts """ pattern = r"\"(.*?)\"|“(.*?)”" matches = re.findall(pattern, prompt) result = [match[0] or match[1] for match in matches] result = list(dict.fromkeys(result)) if len(result) > 1 else result if result: formatted_result = ". ".join([f'Text "{text}"' for text in result]) + ". " else: formatted_result = None return formatted_result # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps def retrieve_timesteps( scheduler, num_inference_steps: int | None = None, device: str | torch.device | None = None, timesteps: list[int] | None = None, sigmas: list[float] | None = None, **kwargs, ): r""" Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. Args: scheduler (`SchedulerMixin`): The scheduler to get timesteps from. num_inference_steps (`int`): The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` must be `None`. device (`str` or `torch.device`, *optional*): The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. timesteps (`list[int]`, *optional*): Custom timesteps used to override the timestep spacing strategy of the scheduler. 
If `timesteps` is passed, `num_inference_steps` and `sigmas` must be `None`. sigmas (`list[float]`, *optional*): Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, `num_inference_steps` and `timesteps` must be `None`. Returns: `tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the second element is the number of inference steps. """ if timesteps is not None and sigmas is not None: raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") if timesteps is not None: accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" timestep schedules. Please check whether you are using the correct scheduler." ) scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif sigmas is not None: accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accept_sigmas: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" sigmas schedules. Please check whether you are using the correct scheduler." ) scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return timesteps, num_inference_steps class HunyuanVideo15Pipeline(DiffusionPipeline): r""" Pipeline for text-to-video generation using HunyuanVideo1.5. This model inherits from [`DiffusionPipeline`]. 
    Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving,
    running on a particular device, etc.).

    Args:
        transformer ([`HunyuanVideo15Transformer3DModel`]):
            Conditional Transformer (MMDiT) architecture to denoise the encoded video latents.
        scheduler ([`FlowMatchEulerDiscreteScheduler`]):
            A scheduler to be used in combination with `transformer` to denoise the encoded video latents.
        vae ([`AutoencoderKLHunyuanVideo15`]):
            Variational Auto-Encoder (VAE) Model to encode and decode videos to and from latent representations.
        text_encoder ([`Qwen2_5_VLTextModel`]):
            The [Qwen2.5-VL-7B-Instruct](https://huggingface.co/Qwen/Qwen2.5-VL-7B-Instruct) text encoder.
        tokenizer (`Qwen2Tokenizer`):
            Tokenizer of class [`Qwen2Tokenizer`].
        text_encoder_2 ([`T5EncoderModel`]):
            [T5EncoderModel](https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5EncoderModel)
            variant.
        tokenizer_2 (`ByT5Tokenizer`):
            Tokenizer of class [`ByT5Tokenizer`].
        guider ([`ClassifierFreeGuidance`]):
            [`ClassifierFreeGuidance`] for classifier-free guidance.
""" model_cpu_offload_seq = "text_encoder->transformer->vae" def __init__( self, text_encoder: Qwen2_5_VLTextModel, tokenizer: Qwen2Tokenizer, transformer: HunyuanVideo15Transformer3DModel, vae: AutoencoderKLHunyuanVideo15, scheduler: FlowMatchEulerDiscreteScheduler, text_encoder_2: T5EncoderModel, tokenizer_2: ByT5Tokenizer, guider: ClassifierFreeGuidance, ): super().__init__() self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, transformer=transformer, scheduler=scheduler, text_encoder_2=text_encoder_2, tokenizer_2=tokenizer_2, guider=guider, ) self.vae_scale_factor_temporal = self.vae.temporal_compression_ratio if getattr(self, "vae", None) else 4 self.vae_scale_factor_spatial = self.vae.spatial_compression_ratio if getattr(self, "vae", None) else 16 self.video_processor = HunyuanVideo15ImageProcessor(vae_scale_factor=self.vae_scale_factor_spatial) self.target_size = self.transformer.config.target_size if getattr(self, "transformer", None) else 640 self.vision_states_dim = ( self.transformer.config.image_embed_dim if getattr(self, "transformer", None) else 1152 ) self.num_channels_latents = self.vae.config.latent_channels if hasattr(self, "vae") else 32 # fmt: off self.system_message = "You are a helpful assistant. Describe the video by detailing the following aspects: \ 1. The main content and theme of the video. \ 2. The color, shape, size, texture, quantity, text, and spatial relationships of the objects. \ 3. Actions, events, behaviors temporal relationships, physical movement changes of the objects. \ 4. background environment, light, style and atmosphere. \ 5. camera angles, movements, and transitions used in the video." 
# fmt: on self.prompt_template_encode_start_idx = 108 self.tokenizer_max_length = 1000 self.tokenizer_2_max_length = 256 self.vision_num_semantic_tokens = 729 self.default_aspect_ratio = (16, 9) # (width: height) @staticmethod def _get_mllm_prompt_embeds( text_encoder: Qwen2_5_VLTextModel, tokenizer: Qwen2Tokenizer, prompt: str | list[str], device: torch.device, tokenizer_max_length: int = 1000, num_hidden_layers_to_skip: int = 2, # fmt: off system_message: str = "You are a helpful assistant. Describe the video by detailing the following aspects: \ 1. The main content and theme of the video. \ 2. The color, shape, size, texture, quantity, text, and spatial relationships of the objects. \ 3. Actions, events, behaviors temporal relationships, physical movement changes of the objects. \ 4. background environment, light, style and atmosphere. \ 5. camera angles, movements, and transitions used in the video.", # fmt: on crop_start: int = 108, ) -> tuple[torch.Tensor, torch.Tensor]: prompt = [prompt] if isinstance(prompt, str) else prompt prompt = format_text_input(prompt, system_message) text_inputs = tokenizer.apply_chat_template( prompt, add_generation_prompt=True, tokenize=True, return_dict=True, padding="max_length", max_length=tokenizer_max_length + crop_start, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids.to(device=device) prompt_attention_mask = text_inputs.attention_mask.to(device=device) prompt_embeds = text_encoder( input_ids=text_input_ids, attention_mask=prompt_attention_mask, output_hidden_states=True, ).hidden_states[-(num_hidden_layers_to_skip + 1)] if crop_start is not None and crop_start > 0: prompt_embeds = prompt_embeds[:, crop_start:] prompt_attention_mask = prompt_attention_mask[:, crop_start:] return prompt_embeds, prompt_attention_mask @staticmethod def _get_byt5_prompt_embeds( tokenizer: ByT5Tokenizer, text_encoder: T5EncoderModel, prompt: str | list[str], device: torch.device, tokenizer_max_length: int = 256, ): 
        prompt = [prompt] if isinstance(prompt, str) else prompt
        # Glyph texts are the quoted substrings of each prompt; prompts without quoted text get all-zero
        # embeddings and masks so the batch stays rectangular.
        glyph_texts = [extract_glyph_texts(p) for p in prompt]

        prompt_embeds_list = []
        prompt_embeds_mask_list = []
        for glyph_text in glyph_texts:
            if glyph_text is None:
                glyph_text_embeds = torch.zeros(
                    (1, tokenizer_max_length, text_encoder.config.d_model), device=device, dtype=text_encoder.dtype
                )
                glyph_text_embeds_mask = torch.zeros((1, tokenizer_max_length), device=device, dtype=torch.int64)
            else:
                txt_tokens = tokenizer(
                    glyph_text,
                    padding="max_length",
                    max_length=tokenizer_max_length,
                    truncation=True,
                    add_special_tokens=True,
                    return_tensors="pt",
                ).to(device)
                glyph_text_embeds = text_encoder(
                    input_ids=txt_tokens.input_ids,
                    attention_mask=txt_tokens.attention_mask.float(),
                )[0]
                glyph_text_embeds = glyph_text_embeds.to(device=device)
                glyph_text_embeds_mask = txt_tokens.attention_mask.to(device=device)

            prompt_embeds_list.append(glyph_text_embeds)
            prompt_embeds_mask_list.append(glyph_text_embeds_mask)

        prompt_embeds = torch.cat(prompt_embeds_list, dim=0)
        prompt_embeds_mask = torch.cat(prompt_embeds_mask_list, dim=0)
        return prompt_embeds, prompt_embeds_mask

    def encode_prompt(
        self,
        prompt: str | list[str],
        device: torch.device | None = None,
        dtype: torch.dtype | None = None,
        batch_size: int = 1,
        num_videos_per_prompt: int = 1,
        prompt_embeds: torch.Tensor | None = None,
        prompt_embeds_mask: torch.Tensor | None = None,
        prompt_embeds_2: torch.Tensor | None = None,
        prompt_embeds_mask_2: torch.Tensor | None = None,
    ):
        r"""
        Encode `prompt` with both text encoders (Qwen2.5-VL and ByT5 glyph encoder).

        Args:
            prompt (`str` or `list[str]`, *optional*):
                prompt to be encoded
            device: (`torch.device`):
                torch device
            batch_size (`int`):
                batch size of prompts, defaults to 1
            num_images_per_prompt (`int`):
                number of images that should be generated per prompt
            prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated text embeddings. If not provided, text embeddings will be generated from `prompt`
                input argument.
            prompt_embeds_mask (`torch.Tensor`, *optional*):
                Pre-generated text mask.
                If not provided, text mask will be generated from `prompt` input argument.
            prompt_embeds_2 (`torch.Tensor`, *optional*):
                Pre-generated glyph text embeddings from ByT5. If not provided, will be generated from `prompt`
                input argument using self.tokenizer_2 and self.text_encoder_2.
            prompt_embeds_mask_2 (`torch.Tensor`, *optional*):
                Pre-generated glyph text mask from ByT5. If not provided, will be generated from `prompt` input
                argument using self.tokenizer_2 and self.text_encoder_2.
        """
        device = device or self._execution_device
        dtype = dtype or self.text_encoder.dtype

        if prompt is None:
            prompt = [""] * batch_size

        prompt = [prompt] if isinstance(prompt, str) else prompt

        if prompt_embeds is None:
            prompt_embeds, prompt_embeds_mask = self._get_mllm_prompt_embeds(
                tokenizer=self.tokenizer,
                text_encoder=self.text_encoder,
                prompt=prompt,
                device=device,
                tokenizer_max_length=self.tokenizer_max_length,
                system_message=self.system_message,
                crop_start=self.prompt_template_encode_start_idx,
            )

        if prompt_embeds_2 is None:
            prompt_embeds_2, prompt_embeds_mask_2 = self._get_byt5_prompt_embeds(
                tokenizer=self.tokenizer_2,
                text_encoder=self.text_encoder_2,
                prompt=prompt,
                device=device,
                tokenizer_max_length=self.tokenizer_2_max_length,
            )

        # Duplicate embeddings and masks per prompt so the leading dimension becomes
        # batch_size * num_videos_per_prompt, as expected by the transformer.
        _, seq_len, _ = prompt_embeds.shape
        prompt_embeds = prompt_embeds.repeat(1, num_videos_per_prompt, 1)
        prompt_embeds = prompt_embeds.view(batch_size * num_videos_per_prompt, seq_len, -1)
        prompt_embeds_mask = prompt_embeds_mask.repeat(1, num_videos_per_prompt, 1)
        prompt_embeds_mask = prompt_embeds_mask.view(batch_size * num_videos_per_prompt, seq_len)

        _, seq_len_2, _ = prompt_embeds_2.shape
        prompt_embeds_2 = prompt_embeds_2.repeat(1, num_videos_per_prompt, 1)
        prompt_embeds_2 = prompt_embeds_2.view(batch_size * num_videos_per_prompt, seq_len_2, -1)
        prompt_embeds_mask_2 = prompt_embeds_mask_2.repeat(1, num_videos_per_prompt, 1)
        prompt_embeds_mask_2 = prompt_embeds_mask_2.view(batch_size * num_videos_per_prompt, seq_len_2)

        prompt_embeds = prompt_embeds.to(dtype=dtype, device=device)
        prompt_embeds_mask = prompt_embeds_mask.to(dtype=dtype, device=device)
        prompt_embeds_2 = prompt_embeds_2.to(dtype=dtype, device=device)
        prompt_embeds_mask_2 = prompt_embeds_mask_2.to(dtype=dtype, device=device)

        return prompt_embeds, prompt_embeds_mask, prompt_embeds_2, prompt_embeds_mask_2

    def check_inputs(
        self,
        prompt,
        height,
        width,
        negative_prompt=None,
        prompt_embeds=None,
        negative_prompt_embeds=None,
        prompt_embeds_mask=None,
        negative_prompt_embeds_mask=None,
        prompt_embeds_2=None,
        prompt_embeds_mask_2=None,
        negative_prompt_embeds_2=None,
        negative_prompt_embeds_mask_2=None,
    ):
        # Validate mutually exclusive / co-required call arguments before any heavy work is done.
        if height is None and width is not None:
            raise ValueError("If `width` is provided, `height` also have to be provided.")
        elif width is None and height is not None:
            raise ValueError("If `height` is provided, `width` also have to be provided.")

        if prompt is not None and prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
                " only forward one of the two."
            )
        elif prompt is None and prompt_embeds is None:
            raise ValueError(
                "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
            )
        elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if negative_prompt is not None and negative_prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
                f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
            )

        # Each set of pre-computed embeddings must come with its matching attention mask.
        if prompt_embeds is not None and prompt_embeds_mask is None:
            raise ValueError(
                "If `prompt_embeds` are provided, `prompt_embeds_mask` also have to be passed. Make sure to generate `prompt_embeds_mask` from the same text encoder that was used to generate `prompt_embeds`."
            )
        if negative_prompt_embeds is not None and negative_prompt_embeds_mask is None:
            raise ValueError(
                "If `negative_prompt_embeds` are provided, `negative_prompt_embeds_mask` also have to be passed. Make sure to generate `negative_prompt_embeds_mask` from the same text encoder that was used to generate `negative_prompt_embeds`."
            )
        if prompt is None and prompt_embeds_2 is None:
            raise ValueError(
                "Provide either `prompt` or `prompt_embeds_2`. Cannot leave both `prompt` and `prompt_embeds_2` undefined."
            )
        if prompt_embeds_2 is not None and prompt_embeds_mask_2 is None:
            raise ValueError(
                "If `prompt_embeds_2` are provided, `prompt_embeds_mask_2` also have to be passed. Make sure to generate `prompt_embeds_mask_2` from the same text encoder that was used to generate `prompt_embeds_2`."
            )
        if negative_prompt_embeds_2 is not None and negative_prompt_embeds_mask_2 is None:
            raise ValueError(
                "If `negative_prompt_embeds_2` are provided, `negative_prompt_embeds_mask_2` also have to be passed. Make sure to generate `negative_prompt_embeds_mask_2` from the same text encoder that was used to generate `negative_prompt_embeds_2`."
            )

    def prepare_latents(
        self,
        batch_size: int,
        num_channels_latents: int = 32,
        height: int = 720,
        width: int = 1280,
        num_frames: int = 129,
        dtype: torch.dtype | None = None,
        device: torch.device | None = None,
        generator: torch.Generator | list[torch.Generator] | None = None,
        latents: torch.Tensor | None = None,
    ) -> torch.Tensor:
        """
        Sample the initial noise latents, or reuse user-supplied `latents` (moved to `device`/`dtype`).

        The latent shape is (B, C, 1 + (num_frames - 1) // temporal_ratio, H // spatial_ratio, W // spatial_ratio).
        """
        if latents is not None:
            return latents.to(device=device, dtype=dtype)

        shape = (
            batch_size,
            num_channels_latents,
            (num_frames - 1) // self.vae_scale_factor_temporal + 1,
            int(height) // self.vae_scale_factor_spatial,
            int(width) // self.vae_scale_factor_spatial,
        )

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        return latents

    def prepare_cond_latents_and_mask(self, latents, dtype: torch.dtype | None, device: torch.device | None):
        """
        Prepare conditional latents and mask for t2v generation.

        Text-to-video has no conditioning frames, so both tensors are all zeros; they are only concatenated so
        the transformer input layout matches the image-conditioned variant.

        Args:
            latents: Main latents tensor (B, C, F, H, W)

        Returns:
            tuple: (cond_latents_concat, mask_concat) - both are zero tensors for t2v
        """
        batch, channels, frames, height, width = latents.shape
        cond_latents_concat = torch.zeros(batch, channels, frames, height, width, dtype=dtype, device=device)
        mask_concat = torch.zeros(batch, 1, frames, height, width, dtype=dtype, device=device)
        return cond_latents_concat, mask_concat

    @property
    def num_timesteps(self):
        # Number of timesteps of the current/most recent __call__; set inside __call__.
        return self._num_timesteps

    @property
    def attention_kwargs(self):
        return self._attention_kwargs

    @property
    def current_timestep(self):
        # Timestep currently being denoised; None outside the denoising loop.
        return self._current_timestep

    @property
    def interrupt(self):
        return self._interrupt

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        prompt: str | list[str] = None,
        negative_prompt: str | list[str] = None,
        height: int | None = None,
        width: int | None = None,
        num_frames: int = 121,
        num_inference_steps: int = 50,
        sigmas: list[float] = None,
        num_videos_per_prompt: int | None = 1,
        generator: torch.Generator | list[torch.Generator] | None = None,
        latents: torch.Tensor | None = None,
        prompt_embeds: torch.Tensor | None = None,
        prompt_embeds_mask: torch.Tensor | None = None,
        negative_prompt_embeds: torch.Tensor | None = None,
        negative_prompt_embeds_mask: torch.Tensor | None = None,
        prompt_embeds_2: torch.Tensor | None = None,
        prompt_embeds_mask_2: torch.Tensor | None = None,
        negative_prompt_embeds_2: torch.Tensor | None = None,
        negative_prompt_embeds_mask_2: torch.Tensor | None = None,
        output_type: str | None = "np",
        return_dict: bool = True,
        attention_kwargs: dict[str, Any] | None = None,
    ):
        r"""
        The call function to the pipeline for generation.

        Args:
            prompt (`str` or `list[str]`, *optional*):
                The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
                instead.
            negative_prompt (`str` or `list[str]`, *optional*):
                The prompt or prompts not to guide the image generation. If not defined, one has to pass
                `negative_prompt_embeds` instead.
            height (`int`, *optional*):
                The height in pixels of the generated video.
            width (`int`, *optional*):
                The width in pixels of the generated video.
            num_frames (`int`, defaults to `121`):
                The number of frames in the generated video.
            num_inference_steps (`int`, defaults to `50`):
                The number of denoising steps. More denoising steps usually lead to a higher quality video at the
                expense of slower inference.
            sigmas (`list[float]`, *optional*):
                Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in
                their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
                passed will be used.
            num_videos_per_prompt (`int`, *optional*, defaults to 1):
                The number of videos to generate per prompt.
            generator (`torch.Generator` or `list[torch.Generator]`, *optional*):
                A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
                generation deterministic.
            latents (`torch.Tensor`, *optional*):
                Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for video
                generation. Can be used to tweak the same generation with different prompts. If not provided, a
                latents tensor is generated by sampling using the supplied random `generator`.
            prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
                provided, text embeddings are generated from the `prompt` input argument.
            prompt_embeds_mask (`torch.Tensor`, *optional*):
                Pre-generated mask for prompt embeddings.
            negative_prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
                argument.
            negative_prompt_embeds_mask (`torch.Tensor`, *optional*):
                Pre-generated mask for negative prompt embeddings.
            prompt_embeds_2 (`torch.Tensor`, *optional*):
                Pre-generated text embeddings from the second text encoder. Can be used to easily tweak text inputs.
            prompt_embeds_mask_2 (`torch.Tensor`, *optional*):
                Pre-generated mask for prompt embeddings from the second text encoder.
            negative_prompt_embeds_2 (`torch.Tensor`, *optional*):
                Pre-generated negative text embeddings from the second text encoder.
            negative_prompt_embeds_mask_2 (`torch.Tensor`, *optional*):
                Pre-generated mask for negative prompt embeddings from the second text encoder.
            output_type (`str`, *optional*, defaults to `"np"`):
                The output format of the generated video. Choose between "np", "pt", or "latent".
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`HunyuanVideo15PipelineOutput`] instead of a plain tuple.
            attention_kwargs (`dict`, *optional*):
                A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
                `self.processor` in
                [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).

        Examples:

        Returns:
            [`~HunyuanVideo15PipelineOutput`] or `tuple`: If `return_dict` is `True`,
            [`HunyuanVideo15PipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is
            a list with the generated videos.
        """
        # 1. Check inputs. Raise error if not correct
        self.check_inputs(
            prompt=prompt,
            height=height,
            width=width,
            negative_prompt=negative_prompt,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            prompt_embeds_mask=prompt_embeds_mask,
            negative_prompt_embeds_mask=negative_prompt_embeds_mask,
            prompt_embeds_2=prompt_embeds_2,
            prompt_embeds_mask_2=prompt_embeds_mask_2,
            negative_prompt_embeds_2=negative_prompt_embeds_2,
            negative_prompt_embeds_mask_2=negative_prompt_embeds_mask_2,
        )

        # When neither dimension is given, derive both from the default aspect ratio and target size.
        if height is None and width is None:
            height, width = self.video_processor.calculate_default_height_width(
                self.default_aspect_ratio[1], self.default_aspect_ratio[0], self.target_size
            )

        self._attention_kwargs = attention_kwargs
        self._current_timestep = None
        self._interrupt = False

        device = self._execution_device

        # 2. Define call parameters
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        # 3. Encode input prompt
        prompt_embeds, prompt_embeds_mask, prompt_embeds_2, prompt_embeds_mask_2 = self.encode_prompt(
            prompt=prompt,
            device=device,
            dtype=self.transformer.dtype,
            batch_size=batch_size,
            num_videos_per_prompt=num_videos_per_prompt,
            prompt_embeds=prompt_embeds,
            prompt_embeds_mask=prompt_embeds_mask,
            prompt_embeds_2=prompt_embeds_2,
            prompt_embeds_mask_2=prompt_embeds_mask_2,
        )

        # Negative embeddings are only needed when the guider will actually run an unconditional branch.
        if self.guider._enabled and self.guider.num_conditions > 1:
            (
                negative_prompt_embeds,
                negative_prompt_embeds_mask,
                negative_prompt_embeds_2,
                negative_prompt_embeds_mask_2,
            ) = self.encode_prompt(
                prompt=negative_prompt,
                device=device,
                dtype=self.transformer.dtype,
                batch_size=batch_size,
                num_videos_per_prompt=num_videos_per_prompt,
                prompt_embeds=negative_prompt_embeds,
                prompt_embeds_mask=negative_prompt_embeds_mask,
                prompt_embeds_2=negative_prompt_embeds_2,
                prompt_embeds_mask_2=negative_prompt_embeds_mask_2,
            )

        # 4. Prepare timesteps
        sigmas = np.linspace(1.0, 0.0, num_inference_steps + 1)[:-1] if sigmas is None else sigmas
        timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, sigmas=sigmas)

        # 5. Prepare latent variables
        latents = self.prepare_latents(
            batch_size * num_videos_per_prompt,
            self.num_channels_latents,
            height,
            width,
            num_frames,
            self.transformer.dtype,
            device,
            generator,
            latents,
        )
        cond_latents_concat, mask_concat = self.prepare_cond_latents_and_mask(latents, self.transformer.dtype, device)

        # t2v has no reference image, so the image-embedding sequence is all zeros.
        # NOTE(review): this allocates `batch_size` rows while the latents use
        # `batch_size * num_videos_per_prompt` — confirm broadcasting is correct when num_videos_per_prompt > 1.
        image_embeds = torch.zeros(
            batch_size,
            self.vision_num_semantic_tokens,
            self.vision_states_dim,
            dtype=self.transformer.dtype,
            device=device,
        )

        # 7. Denoising loop
        num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
        self._num_timesteps = len(timesteps)

        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                if self.interrupt:
                    continue

                self._current_timestep = t
                # Conditioning latents and mask are concatenated along channels (zeros for t2v).
                latent_model_input = torch.cat([latents, cond_latents_concat, mask_concat], dim=1)

                # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
                timestep = t.expand(latent_model_input.shape[0]).to(latent_model_input.dtype)

                # Step 1: Collect model inputs needed for the guidance method
                # conditional inputs should always be first element in the tuple
                guider_inputs = {
                    "encoder_hidden_states": (prompt_embeds, negative_prompt_embeds),
                    "encoder_attention_mask": (prompt_embeds_mask, negative_prompt_embeds_mask),
                    "encoder_hidden_states_2": (prompt_embeds_2, negative_prompt_embeds_2),
                    "encoder_attention_mask_2": (prompt_embeds_mask_2, negative_prompt_embeds_mask_2),
                }

                # Step 2: Update guider's internal state for this denoising step
                self.guider.set_state(step=i, num_inference_steps=num_inference_steps, timestep=t)

                # Step 3: Prepare batched model inputs based on the guidance method
                # The guider splits model inputs into separate batches for conditional/unconditional predictions.
                # For CFG with guider_inputs = {"encoder_hidden_states": (prompt_embeds, negative_prompt_embeds)}:
                # you will get a guider_state with two batches:
                # guider_state = [
                #     {"encoder_hidden_states": prompt_embeds, "__guidance_identifier__": "pred_cond"},  # conditional batch
                #     {"encoder_hidden_states": negative_prompt_embeds, "__guidance_identifier__": "pred_uncond"},  # unconditional batch
                # ]
                # Other guidance methods may return 1 batch (no guidance) or 3+ batches (e.g., PAG, APG).
                guider_state = self.guider.prepare_inputs(guider_inputs)

                # Step 4: Run the denoiser for each batch
                # Each batch in guider_state represents a different conditioning (conditional, unconditional, etc.).
                # We run the model once per batch and store the noise prediction in guider_state_batch.noise_pred.
                for guider_state_batch in guider_state:
                    self.guider.prepare_models(self.transformer)

                    # Extract conditioning kwargs for this batch (e.g., encoder_hidden_states)
                    cond_kwargs = {
                        input_name: getattr(guider_state_batch, input_name) for input_name in guider_inputs.keys()
                    }

                    # e.g. "pred_cond"/"pred_uncond"
                    context_name = getattr(guider_state_batch, self.guider._identifier_key)
                    with self.transformer.cache_context(context_name):
                        # Run denoiser and store noise prediction in this batch
                        guider_state_batch.noise_pred = self.transformer(
                            hidden_states=latent_model_input,
                            image_embeds=image_embeds,
                            timestep=timestep,
                            attention_kwargs=self.attention_kwargs,
                            return_dict=False,
                            **cond_kwargs,
                        )[0]

                    # Cleanup model (e.g., remove hooks)
                    self.guider.cleanup_models(self.transformer)

                # Step 5: Combine predictions using the guidance method
                # The guider takes all noise predictions from guider_state and combines them according to the
                # guidance algorithm, e.g. for CFG:
                #     noise_pred = pred_uncond + guidance_scale * (pred_cond - pred_uncond)
                # and returns GuiderOutput(pred=noise_pred, pred_cond=pred_cond, pred_uncond=pred_uncond).
                noise_pred = self.guider(guider_state)[0]

                # compute the previous noisy sample x_t -> x_t-1
                latents_dtype = latents.dtype
                latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0]

                if latents.dtype != latents_dtype:
                    if torch.backends.mps.is_available():
                        # some platforms (eg. apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272
                        latents = latents.to(latents_dtype)

                # call the callback, if provided
                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                    progress_bar.update()

                if XLA_AVAILABLE:
                    xm.mark_step()

        self._current_timestep = None

        # 8. decode the latents to video and postprocess
        if not output_type == "latent":
            latents = latents.to(self.vae.dtype) / self.vae.config.scaling_factor
            video = self.vae.decode(latents, return_dict=False)[0]
            video = self.video_processor.postprocess_video(video, output_type=output_type)
        else:
            video = latents

        # Offload all models
        self.maybe_free_model_hooks()

        if not return_dict:
            return (video,)

        return HunyuanVideo15PipelineOutput(frames=video)
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/pipelines/hunyuan_video1_5/pipeline_hunyuan_video1_5.py", "license": "Apache License 2.0", "lines": 726, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:src/diffusers/pipelines/hunyuan_video1_5/pipeline_hunyuan_video1_5_image2video.py
# Copyright 2025 The HunyuanVideo Team and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import re from typing import Any import numpy as np import PIL import torch from transformers import ( ByT5Tokenizer, Qwen2_5_VLTextModel, Qwen2Tokenizer, SiglipImageProcessor, SiglipVisionModel, T5EncoderModel, ) from ...guiders import ClassifierFreeGuidance from ...models import AutoencoderKLHunyuanVideo15, HunyuanVideo15Transformer3DModel from ...schedulers import FlowMatchEulerDiscreteScheduler from ...utils import is_torch_xla_available, logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline from .image_processor import HunyuanVideo15ImageProcessor from .pipeline_output import HunyuanVideo15PipelineOutput if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```python >>> import torch >>> from diffusers import HunyuanVideo15ImageToVideoPipeline >>> from diffusers.utils import export_to_video >>> model_id = "hunyuanvideo-community/HunyuanVideo-1.5-480p_i2v" >>> pipe = HunyuanVideo15ImageToVideoPipeline.from_pretrained(model_id, torch_dtype=torch.float16) >>> pipe.vae.enable_tiling() >>> pipe.to("cuda") >>> image = 
load_image("https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/wan_i2v_input.JPG") >>> output = pipe( ... prompt="Summer beach vacation style, a white cat wearing sunglasses sits on a surfboard. The fluffy-furred feline gazes directly at the camera with a relaxed expression. Blurred beach scenery forms the background featuring crystal-clear waters, distant green hills, and a blue sky dotted with white clouds. The cat assumes a naturally relaxed posture, as if savoring the sea breeze and warm sunlight. A close-up shot highlights the feline's intricate details and the refreshing atmosphere of the seaside.", ... image=image, ... num_inference_steps=50, ... ).frames[0] >>> export_to_video(output, "output.mp4", fps=24) ``` """ # Copied from diffusers.pipelines.hunyuan_video1_5.pipeline_hunyuan_video1_5.format_text_input def format_text_input(prompt: list[str], system_message: str) -> list[dict[str, Any]]: """ Apply text to template. Args: prompt (list[str]): Input text. system_message (str): System message. Returns: list[dict[str, Any]]: List of chat conversation. """ template = [ [{"role": "system", "content": system_message}, {"role": "user", "content": p if p else " "}] for p in prompt ] return template # Copied from diffusers.pipelines.hunyuan_video1_5.pipeline_hunyuan_video1_5.extract_glyph_texts def extract_glyph_texts(prompt: str) -> list[str]: """ Extract glyph texts from prompt using regex pattern. Args: prompt: Input prompt string Returns: List of extracted glyph texts """ pattern = r"\"(.*?)\"|“(.*?)”" matches = re.findall(pattern, prompt) result = [match[0] or match[1] for match in matches] result = list(dict.fromkeys(result)) if len(result) > 1 else result if result: formatted_result = ". ".join([f'Text "{text}"' for text in result]) + ". 
" else: formatted_result = None return formatted_result # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents def retrieve_latents( encoder_output: torch.Tensor, generator: torch.Generator | None = None, sample_mode: str = "sample" ): if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": return encoder_output.latent_dist.sample(generator) elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": return encoder_output.latent_dist.mode() elif hasattr(encoder_output, "latents"): return encoder_output.latents else: raise AttributeError("Could not access latents of provided encoder_output") # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps def retrieve_timesteps( scheduler, num_inference_steps: int | None = None, device: str | torch.device | None = None, timesteps: list[int] | None = None, sigmas: list[float] | None = None, **kwargs, ): r""" Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. Args: scheduler (`SchedulerMixin`): The scheduler to get timesteps from. num_inference_steps (`int`): The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` must be `None`. device (`str` or `torch.device`, *optional*): The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. timesteps (`list[int]`, *optional*): Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, `num_inference_steps` and `sigmas` must be `None`. sigmas (`list[float]`, *optional*): Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, `num_inference_steps` and `timesteps` must be `None`. 
Returns: `tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the second element is the number of inference steps. """ if timesteps is not None and sigmas is not None: raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") if timesteps is not None: accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" timestep schedules. Please check whether you are using the correct scheduler." ) scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif sigmas is not None: accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accept_sigmas: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" sigmas schedules. Please check whether you are using the correct scheduler." ) scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return timesteps, num_inference_steps class HunyuanVideo15ImageToVideoPipeline(DiffusionPipeline): r""" Pipeline for image-to-video generation using HunyuanVideo1.5. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). Args: transformer ([`HunyuanVideo15Transformer3DModel`]): Conditional Transformer (MMDiT) architecture to denoise the encoded video latents. 
scheduler ([`FlowMatchEulerDiscreteScheduler`]): A scheduler to be used in combination with `transformer` to denoise the encoded video latents. vae ([`AutoencoderKLHunyuanVideo15`]): Variational Auto-Encoder (VAE) Model to encode and decode videos to and from latent representations. text_encoder ([`Qwen2.5-VL-7B-Instruct`]): [Qwen2.5-VL-7B-Instruct](https://huggingface.co/Qwen/Qwen2.5-VL-7B-Instruct), specifically the [Qwen2.5-VL-7B-Instruct](https://huggingface.co/Qwen/Qwen2.5-VL-7B-Instruct) variant. tokenizer (`Qwen2Tokenizer`): Tokenizer of class [Qwen2Tokenizer]. text_encoder_2 ([`T5EncoderModel`]): [T5EncoderModel](https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5EncoderModel) variant. tokenizer_2 (`ByT5Tokenizer`): Tokenizer of class [ByT5Tokenizer] guider ([`ClassifierFreeGuidance`]): [ClassifierFreeGuidance]for classifier free guidance. image_encoder ([`SiglipVisionModel`]): [SiglipVisionModel](https://huggingface.co/docs/transformers/en/model_doc/siglip#transformers.SiglipVisionModel) variant. feature_extractor ([`SiglipImageProcessor`]): [SiglipImageProcessor](https://huggingface.co/docs/transformers/en/model_doc/siglip#transformers.SiglipImageProcessor) variant. 
""" model_cpu_offload_seq = "image_encoder->text_encoder->transformer->vae" def __init__( self, text_encoder: Qwen2_5_VLTextModel, tokenizer: Qwen2Tokenizer, transformer: HunyuanVideo15Transformer3DModel, vae: AutoencoderKLHunyuanVideo15, scheduler: FlowMatchEulerDiscreteScheduler, text_encoder_2: T5EncoderModel, tokenizer_2: ByT5Tokenizer, guider: ClassifierFreeGuidance, image_encoder: SiglipVisionModel, feature_extractor: SiglipImageProcessor, ): super().__init__() self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, transformer=transformer, scheduler=scheduler, text_encoder_2=text_encoder_2, tokenizer_2=tokenizer_2, guider=guider, image_encoder=image_encoder, feature_extractor=feature_extractor, ) self.vae_scale_factor_temporal = self.vae.temporal_compression_ratio if getattr(self, "vae", None) else 4 self.vae_scale_factor_spatial = self.vae.spatial_compression_ratio if getattr(self, "vae", None) else 16 self.video_processor = HunyuanVideo15ImageProcessor( vae_scale_factor=self.vae_scale_factor_spatial, do_resize=False, do_convert_rgb=True ) self.target_size = self.transformer.config.target_size if getattr(self, "transformer", None) else 640 self.vision_states_dim = ( self.transformer.config.image_embed_dim if getattr(self, "transformer", None) else 1152 ) self.num_channels_latents = self.vae.config.latent_channels if hasattr(self, "vae") else 32 # fmt: off self.system_message = "You are a helpful assistant. Describe the video by detailing the following aspects: \ 1. The main content and theme of the video. \ 2. The color, shape, size, texture, quantity, text, and spatial relationships of the objects. \ 3. Actions, events, behaviors temporal relationships, physical movement changes of the objects. \ 4. background environment, light, style and atmosphere. \ 5. camera angles, movements, and transitions used in the video." 
# fmt: on self.prompt_template_encode_start_idx = 108 self.tokenizer_max_length = 1000 self.tokenizer_2_max_length = 256 self.vision_num_semantic_tokens = 729 @staticmethod # Copied from diffusers.pipelines.hunyuan_video1_5.pipeline_hunyuan_video1_5.HunyuanVideo15Pipeline._get_mllm_prompt_embeds def _get_mllm_prompt_embeds( text_encoder: Qwen2_5_VLTextModel, tokenizer: Qwen2Tokenizer, prompt: str | list[str], device: torch.device, tokenizer_max_length: int = 1000, num_hidden_layers_to_skip: int = 2, # fmt: off system_message: str = "You are a helpful assistant. Describe the video by detailing the following aspects: \ 1. The main content and theme of the video. \ 2. The color, shape, size, texture, quantity, text, and spatial relationships of the objects. \ 3. Actions, events, behaviors temporal relationships, physical movement changes of the objects. \ 4. background environment, light, style and atmosphere. \ 5. camera angles, movements, and transitions used in the video.", # fmt: on crop_start: int = 108, ) -> tuple[torch.Tensor, torch.Tensor]: prompt = [prompt] if isinstance(prompt, str) else prompt prompt = format_text_input(prompt, system_message) text_inputs = tokenizer.apply_chat_template( prompt, add_generation_prompt=True, tokenize=True, return_dict=True, padding="max_length", max_length=tokenizer_max_length + crop_start, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids.to(device=device) prompt_attention_mask = text_inputs.attention_mask.to(device=device) prompt_embeds = text_encoder( input_ids=text_input_ids, attention_mask=prompt_attention_mask, output_hidden_states=True, ).hidden_states[-(num_hidden_layers_to_skip + 1)] if crop_start is not None and crop_start > 0: prompt_embeds = prompt_embeds[:, crop_start:] prompt_attention_mask = prompt_attention_mask[:, crop_start:] return prompt_embeds, prompt_attention_mask @staticmethod # Copied from 
diffusers.pipelines.hunyuan_video1_5.pipeline_hunyuan_video1_5.HunyuanVideo15Pipeline._get_byt5_prompt_embeds def _get_byt5_prompt_embeds( tokenizer: ByT5Tokenizer, text_encoder: T5EncoderModel, prompt: str | list[str], device: torch.device, tokenizer_max_length: int = 256, ): prompt = [prompt] if isinstance(prompt, str) else prompt glyph_texts = [extract_glyph_texts(p) for p in prompt] prompt_embeds_list = [] prompt_embeds_mask_list = [] for glyph_text in glyph_texts: if glyph_text is None: glyph_text_embeds = torch.zeros( (1, tokenizer_max_length, text_encoder.config.d_model), device=device, dtype=text_encoder.dtype ) glyph_text_embeds_mask = torch.zeros((1, tokenizer_max_length), device=device, dtype=torch.int64) else: txt_tokens = tokenizer( glyph_text, padding="max_length", max_length=tokenizer_max_length, truncation=True, add_special_tokens=True, return_tensors="pt", ).to(device) glyph_text_embeds = text_encoder( input_ids=txt_tokens.input_ids, attention_mask=txt_tokens.attention_mask.float(), )[0] glyph_text_embeds = glyph_text_embeds.to(device=device) glyph_text_embeds_mask = txt_tokens.attention_mask.to(device=device) prompt_embeds_list.append(glyph_text_embeds) prompt_embeds_mask_list.append(glyph_text_embeds_mask) prompt_embeds = torch.cat(prompt_embeds_list, dim=0) prompt_embeds_mask = torch.cat(prompt_embeds_mask_list, dim=0) return prompt_embeds, prompt_embeds_mask @staticmethod def _get_image_latents( vae: AutoencoderKLHunyuanVideo15, image_processor: HunyuanVideo15ImageProcessor, image: PIL.Image.Image, height: int, width: int, device: torch.device, ) -> torch.Tensor: vae_dtype = vae.dtype image_tensor = image_processor.preprocess(image, height=height, width=width).to(device, dtype=vae_dtype) image_tensor = image_tensor.unsqueeze(2) image_latents = retrieve_latents(vae.encode(image_tensor), sample_mode="argmax") image_latents = image_latents * vae.config.scaling_factor return image_latents @staticmethod def _get_image_embeds( image_encoder: 
SiglipVisionModel, feature_extractor: SiglipImageProcessor, image: PIL.Image.Image, device: torch.device, ) -> torch.Tensor: image_encoder_dtype = next(image_encoder.parameters()).dtype image = feature_extractor.preprocess(images=image, do_resize=True, return_tensors="pt", do_convert_rgb=True) image = image.to(device=device, dtype=image_encoder_dtype) image_enc_hidden_states = image_encoder(**image).last_hidden_state return image_enc_hidden_states def encode_image( self, image: PIL.Image.Image, batch_size: int, device: torch.device, dtype: torch.dtype, ) -> torch.Tensor: image_embeds = self._get_image_embeds( image_encoder=self.image_encoder, feature_extractor=self.feature_extractor, image=image, device=device, ) image_embeds = image_embeds.repeat(batch_size, 1, 1) image_embeds = image_embeds.to(device=device, dtype=dtype) return image_embeds # Copied from diffusers.pipelines.hunyuan_video1_5.pipeline_hunyuan_video1_5.HunyuanVideo15Pipeline.encode_prompt def encode_prompt( self, prompt: str | list[str], device: torch.device | None = None, dtype: torch.dtype | None = None, batch_size: int = 1, num_videos_per_prompt: int = 1, prompt_embeds: torch.Tensor | None = None, prompt_embeds_mask: torch.Tensor | None = None, prompt_embeds_2: torch.Tensor | None = None, prompt_embeds_mask_2: torch.Tensor | None = None, ): r""" Args: prompt (`str` or `list[str]`, *optional*): prompt to be encoded device: (`torch.device`): torch device batch_size (`int`): batch size of prompts, defaults to 1 num_images_per_prompt (`int`): number of images that should be generated per prompt prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. If not provided, text embeddings will be generated from `prompt` input argument. prompt_embeds_mask (`torch.Tensor`, *optional*): Pre-generated text mask. If not provided, text mask will be generated from `prompt` input argument. prompt_embeds_2 (`torch.Tensor`, *optional*): Pre-generated glyph text embeddings from ByT5. 
If not provided, will be generated from `prompt` input argument using self.tokenizer_2 and self.text_encoder_2. prompt_embeds_mask_2 (`torch.Tensor`, *optional*): Pre-generated glyph text mask from ByT5. If not provided, will be generated from `prompt` input argument using self.tokenizer_2 and self.text_encoder_2. """ device = device or self._execution_device dtype = dtype or self.text_encoder.dtype if prompt is None: prompt = [""] * batch_size prompt = [prompt] if isinstance(prompt, str) else prompt if prompt_embeds is None: prompt_embeds, prompt_embeds_mask = self._get_mllm_prompt_embeds( tokenizer=self.tokenizer, text_encoder=self.text_encoder, prompt=prompt, device=device, tokenizer_max_length=self.tokenizer_max_length, system_message=self.system_message, crop_start=self.prompt_template_encode_start_idx, ) if prompt_embeds_2 is None: prompt_embeds_2, prompt_embeds_mask_2 = self._get_byt5_prompt_embeds( tokenizer=self.tokenizer_2, text_encoder=self.text_encoder_2, prompt=prompt, device=device, tokenizer_max_length=self.tokenizer_2_max_length, ) _, seq_len, _ = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_videos_per_prompt, 1) prompt_embeds = prompt_embeds.view(batch_size * num_videos_per_prompt, seq_len, -1) prompt_embeds_mask = prompt_embeds_mask.repeat(1, num_videos_per_prompt, 1) prompt_embeds_mask = prompt_embeds_mask.view(batch_size * num_videos_per_prompt, seq_len) _, seq_len_2, _ = prompt_embeds_2.shape prompt_embeds_2 = prompt_embeds_2.repeat(1, num_videos_per_prompt, 1) prompt_embeds_2 = prompt_embeds_2.view(batch_size * num_videos_per_prompt, seq_len_2, -1) prompt_embeds_mask_2 = prompt_embeds_mask_2.repeat(1, num_videos_per_prompt, 1) prompt_embeds_mask_2 = prompt_embeds_mask_2.view(batch_size * num_videos_per_prompt, seq_len_2) prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) prompt_embeds_mask = prompt_embeds_mask.to(dtype=dtype, device=device) prompt_embeds_2 = prompt_embeds_2.to(dtype=dtype, device=device) 
prompt_embeds_mask_2 = prompt_embeds_mask_2.to(dtype=dtype, device=device) return prompt_embeds, prompt_embeds_mask, prompt_embeds_2, prompt_embeds_mask_2 def check_inputs( self, prompt, image: PIL.Image.Image, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, prompt_embeds_mask=None, negative_prompt_embeds_mask=None, prompt_embeds_2=None, prompt_embeds_mask_2=None, negative_prompt_embeds_2=None, negative_prompt_embeds_mask_2=None, ): if not isinstance(image, PIL.Image.Image): raise ValueError(f"`image` has to be of type `PIL.Image.Image` but is {type(image)}") if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." ) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) if prompt_embeds is not None and prompt_embeds_mask is None: raise ValueError( "If `prompt_embeds` are provided, `prompt_embeds_mask` also have to be passed. Make sure to generate `prompt_embeds_mask` from the same text encoder that was used to generate `prompt_embeds`." ) if negative_prompt_embeds is not None and negative_prompt_embeds_mask is None: raise ValueError( "If `negative_prompt_embeds` are provided, `negative_prompt_embeds_mask` also have to be passed. 
Make sure to generate `negative_prompt_embeds_mask` from the same text encoder that was used to generate `negative_prompt_embeds`." ) if prompt is None and prompt_embeds_2 is None: raise ValueError( "Provide either `prompt` or `prompt_embeds_2`. Cannot leave both `prompt` and `prompt_embeds_2` undefined." ) if prompt_embeds_2 is not None and prompt_embeds_mask_2 is None: raise ValueError( "If `prompt_embeds_2` are provided, `prompt_embeds_mask_2` also have to be passed. Make sure to generate `prompt_embeds_mask_2` from the same text encoder that was used to generate `prompt_embeds_2`." ) if negative_prompt_embeds_2 is not None and negative_prompt_embeds_mask_2 is None: raise ValueError( "If `negative_prompt_embeds_2` are provided, `negative_prompt_embeds_mask_2` also have to be passed. Make sure to generate `negative_prompt_embeds_mask_2` from the same text encoder that was used to generate `negative_prompt_embeds_2`." ) # Copied from diffusers.pipelines.hunyuan_video1_5.pipeline_hunyuan_video1_5.HunyuanVideo15Pipeline.prepare_latents def prepare_latents( self, batch_size: int, num_channels_latents: int = 32, height: int = 720, width: int = 1280, num_frames: int = 129, dtype: torch.dtype | None = None, device: torch.device | None = None, generator: torch.Generator | list[torch.Generator] | None = None, latents: torch.Tensor | None = None, ) -> torch.Tensor: if latents is not None: return latents.to(device=device, dtype=dtype) shape = ( batch_size, num_channels_latents, (num_frames - 1) // self.vae_scale_factor_temporal + 1, int(height) // self.vae_scale_factor_spatial, int(width) // self.vae_scale_factor_spatial, ) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
) latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) return latents def prepare_cond_latents_and_mask( self, latents: torch.Tensor, image: PIL.Image.Image, batch_size: int, height: int, width: int, dtype: torch.dtype, device: torch.device, ): """ Prepare conditional latents and mask for t2v generation. Args: latents: Main latents tensor (B, C, F, H, W) Returns: tuple: (cond_latents_concat, mask_concat) - both are zero tensors for t2v """ batch, channels, frames, height, width = latents.shape image_latents = self._get_image_latents( vae=self.vae, image_processor=self.video_processor, image=image, height=height, width=width, device=device, ) latent_condition = image_latents.repeat(batch_size, 1, frames, 1, 1) latent_condition[:, :, 1:, :, :] = 0 latent_condition = latent_condition.to(device=device, dtype=dtype) latent_mask = torch.zeros(batch, 1, frames, height, width, dtype=dtype, device=device) latent_mask[:, :, 0, :, :] = 1.0 return latent_condition, latent_mask @property def num_timesteps(self): return self._num_timesteps @property def attention_kwargs(self): return self._attention_kwargs @property def current_timestep(self): return self._current_timestep @property def interrupt(self): return self._interrupt @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, image: PIL.Image.Image, prompt: str | list[str] = None, negative_prompt: str | list[str] = None, num_frames: int = 121, num_inference_steps: int = 50, sigmas: list[float] = None, num_videos_per_prompt: int | None = 1, generator: torch.Generator | list[torch.Generator] | None = None, latents: torch.Tensor | None = None, prompt_embeds: torch.Tensor | None = None, prompt_embeds_mask: torch.Tensor | None = None, negative_prompt_embeds: torch.Tensor | None = None, negative_prompt_embeds_mask: torch.Tensor | None = None, prompt_embeds_2: torch.Tensor | None = None, prompt_embeds_mask_2: torch.Tensor | None = None, negative_prompt_embeds_2: torch.Tensor | 
None = None, negative_prompt_embeds_mask_2: torch.Tensor | None = None, output_type: str | None = "np", return_dict: bool = True, attention_kwargs: dict[str, Any] | None = None, ): r""" The call function to the pipeline for generation. Args: image (`PIL.Image.Image`): The input image to condition video generation on. prompt (`str` or `list[str]`, *optional*): The prompt or prompts to guide the video generation. If not defined, one has to pass `prompt_embeds` instead. negative_prompt (`str` or `list[str]`, *optional*): The prompt or prompts not to guide the video generation. If not defined, one has to pass `negative_prompt_embeds` instead. num_frames (`int`, defaults to `121`): The number of frames in the generated video. num_inference_steps (`int`, defaults to `50`): The number of denoising steps. More denoising steps usually lead to a higher quality video at the expense of slower inference. sigmas (`list[float]`, *optional*): Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed will be used. num_videos_per_prompt (`int`, *optional*, defaults to 1): The number of videos to generate per prompt. generator (`torch.Generator` or `list[torch.Generator]`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.Tensor`, *optional*): Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for video generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor is generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, text embeddings are generated from the `prompt` input argument. 
prompt_embeds_mask (`torch.Tensor`, *optional*): Pre-generated mask for prompt embeddings. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. negative_prompt_embeds_mask (`torch.Tensor`, *optional*): Pre-generated mask for negative prompt embeddings. prompt_embeds_2 (`torch.Tensor`, *optional*): Pre-generated text embeddings from the second text encoder. Can be used to easily tweak text inputs. prompt_embeds_mask_2 (`torch.Tensor`, *optional*): Pre-generated mask for prompt embeddings from the second text encoder. negative_prompt_embeds_2 (`torch.Tensor`, *optional*): Pre-generated negative text embeddings from the second text encoder. negative_prompt_embeds_mask_2 (`torch.Tensor`, *optional*): Pre-generated mask for negative prompt embeddings from the second text encoder. output_type (`str`, *optional*, defaults to `"np"`): The output format of the generated video. Choose between "np", "pt", or "latent". return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`HunyuanVideo15PipelineOutput`] instead of a plain tuple. attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). Examples: Returns: [`~HunyuanVideo15PipelineOutput`] or `tuple`: If `return_dict` is `True`, [`HunyuanVideo15PipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated videos. """ # 1. Check inputs. 
Raise error if not correct self.check_inputs( prompt=prompt, image=image, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, prompt_embeds_mask=prompt_embeds_mask, negative_prompt_embeds_mask=negative_prompt_embeds_mask, prompt_embeds_2=prompt_embeds_2, prompt_embeds_mask_2=prompt_embeds_mask_2, negative_prompt_embeds_2=negative_prompt_embeds_2, negative_prompt_embeds_mask_2=negative_prompt_embeds_mask_2, ) height, width = self.video_processor.calculate_default_height_width( height=image.size[1], width=image.size[0], target_size=self.target_size ) image = self.video_processor.resize(image, height=height, width=width, resize_mode="crop") self._attention_kwargs = attention_kwargs self._current_timestep = None self._interrupt = False device = self._execution_device # 2. Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] # 3. Encode image image_embeds = self.encode_image( image=image, batch_size=batch_size * num_videos_per_prompt, device=device, dtype=self.transformer.dtype, ) # 4. 
Encode input prompt prompt_embeds, prompt_embeds_mask, prompt_embeds_2, prompt_embeds_mask_2 = self.encode_prompt( prompt=prompt, device=device, dtype=self.transformer.dtype, batch_size=batch_size, num_videos_per_prompt=num_videos_per_prompt, prompt_embeds=prompt_embeds, prompt_embeds_mask=prompt_embeds_mask, prompt_embeds_2=prompt_embeds_2, prompt_embeds_mask_2=prompt_embeds_mask_2, ) if self.guider._enabled and self.guider.num_conditions > 1: ( negative_prompt_embeds, negative_prompt_embeds_mask, negative_prompt_embeds_2, negative_prompt_embeds_mask_2, ) = self.encode_prompt( prompt=negative_prompt, device=device, dtype=self.transformer.dtype, batch_size=batch_size, num_videos_per_prompt=num_videos_per_prompt, prompt_embeds=negative_prompt_embeds, prompt_embeds_mask=negative_prompt_embeds_mask, prompt_embeds_2=negative_prompt_embeds_2, prompt_embeds_mask_2=negative_prompt_embeds_mask_2, ) # 5. Prepare timesteps sigmas = np.linspace(1.0, 0.0, num_inference_steps + 1)[:-1] if sigmas is None else sigmas timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, sigmas=sigmas) # 6. Prepare latent variables latents = self.prepare_latents( batch_size=batch_size * num_videos_per_prompt, num_channels_latents=self.num_channels_latents, height=height, width=width, num_frames=num_frames, dtype=self.transformer.dtype, device=device, generator=generator, latents=latents, ) cond_latents_concat, mask_concat = self.prepare_cond_latents_and_mask( latents=latents, image=image, batch_size=batch_size * num_videos_per_prompt, height=height, width=width, dtype=self.transformer.dtype, device=device, ) # 7. 
Denoising loop num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order self._num_timesteps = len(timesteps) with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): if self.interrupt: continue self._current_timestep = t latent_model_input = torch.cat([latents, cond_latents_concat, mask_concat], dim=1) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML timestep = t.expand(latent_model_input.shape[0]).to(latent_model_input.dtype) if self.transformer.config.use_meanflow: if i == len(timesteps) - 1: timestep_r = torch.tensor([0.0], device=device) else: timestep_r = timesteps[i + 1] timestep_r = timestep_r.expand(latents.shape[0]).to(latents.dtype) else: timestep_r = None # Step 1: Collect model inputs needed for the guidance method # conditional inputs should always be first element in the tuple guider_inputs = { "encoder_hidden_states": (prompt_embeds, negative_prompt_embeds), "encoder_attention_mask": (prompt_embeds_mask, negative_prompt_embeds_mask), "encoder_hidden_states_2": (prompt_embeds_2, negative_prompt_embeds_2), "encoder_attention_mask_2": (prompt_embeds_mask_2, negative_prompt_embeds_mask_2), } # Step 2: Update guider's internal state for this denoising step self.guider.set_state(step=i, num_inference_steps=num_inference_steps, timestep=t) # Step 3: Prepare batched model inputs based on the guidance method # The guider splits model inputs into separate batches for conditional/unconditional predictions. 
# For CFG with guider_inputs = {"encoder_hidden_states": (prompt_embeds, negative_prompt_embeds)}: # you will get a guider_state with two batches: # guider_state = [ # {"encoder_hidden_states": prompt_embeds, "__guidance_identifier__": "pred_cond"}, # conditional batch # {"encoder_hidden_states": negative_prompt_embeds, "__guidance_identifier__": "pred_uncond"}, # unconditional batch # ] # Other guidance methods may return 1 batch (no guidance) or 3+ batches (e.g., PAG, APG). guider_state = self.guider.prepare_inputs(guider_inputs) # Step 4: Run the denoiser for each batch # Each batch in guider_state represents a different conditioning (conditional, unconditional, etc.). # We run the model once per batch and store the noise prediction in guider_state_batch.noise_pred. for guider_state_batch in guider_state: self.guider.prepare_models(self.transformer) # Extract conditioning kwargs for this batch (e.g., encoder_hidden_states) cond_kwargs = { input_name: getattr(guider_state_batch, input_name) for input_name in guider_inputs.keys() } # e.g. "pred_cond"/"pred_uncond" context_name = getattr(guider_state_batch, self.guider._identifier_key) with self.transformer.cache_context(context_name): # Run denoiser and store noise prediction in this batch guider_state_batch.noise_pred = self.transformer( hidden_states=latent_model_input, image_embeds=image_embeds, timestep=timestep, timestep_r=timestep_r, attention_kwargs=self.attention_kwargs, return_dict=False, **cond_kwargs, )[0] # Cleanup model (e.g., remove hooks) self.guider.cleanup_models(self.transformer) # Step 5: Combine predictions using the guidance method # The guider takes all noise predictions from guider_state and combines them according to the guidance algorithm. 
# Continuing the CFG example, the guider receives: # guider_state = [ # {"encoder_hidden_states": prompt_embeds, "noise_pred": noise_pred_cond, "__guidance_identifier__": "pred_cond"}, # batch 0 # {"encoder_hidden_states": negative_prompt_embeds, "noise_pred": noise_pred_uncond, "__guidance_identifier__": "pred_uncond"}, # batch 1 # ] # And extracts predictions using the __guidance_identifier__: # pred_cond = guider_state[0]["noise_pred"] # extracts noise_pred_cond # pred_uncond = guider_state[1]["noise_pred"] # extracts noise_pred_uncond # Then applies CFG formula: # noise_pred = pred_uncond + guidance_scale * (pred_cond - pred_uncond) # Returns GuiderOutput(pred=noise_pred, pred_cond=pred_cond, pred_uncond=pred_uncond) noise_pred = self.guider(guider_state)[0] # compute the previous noisy sample x_t -> x_t-1 latents_dtype = latents.dtype latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0] if latents.dtype != latents_dtype: if torch.backends.mps.is_available(): # some platforms (eg. apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272 latents = latents.to(latents_dtype) # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if XLA_AVAILABLE: xm.mark_step() self._current_timestep = None if not output_type == "latent": latents = latents.to(self.vae.dtype) / self.vae.config.scaling_factor video = self.vae.decode(latents, return_dict=False)[0] video = self.video_processor.postprocess_video(video, output_type=output_type) else: video = latents # Offload all models self.maybe_free_model_hooks() if not return_dict: return (video,) return HunyuanVideo15PipelineOutput(frames=video)
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/pipelines/hunyuan_video1_5/pipeline_hunyuan_video1_5_image2video.py", "license": "Apache License 2.0", "lines": 837, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:src/diffusers/pipelines/hunyuan_video1_5/pipeline_output.py
from dataclasses import dataclass import torch from diffusers.utils import BaseOutput @dataclass class HunyuanVideo15PipelineOutput(BaseOutput): r""" Output class for HunyuanVideo1.5 pipelines. Args: frames (`torch.Tensor`, `np.ndarray`, or list[list[PIL.Image.Image]]): List of video outputs - It can be a nested list of length `batch_size,` with each sub-list containing denoised PIL image sequences of length `num_frames.` It can also be a NumPy array or Torch tensor of shape `(batch_size, num_frames, channels, height, width)`. """ frames: torch.Tensor
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/pipelines/hunyuan_video1_5/pipeline_output.py", "license": "Apache License 2.0", "lines": 14, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
documentation
huggingface/diffusers:tests/models/transformers/test_models_transformer_hunyuan_1_5.py
# Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import torch from diffusers import HunyuanVideo15Transformer3DModel from ...testing_utils import enable_full_determinism, torch_device from ..test_modeling_common import ModelTesterMixin enable_full_determinism() class HunyuanVideo15Transformer3DTests(ModelTesterMixin, unittest.TestCase): model_class = HunyuanVideo15Transformer3DModel main_input_name = "hidden_states" uses_custom_attn_processor = True model_split_percents = [0.99, 0.99, 0.99] text_embed_dim = 16 text_embed_2_dim = 8 image_embed_dim = 12 @property def dummy_input(self): batch_size = 1 num_channels = 4 num_frames = 1 height = 8 width = 8 sequence_length = 6 sequence_length_2 = 4 image_sequence_length = 3 hidden_states = torch.randn((batch_size, num_channels, num_frames, height, width)).to(torch_device) timestep = torch.tensor([1.0]).to(torch_device) encoder_hidden_states = torch.randn((batch_size, sequence_length, self.text_embed_dim), device=torch_device) encoder_hidden_states_2 = torch.randn( (batch_size, sequence_length_2, self.text_embed_2_dim), device=torch_device ) encoder_attention_mask = torch.ones((batch_size, sequence_length), device=torch_device) encoder_attention_mask_2 = torch.ones((batch_size, sequence_length_2), device=torch_device) # All zeros for inducing T2V path in the model. 
image_embeds = torch.zeros((batch_size, image_sequence_length, self.image_embed_dim), device=torch_device) return { "hidden_states": hidden_states, "timestep": timestep, "encoder_hidden_states": encoder_hidden_states, "encoder_attention_mask": encoder_attention_mask, "encoder_hidden_states_2": encoder_hidden_states_2, "encoder_attention_mask_2": encoder_attention_mask_2, "image_embeds": image_embeds, } @property def input_shape(self): return (4, 1, 8, 8) @property def output_shape(self): return (4, 1, 8, 8) def prepare_init_args_and_inputs_for_common(self): init_dict = { "in_channels": 4, "out_channels": 4, "num_attention_heads": 2, "attention_head_dim": 8, "num_layers": 2, "num_refiner_layers": 1, "mlp_ratio": 2.0, "patch_size": 1, "patch_size_t": 1, "text_embed_dim": self.text_embed_dim, "text_embed_2_dim": self.text_embed_2_dim, "image_embed_dim": self.image_embed_dim, "rope_axes_dim": (2, 2, 4), "target_size": 16, "task_type": "t2v", } inputs_dict = self.dummy_input return init_dict, inputs_dict def test_gradient_checkpointing_is_applied(self): expected_set = {"HunyuanVideo15Transformer3DModel"} super().test_gradient_checkpointing_is_applied(expected_set=expected_set)
{ "repo_id": "huggingface/diffusers", "file_path": "tests/models/transformers/test_models_transformer_hunyuan_1_5.py", "license": "Apache License 2.0", "lines": 85, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/diffusers:tests/pipelines/hunyuan_video1_5/test_hunyuan_1_5.py
# Copyright 2025 The HuggingFace Team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import torch from transformers import ( AutoConfig, ByT5Tokenizer, Qwen2_5_VLTextConfig, Qwen2_5_VLTextModel, Qwen2Tokenizer, T5EncoderModel, ) from diffusers import ( AutoencoderKLHunyuanVideo15, FlowMatchEulerDiscreteScheduler, HunyuanVideo15Pipeline, HunyuanVideo15Transformer3DModel, ) from diffusers.guiders import ClassifierFreeGuidance from ...testing_utils import enable_full_determinism from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class HunyuanVideo15PipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = HunyuanVideo15Pipeline params = frozenset( [ "prompt", "negative_prompt", "height", "width", "prompt_embeds", "prompt_embeds_mask", "negative_prompt_embeds", "negative_prompt_embeds_mask", "prompt_embeds_2", "prompt_embeds_mask_2", "negative_prompt_embeds_2", "negative_prompt_embeds_mask_2", ] ) batch_params = ["prompt", "negative_prompt"] required_optional_params = frozenset(["num_inference_steps", "generator", "latents", "return_dict"]) test_attention_slicing = False test_xformers_attention = False test_layerwise_casting = True test_group_offloading = False supports_dduf = False def get_dummy_components(self, num_layers: int = 1): torch.manual_seed(0) transformer = HunyuanVideo15Transformer3DModel( in_channels=9, out_channels=4, num_attention_heads=2, attention_head_dim=8, num_layers=num_layers, 
num_refiner_layers=1, mlp_ratio=2.0, patch_size=1, patch_size_t=1, text_embed_dim=16, text_embed_2_dim=32, image_embed_dim=12, rope_axes_dim=(2, 2, 4), target_size=16, task_type="t2v", ) torch.manual_seed(0) vae = AutoencoderKLHunyuanVideo15( in_channels=3, out_channels=3, latent_channels=4, block_out_channels=(16, 16), layers_per_block=1, spatial_compression_ratio=4, temporal_compression_ratio=2, downsample_match_channel=False, upsample_match_channel=False, ) torch.manual_seed(0) scheduler = FlowMatchEulerDiscreteScheduler(shift=7.0) torch.manual_seed(0) qwen_config = Qwen2_5_VLTextConfig( **{ "hidden_size": 16, "intermediate_size": 16, "num_hidden_layers": 2, "num_attention_heads": 2, "num_key_value_heads": 2, "rope_scaling": { "mrope_section": [1, 1, 2], "rope_type": "default", "type": "default", }, "rope_theta": 1000000.0, } ) text_encoder = Qwen2_5_VLTextModel(qwen_config) tokenizer = Qwen2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-Qwen2VLForConditionalGeneration") torch.manual_seed(0) config = AutoConfig.from_pretrained("hf-internal-testing/tiny-random-t5") text_encoder_2 = T5EncoderModel(config) tokenizer_2 = ByT5Tokenizer() guider = ClassifierFreeGuidance(guidance_scale=1.0) components = { "transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler, "text_encoder": text_encoder.eval(), "text_encoder_2": text_encoder_2.eval(), "tokenizer": tokenizer, "tokenizer_2": tokenizer_2, "guider": guider, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "monkey", "generator": generator, "num_inference_steps": 2, "height": 16, "width": 16, "num_frames": 9, "output_type": "pt", } return inputs def test_inference(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(device) 
pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) result = pipe(**inputs) video = result.frames generated_video = video[0] self.assertEqual(generated_video.shape, (9, 3, 16, 16)) generated_slice = generated_video.flatten() generated_slice = torch.cat([generated_slice[:8], generated_slice[-8:]]) # fmt: off expected_slice = torch.tensor([0.4296, 0.5549, 0.3088, 0.9115, 0.5049, 0.7926, 0.5549, 0.8618, 0.5091, 0.5075, 0.7117, 0.5292, 0.7053, 0.4864, 0.5206, 0.3878]) # fmt: on self.assertTrue( torch.abs(generated_slice - expected_slice).max() < 1e-3, f"output_slice: {generated_slice}, expected_slice: {expected_slice}", ) @unittest.skip("TODO: Test not supported for now because needs to be adjusted to work with guiders.") def test_encode_prompt_works_in_isolation(self): pass @unittest.skip("Needs to be revisited.") def test_inference_batch_consistent(self): super().test_inference_batch_consistent() @unittest.skip("Needs to be revisited.") def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical()
{ "repo_id": "huggingface/diffusers", "file_path": "tests/pipelines/hunyuan_video1_5/test_hunyuan_1_5.py", "license": "Apache License 2.0", "lines": 169, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/diffusers:tests/pipelines/z_image/test_z_image.py
# Copyright 2025 Alibaba Z-Image Team and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import os import unittest import numpy as np import torch from transformers import Qwen2Tokenizer, Qwen3Config, Qwen3Model from diffusers import AutoencoderKL, FlowMatchEulerDiscreteScheduler, ZImagePipeline, ZImageTransformer2DModel from ...testing_utils import torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin, to_np # Z-Image requires torch.use_deterministic_algorithms(False) due to complex64 RoPE operations # Cannot use enable_full_determinism() which sets it to True os.environ["CUDA_LAUNCH_BLOCKING"] = "1" os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":16:8" torch.use_deterministic_algorithms(False) torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False if hasattr(torch.backends, "cuda"): torch.backends.cuda.matmul.allow_tf32 = False # Note: Some tests (test_float16_inference, test_save_load_float16) may fail in full suite # due to RopeEmbedder cache state pollution between tests. They pass when run individually. # This is a known test isolation issue, not a functional bug. 
class ZImagePipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = ZImagePipeline params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs"} batch_params = TEXT_TO_IMAGE_BATCH_PARAMS image_params = TEXT_TO_IMAGE_IMAGE_PARAMS image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS required_optional_params = frozenset( [ "num_inference_steps", "generator", "latents", "return_dict", "callback_on_step_end", "callback_on_step_end_tensor_inputs", ] ) supports_dduf = False test_xformers_attention = False test_layerwise_casting = True test_group_offloading = True def setUp(self): gc.collect() if torch.cuda.is_available(): torch.cuda.empty_cache() torch.cuda.synchronize() torch.manual_seed(0) if torch.cuda.is_available(): torch.cuda.manual_seed_all(0) def tearDown(self): super().tearDown() gc.collect() if torch.cuda.is_available(): torch.cuda.empty_cache() torch.cuda.synchronize() torch.manual_seed(0) if torch.cuda.is_available(): torch.cuda.manual_seed_all(0) def get_dummy_components(self): torch.manual_seed(0) transformer = ZImageTransformer2DModel( all_patch_size=(2,), all_f_patch_size=(1,), in_channels=16, dim=32, n_layers=2, n_refiner_layers=1, n_heads=2, n_kv_heads=2, norm_eps=1e-5, qk_norm=True, cap_feat_dim=16, rope_theta=256.0, t_scale=1000.0, axes_dims=[8, 4, 4], axes_lens=[256, 32, 32], ) # `x_pad_token` and `cap_pad_token` are initialized with `torch.empty`. # This can cause NaN data values in our testing environment. Fixating them # helps prevent that issue. 
with torch.no_grad(): transformer.x_pad_token.copy_(torch.ones_like(transformer.x_pad_token.data)) transformer.cap_pad_token.copy_(torch.ones_like(transformer.cap_pad_token.data)) torch.manual_seed(0) vae = AutoencoderKL( in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], block_out_channels=[32, 64], layers_per_block=1, latent_channels=16, norm_num_groups=32, sample_size=32, scaling_factor=0.3611, shift_factor=0.1159, ) torch.manual_seed(0) scheduler = FlowMatchEulerDiscreteScheduler() torch.manual_seed(0) config = Qwen3Config( hidden_size=16, intermediate_size=16, num_hidden_layers=2, num_attention_heads=2, num_key_value_heads=2, vocab_size=151936, max_position_embeddings=512, ) text_encoder = Qwen3Model(config) tokenizer = Qwen2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-Qwen2VLForConditionalGeneration") components = { "transformer": transformer, "vae": vae, "scheduler": scheduler, "text_encoder": text_encoder, "tokenizer": tokenizer, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "dance monkey", "negative_prompt": "bad quality", "generator": generator, "num_inference_steps": 2, "guidance_scale": 3.0, "cfg_normalization": False, "cfg_truncation": 1.0, "height": 32, "width": 32, "max_sequence_length": 16, "output_type": "pt", } return inputs def test_inference(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = pipe(**inputs).images generated_image = image[0] self.assertEqual(generated_image.shape, (3, 32, 32)) # fmt: off expected_slice = torch.tensor([0.4622, 0.4532, 0.4714, 0.5087, 0.5371, 0.5405, 0.4492, 0.4479, 0.2984, 
0.2783, 0.5409, 0.6577, 0.3952, 0.5524, 0.5262, 0.453]) # fmt: on generated_slice = generated_image.flatten() generated_slice = torch.cat([generated_slice[:8], generated_slice[-8:]]) self.assertTrue(torch.allclose(generated_slice, expected_slice, atol=5e-2)) def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(batch_size=3, expected_max_diff=1e-1) def test_num_images_per_prompt(self): import inspect sig = inspect.signature(self.pipeline_class.__call__) if "num_images_per_prompt" not in sig.parameters: return components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) batch_sizes = [1, 2] num_images_per_prompts = [1, 2] for batch_size in batch_sizes: for num_images_per_prompt in num_images_per_prompts: inputs = self.get_dummy_inputs(torch_device) for key in inputs.keys(): if key in self.batch_params: inputs[key] = batch_size * [inputs[key]] images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0] assert images.shape[0] == batch_size * num_images_per_prompt del pipe gc.collect() if torch.cuda.is_available(): torch.cuda.empty_cache() torch.cuda.synchronize() def test_attention_slicing_forward_pass( self, test_max_difference=True, test_mean_pixel_difference=True, expected_max_diff=1e-3 ): if not self.test_attention_slicing: return components = self.get_dummy_components() pipe = self.pipeline_class(**components) for component in pipe.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) generator_device = "cpu" inputs = self.get_dummy_inputs(generator_device) output_without_slicing = pipe(**inputs)[0] pipe.enable_attention_slicing(slice_size=1) inputs = self.get_dummy_inputs(generator_device) output_with_slicing1 = pipe(**inputs)[0] pipe.enable_attention_slicing(slice_size=2) inputs = 
self.get_dummy_inputs(generator_device) output_with_slicing2 = pipe(**inputs)[0] if test_max_difference: max_diff1 = np.abs(to_np(output_with_slicing1) - to_np(output_without_slicing)).max() max_diff2 = np.abs(to_np(output_with_slicing2) - to_np(output_without_slicing)).max() self.assertLess( max(max_diff1, max_diff2), expected_max_diff, "Attention slicing should not affect the inference results", ) def test_vae_tiling(self, expected_diff_max: float = 0.2): generator_device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to("cpu") pipe.set_progress_bar_config(disable=None) # Without tiling inputs = self.get_dummy_inputs(generator_device) inputs["height"] = inputs["width"] = 128 output_without_tiling = pipe(**inputs)[0] # With tiling (standard AutoencoderKL doesn't accept parameters) pipe.vae.enable_tiling() inputs = self.get_dummy_inputs(generator_device) inputs["height"] = inputs["width"] = 128 output_with_tiling = pipe(**inputs)[0] self.assertLess( (to_np(output_without_tiling) - to_np(output_with_tiling)).max(), expected_diff_max, "VAE tiling should not affect the inference results", ) def test_pipeline_with_accelerator_device_map(self, expected_max_difference=5e-4): # Z-Image RoPE embeddings (complex64) have slightly higher numerical tolerance super().test_pipeline_with_accelerator_device_map(expected_max_difference=expected_max_difference) def test_group_offloading_inference(self): # Block-level offloading conflicts with RoPE cache. Pipeline-level offloading (tested separately) works fine. self.skipTest("Using test_pipeline_level_group_offloading_inference instead") def test_save_load_float16(self, expected_max_diff=1e-2): gc.collect() if torch.cuda.is_available(): torch.cuda.empty_cache() torch.cuda.synchronize() torch.manual_seed(0) if torch.cuda.is_available(): torch.cuda.manual_seed_all(0) super().test_save_load_float16(expected_max_diff=expected_max_diff)
{ "repo_id": "huggingface/diffusers", "file_path": "tests/pipelines/z_image/test_z_image.py", "license": "Apache License 2.0", "lines": 258, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/diffusers:examples/dreambooth/test_dreambooth_lora_flux2.py
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import logging import os import sys import tempfile import safetensors from diffusers.loaders.lora_base import LORA_ADAPTER_METADATA_KEY sys.path.append("..") from test_examples_utils import ExamplesTestsAccelerate, run_command # noqa: E402 logging.basicConfig(level=logging.DEBUG) logger = logging.getLogger() stream_handler = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class DreamBoothLoRAFlux2(ExamplesTestsAccelerate): instance_data_dir = "docs/source/en/imgs" instance_prompt = "dog" pretrained_model_name_or_path = "hf-internal-testing/tiny-flux2" script_path = "examples/dreambooth/train_dreambooth_lora_flux2.py" transformer_layer_type = "single_transformer_blocks.0.attn.to_qkv_mlp_proj" def test_dreambooth_lora_flux2(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" {self.script_path} --pretrained_model_name_or_path {self.pretrained_model_name_or_path} --instance_data_dir {self.instance_data_dir} --instance_prompt {self.instance_prompt} --resolution 64 --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 2 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --max_sequence_length 8 --text_encoder_out_layers 1 --output_dir {tmpdir} """.split() run_command(self._launch_args + test_args) # save_pretrained smoke test self.assertTrue(os.path.isfile(os.path.join(tmpdir, 
"pytorch_lora_weights.safetensors"))) # make sure the state_dict has the correct naming in the parameters. lora_state_dict = safetensors.torch.load_file(os.path.join(tmpdir, "pytorch_lora_weights.safetensors")) is_lora = all("lora" in k for k in lora_state_dict.keys()) self.assertTrue(is_lora) # when not training the text encoder, all the parameters in the state dict should start # with `"transformer"` in their names. starts_with_transformer = all(key.startswith("transformer") for key in lora_state_dict.keys()) self.assertTrue(starts_with_transformer) def test_dreambooth_lora_latent_caching(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" {self.script_path} --pretrained_model_name_or_path {self.pretrained_model_name_or_path} --instance_data_dir {self.instance_data_dir} --instance_prompt {self.instance_prompt} --resolution 64 --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 2 --cache_latents --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --max_sequence_length 8 --text_encoder_out_layers 1 --output_dir {tmpdir} """.split() run_command(self._launch_args + test_args) # save_pretrained smoke test self.assertTrue(os.path.isfile(os.path.join(tmpdir, "pytorch_lora_weights.safetensors"))) # make sure the state_dict has the correct naming in the parameters. lora_state_dict = safetensors.torch.load_file(os.path.join(tmpdir, "pytorch_lora_weights.safetensors")) is_lora = all("lora" in k for k in lora_state_dict.keys()) self.assertTrue(is_lora) # when not training the text encoder, all the parameters in the state dict should start # with `"transformer"` in their names. 
starts_with_transformer = all(key.startswith("transformer") for key in lora_state_dict.keys()) self.assertTrue(starts_with_transformer) def test_dreambooth_lora_layers(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" {self.script_path} --pretrained_model_name_or_path {self.pretrained_model_name_or_path} --instance_data_dir {self.instance_data_dir} --instance_prompt {self.instance_prompt} --resolution 64 --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 2 --cache_latents --learning_rate 5.0e-04 --scale_lr --lora_layers {self.transformer_layer_type} --lr_scheduler constant --lr_warmup_steps 0 --max_sequence_length 8 --text_encoder_out_layers 1 --output_dir {tmpdir} """.split() run_command(self._launch_args + test_args) # save_pretrained smoke test self.assertTrue(os.path.isfile(os.path.join(tmpdir, "pytorch_lora_weights.safetensors"))) # make sure the state_dict has the correct naming in the parameters. lora_state_dict = safetensors.torch.load_file(os.path.join(tmpdir, "pytorch_lora_weights.safetensors")) is_lora = all("lora" in k for k in lora_state_dict.keys()) self.assertTrue(is_lora) # when not training the text encoder, all the parameters in the state dict should start # with `"transformer"` in their names. 
In this test, we only params of # transformer.single_transformer_blocks.0.attn.to_k should be in the state dict starts_with_transformer = all( key.startswith(f"transformer.{self.transformer_layer_type}") for key in lora_state_dict.keys() ) self.assertTrue(starts_with_transformer) def test_dreambooth_lora_flux2_checkpointing_checkpoints_total_limit(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" {self.script_path} --pretrained_model_name_or_path={self.pretrained_model_name_or_path} --instance_data_dir={self.instance_data_dir} --output_dir={tmpdir} --instance_prompt={self.instance_prompt} --resolution=64 --train_batch_size=1 --gradient_accumulation_steps=1 --max_train_steps=6 --checkpoints_total_limit=2 --max_sequence_length 8 --checkpointing_steps=2 --text_encoder_out_layers 1 """.split() run_command(self._launch_args + test_args) self.assertEqual( {x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-4", "checkpoint-6"}, ) def test_dreambooth_lora_flux2_checkpointing_checkpoints_total_limit_removes_multiple_checkpoints(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" {self.script_path} --pretrained_model_name_or_path={self.pretrained_model_name_or_path} --instance_data_dir={self.instance_data_dir} --output_dir={tmpdir} --instance_prompt={self.instance_prompt} --resolution=64 --train_batch_size=1 --gradient_accumulation_steps=1 --max_train_steps=4 --checkpointing_steps=2 --max_sequence_length 8 --text_encoder_out_layers 1 """.split() run_command(self._launch_args + test_args) self.assertEqual({x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-2", "checkpoint-4"}) resume_run_args = f""" {self.script_path} --pretrained_model_name_or_path={self.pretrained_model_name_or_path} --instance_data_dir={self.instance_data_dir} --output_dir={tmpdir} --instance_prompt={self.instance_prompt} --resolution=64 --train_batch_size=1 --gradient_accumulation_steps=1 --max_train_steps=8 --checkpointing_steps=2 
--resume_from_checkpoint=checkpoint-4 --checkpoints_total_limit=2 --max_sequence_length 8 --text_encoder_out_layers 1 """.split() run_command(self._launch_args + resume_run_args) self.assertEqual({x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-6", "checkpoint-8"}) def test_dreambooth_lora_with_metadata(self): # Use a `lora_alpha` that is different from `rank`. lora_alpha = 8 rank = 4 with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" {self.script_path} --pretrained_model_name_or_path {self.pretrained_model_name_or_path} --instance_data_dir {self.instance_data_dir} --instance_prompt {self.instance_prompt} --resolution 64 --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 2 --lora_alpha={lora_alpha} --rank={rank} --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --max_sequence_length 8 --text_encoder_out_layers 1 --output_dir {tmpdir} """.split() run_command(self._launch_args + test_args) # save_pretrained smoke test state_dict_file = os.path.join(tmpdir, "pytorch_lora_weights.safetensors") self.assertTrue(os.path.isfile(state_dict_file)) # Check if the metadata was properly serialized. with safetensors.torch.safe_open(state_dict_file, framework="pt", device="cpu") as f: metadata = f.metadata() or {} metadata.pop("format", None) raw = metadata.get(LORA_ADAPTER_METADATA_KEY) if raw: raw = json.loads(raw) loaded_lora_alpha = raw["transformer.lora_alpha"] self.assertTrue(loaded_lora_alpha == lora_alpha) loaded_lora_rank = raw["transformer.r"] self.assertTrue(loaded_lora_rank == rank)
{ "repo_id": "huggingface/diffusers", "file_path": "examples/dreambooth/test_dreambooth_lora_flux2.py", "license": "Apache License 2.0", "lines": 226, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/diffusers:examples/dreambooth/train_dreambooth_lora_flux2.py
#!/usr/bin/env python # coding=utf-8 # Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # /// script # dependencies = [ # "diffusers @ git+https://github.com/huggingface/diffusers.git", # "torch>=2.0.0", # "accelerate>=0.31.0", # "transformers>=4.41.2", # "ftfy", # "tensorboard", # "Jinja2", # "peft>=0.11.1", # "sentencepiece", # "torchvision", # "datasets", # "bitsandbytes", # "prodigyopt", # ] # /// import argparse import copy import itertools import json import logging import math import os import random import shutil import warnings from contextlib import nullcontext from pathlib import Path from typing import Any import numpy as np import torch import transformers from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import DistributedDataParallelKwargs, ProjectConfiguration, set_seed from huggingface_hub import create_repo, upload_folder from huggingface_hub.utils import insecure_hashlib from peft import LoraConfig, prepare_model_for_kbit_training, set_peft_model_state_dict from peft.utils import get_peft_model_state_dict from PIL import Image from PIL.ImageOps import exif_transpose from torch.utils.data import Dataset from torch.utils.data.sampler import BatchSampler from torchvision import transforms from torchvision.transforms import functional as TF from tqdm.auto import tqdm from transformers import Mistral3ForConditionalGeneration, PixtralProcessor import 
diffusers from diffusers import ( AutoencoderKLFlux2, BitsAndBytesConfig, FlowMatchEulerDiscreteScheduler, Flux2Pipeline, Flux2Transformer2DModel, ) from diffusers.optimization import get_scheduler from diffusers.training_utils import ( _collate_lora_metadata, _to_cpu_contiguous, cast_training_params, compute_density_for_timestep_sampling, compute_loss_weighting_for_sd3, find_nearest_bucket, free_memory, get_fsdp_kwargs_from_accelerator, offload_models, parse_buckets_string, wrap_with_fsdp, ) from diffusers.utils import ( check_min_version, convert_unet_state_dict_to_peft, is_wandb_available, ) from diffusers.utils.hub_utils import load_or_create_model_card, populate_model_card from diffusers.utils.import_utils import is_torch_npu_available from diffusers.utils.torch_utils import is_compiled_module if getattr(torch, "distributed", None) is not None: import torch.distributed as dist if is_wandb_available(): import wandb # Will error if the minimal version of diffusers is not installed. Remove at your own risks. check_min_version("0.37.0.dev0") logger = get_logger(__name__) def save_model_card( repo_id: str, images=None, base_model: str = None, instance_prompt=None, validation_prompt=None, repo_folder=None, quant_training=None, ): widget_dict = [] if images is not None: for i, image in enumerate(images): image.save(os.path.join(repo_folder, f"image_{i}.png")) widget_dict.append( {"text": validation_prompt if validation_prompt else " ", "output": {"url": f"image_{i}.png"}} ) model_description = f""" # Flux2 DreamBooth LoRA - {repo_id} <Gallery /> ## Model description These are {repo_id} DreamBooth LoRA weights for {base_model}. The weights were trained using [DreamBooth](https://dreambooth.github.io/) with the [Flux2 diffusers trainer](https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/README_flux2.md). Quant training? {quant_training} ## Trigger words You should use `{instance_prompt}` to trigger the image generation. 
## Download model [Download the *.safetensors LoRA]({repo_id}/tree/main) in the Files & versions tab. ## Use it with the [🧨 diffusers library](https://github.com/huggingface/diffusers) ```py from diffusers import AutoPipelineForText2Image import torch pipeline = AutoPipelineForText2Image.from_pretrained("black-forest-labs/FLUX.2", torch_dtype=torch.bfloat16).to('cuda') pipeline.load_lora_weights('{repo_id}', weight_name='pytorch_lora_weights.safetensors') image = pipeline('{validation_prompt if validation_prompt else instance_prompt}').images[0] ``` For more details, including weighting, merging and fusing LoRAs, check the [documentation on loading LoRAs in diffusers](https://huggingface.co/docs/diffusers/main/en/using-diffusers/loading_adapters) ## License Please adhere to the licensing terms as described [here](https://huggingface.co/black-forest-labs/FLUX.2/blob/main/LICENSE.md). """ model_card = load_or_create_model_card( repo_id_or_path=repo_id, from_training=True, license="other", base_model=base_model, prompt=instance_prompt, model_description=model_description, widget=widget_dict, ) tags = [ "text-to-image", "diffusers-training", "diffusers", "lora", "flux2", "flux2-diffusers", "template:sd-lora", ] model_card = populate_model_card(model_card, tags=tags) model_card.save(os.path.join(repo_folder, "README.md")) def log_validation( pipeline, args, accelerator, pipeline_args, epoch, torch_dtype, is_final_validation=False, ): args.num_validation_images = args.num_validation_images if args.num_validation_images else 1 logger.info( f"Running validation... \n Generating {args.num_validation_images} images with prompt:" f" {args.validation_prompt}." 
) pipeline = pipeline.to(dtype=torch_dtype) pipeline.enable_model_cpu_offload() pipeline.set_progress_bar_config(disable=True) # run inference generator = torch.Generator(device=accelerator.device).manual_seed(args.seed) if args.seed is not None else None autocast_ctx = torch.autocast(accelerator.device.type) if not is_final_validation else nullcontext() images = [] for _ in range(args.num_validation_images): with autocast_ctx: image = pipeline( prompt_embeds=pipeline_args["prompt_embeds"], generator=generator, ).images[0] images.append(image) for tracker in accelerator.trackers: phase_name = "test" if is_final_validation else "validation" if tracker.name == "tensorboard": np_images = np.stack([np.asarray(img) for img in images]) tracker.writer.add_images(phase_name, np_images, epoch, dataformats="NHWC") if tracker.name == "wandb": tracker.log( { phase_name: [ wandb.Image(image, caption=f"{i}: {args.validation_prompt}") for i, image in enumerate(images) ] } ) del pipeline free_memory() return images def module_filter_fn(mod: torch.nn.Module, fqn: str): # don't convert the output module if fqn == "proj_out": return False # don't convert linear modules with weight dimensions not divisible by 16 if isinstance(mod, torch.nn.Linear): if mod.in_features % 16 != 0 or mod.out_features % 16 != 0: return False return True def parse_args(input_args=None): parser = argparse.ArgumentParser(description="Simple example of a training script.") parser.add_argument( "--pretrained_model_name_or_path", type=str, default=None, required=True, help="Path to pretrained model or model identifier from huggingface.co/models.", ) parser.add_argument( "--revision", type=str, default=None, required=False, help="Revision of pretrained model identifier from huggingface.co/models.", ) parser.add_argument( "--bnb_quantization_config_path", type=str, default=None, help="Quantization config in a JSON file that will be used to define the bitsandbytes quant config of the DiT.", ) parser.add_argument( 
        "--do_fp8_training",
        action="store_true",
        help="if we are doing FP8 training.",
    )
    parser.add_argument(
        "--variant",
        type=str,
        default=None,
        help="Variant of the model files of the pretrained model identifier from huggingface.co/models, 'e.g.' fp16",
    )
    # --- data source: exactly one of --dataset_name / --instance_data_dir (validated below) ---
    parser.add_argument(
        "--dataset_name",
        type=str,
        default=None,
        help=(
            "The name of the Dataset (from the HuggingFace hub) containing the training data of instance images (could be your own, possibly private,"
            " dataset). It can also be a path pointing to a local copy of a dataset in your filesystem,"
            " or to a folder containing files that 🤗 Datasets can understand."
        ),
    )
    parser.add_argument(
        "--dataset_config_name",
        type=str,
        default=None,
        help="The config of the Dataset, leave as None if there's only one config.",
    )
    parser.add_argument(
        "--instance_data_dir",
        type=str,
        default=None,
        help=("A folder containing the training data. "),
    )
    parser.add_argument(
        "--cache_dir",
        type=str,
        default=None,
        help="The directory where the downloaded models and datasets will be stored.",
    )
    parser.add_argument(
        "--image_column",
        type=str,
        default="image",
        help="The column of the dataset containing the target image. By "
        "default, the standard Image Dataset maps out 'file_name' "
        "to 'image'.",
    )
    parser.add_argument(
        "--caption_column",
        type=str,
        default=None,
        help="The column of the dataset containing the instance prompt for each image",
    )
    parser.add_argument("--repeats", type=int, default=1, help="How many times to repeat the training data.")
    parser.add_argument(
        "--class_data_dir",
        type=str,
        default=None,
        required=False,
        help="A folder containing the training data of class images.",
    )
    parser.add_argument(
        "--instance_prompt",
        type=str,
        default=None,
        required=True,
        help="The prompt with identifier specifying the instance, e.g. 'photo of a TOK dog', 'in the style of TOK'",
    )
    parser.add_argument(
        "--class_prompt",
        type=str,
        default=None,
        help="The prompt to specify images in the same class as provided instance images.",
    )
    # NOTE(review): "with with" typo in the help string below; left as-is (runtime string).
    parser.add_argument(
        "--max_sequence_length",
        type=int,
        default=512,
        help="Maximum sequence length to use with with the T5 text encoder",
    )
    parser.add_argument(
        "--text_encoder_out_layers",
        type=int,
        nargs="+",
        default=[10, 20, 30],
        help="Text encoder hidden layers to compute the final text embeddings.",
    )
    # --- validation / logging options ---
    parser.add_argument(
        "--validation_prompt",
        type=str,
        default=None,
        help="A prompt that is used during validation to verify that the model is learning.",
    )
    parser.add_argument(
        "--skip_final_inference",
        default=False,
        action="store_true",
        help="Whether to skip the final inference step with loaded lora weights upon training completion. This will run intermediate validation inference if `validation_prompt` is provided. Specify to reduce memory.",
    )
    parser.add_argument(
        "--final_validation_prompt",
        type=str,
        default=None,
        help="A prompt that is used during a final validation to verify that the model is learning. Ignored if `--validation_prompt` is provided.",
    )
    parser.add_argument(
        "--num_validation_images",
        type=int,
        default=4,
        help="Number of images that should be generated during validation with `validation_prompt`.",
    )
    parser.add_argument(
        "--validation_epochs",
        type=int,
        default=50,
        help=(
            "Run dreambooth validation every X epochs. Dreambooth validation consists of running the prompt"
            " `args.validation_prompt` multiple times: `args.num_validation_images`."
        ),
    )
    # --- LoRA hyperparameters ---
    parser.add_argument(
        "--rank",
        type=int,
        default=4,
        help=("The dimension of the LoRA update matrices."),
    )
    parser.add_argument(
        "--lora_alpha",
        type=int,
        default=4,
        help="LoRA alpha to be used for additional scaling.",
    )
    parser.add_argument("--lora_dropout", type=float, default=0.0, help="Dropout probability for LoRA layers")
    # --- prior preservation (class images) ---
    parser.add_argument(
        "--with_prior_preservation",
        default=False,
        action="store_true",
        help="Flag to add prior preservation loss.",
    )
    parser.add_argument("--prior_loss_weight", type=float, default=1.0, help="The weight of prior preservation loss.")
    parser.add_argument(
        "--num_class_images",
        type=int,
        default=100,
        help=(
            "Minimal class images for prior preservation loss. If there are not enough images already present in"
            " class_data_dir, additional images will be sampled with class_prompt."
        ),
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default="flux-dreambooth-lora",
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
    # --- image preprocessing ---
    parser.add_argument(
        "--resolution",
        type=int,
        default=512,
        help=(
            "The resolution for input images, all the images in the train/validation dataset will be resized to this"
            " resolution"
        ),
    )
    parser.add_argument(
        "--aspect_ratio_buckets",
        type=str,
        default=None,
        help=(
            "Aspect ratio buckets to use for training. Define as a string of 'h1,w1;h2,w2;...'. "
            "e.g. '1024,1024;768,1360;1360,768;880,1168;1168,880;1248,832;832,1248'"
            "Images will be resized and cropped to fit the nearest bucket. If provided, --resolution is ignored."
        ),
    )
    parser.add_argument(
        "--center_crop",
        default=False,
        action="store_true",
        help=(
            "Whether to center crop the input images to the resolution. If not set, the images will be randomly"
            " cropped. The images will be resized to the resolution first before cropping."
        ),
    )
    parser.add_argument(
        "--random_flip",
        action="store_true",
        help="whether to randomly flip images horizontally",
    )
    # --- training schedule / checkpointing ---
    parser.add_argument(
        "--train_batch_size", type=int, default=4, help="Batch size (per device) for the training dataloader."
    )
    parser.add_argument(
        "--sample_batch_size", type=int, default=4, help="Batch size (per device) for sampling images."
    )
    parser.add_argument("--num_train_epochs", type=int, default=1)
    parser.add_argument(
        "--max_train_steps",
        type=int,
        default=None,
        help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
    )
    parser.add_argument(
        "--checkpointing_steps",
        type=int,
        default=500,
        help=(
            "Save a checkpoint of the training state every X updates. These checkpoints can be used both as final"
            " checkpoints in case they are better than the last checkpoint, and are also suitable for resuming"
            " training using `--resume_from_checkpoint`."
        ),
    )
    parser.add_argument(
        "--checkpoints_total_limit",
        type=int,
        default=None,
        help=("Max number of checkpoints to store."),
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help=(
            "Whether training should be resumed from a previous checkpoint. Use a path saved by"
            ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.'
        ),
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument(
        "--gradient_checkpointing",
        action="store_true",
        help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.",
    )
    # --- optimizer / learning-rate options ---
    parser.add_argument(
        "--learning_rate",
        type=float,
        default=1e-4,
        help="Initial learning rate (after the potential warmup period) to use.",
    )
    # NOTE(review): help string below mentions the FLUX.1 dev variant although this script targets FLUX.2 — confirm.
    parser.add_argument(
        "--guidance_scale",
        type=float,
        default=3.5,
        help="the FLUX.1 dev variant is a guidance distilled model",
    )
    parser.add_argument(
        "--text_encoder_lr",
        type=float,
        default=5e-6,
        help="Text encoder learning rate to use.",
    )
    parser.add_argument(
        "--scale_lr",
        action="store_true",
        default=False,
        help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.",
    )
    parser.add_argument(
        "--lr_scheduler",
        type=str,
        default="constant",
        help=(
            'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
            ' "constant", "constant_with_warmup"]'
        ),
    )
    parser.add_argument(
        "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler."
    )
    parser.add_argument(
        "--lr_num_cycles",
        type=int,
        default=1,
        help="Number of hard resets of the lr in cosine_with_restarts scheduler.",
    )
    parser.add_argument("--lr_power", type=float, default=1.0, help="Power factor of the polynomial scheduler.")
    parser.add_argument(
        "--dataloader_num_workers",
        type=int,
        default=0,
        help=(
            "Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process."
        ),
    )
    # --- flow-matching loss weighting ---
    parser.add_argument(
        "--weighting_scheme",
        type=str,
        default="none",
        choices=["sigma_sqrt", "logit_normal", "mode", "cosmap", "none"],
        help=('We default to the "none" weighting scheme for uniform sampling and uniform loss'),
    )
    parser.add_argument(
        "--logit_mean", type=float, default=0.0, help="mean to use when using the `'logit_normal'` weighting scheme."
    )
    parser.add_argument(
        "--logit_std", type=float, default=1.0, help="std to use when using the `'logit_normal'` weighting scheme."
    )
    parser.add_argument(
        "--mode_scale",
        type=float,
        default=1.29,
        help="Scale of mode weighting scheme. Only effective when using the `'mode'` as the `weighting_scheme`.",
    )
    parser.add_argument(
        "--optimizer",
        type=str,
        default="AdamW",
        help=('The optimizer type to use. Choose between ["AdamW", "prodigy"]'),
    )
    parser.add_argument(
        "--use_8bit_adam",
        action="store_true",
        help="Whether or not to use 8-bit Adam from bitsandbytes. Ignored if optimizer is not set to AdamW",
    )
    parser.add_argument(
        "--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam and Prodigy optimizers."
    )
    parser.add_argument(
        "--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam and Prodigy optimizers."
    )
    parser.add_argument(
        "--prodigy_beta3",
        type=float,
        default=None,
        help="coefficients for computing the Prodigy stepsize using running averages. If set to None, "
        "uses the value of square root of beta2. Ignored if optimizer is adamW",
    )
    parser.add_argument("--prodigy_decouple", type=bool, default=True, help="Use AdamW style decoupled weight decay")
    parser.add_argument("--adam_weight_decay", type=float, default=1e-04, help="Weight decay to use for unet params")
    parser.add_argument(
        "--adam_weight_decay_text_encoder", type=float, default=1e-03, help="Weight decay to use for text_encoder"
    )
    parser.add_argument(
        "--lora_layers",
        type=str,
        default=None,
        help=(
            'The transformer modules to apply LoRA training on. Please specify the layers in a comma separated. E.g. - "to_k,to_q,to_v,to_out.0" will result in lora training of attention layers only'
        ),
    )
    parser.add_argument(
        "--adam_epsilon",
        type=float,
        default=1e-08,
        help="Epsilon value for the Adam optimizer and Prodigy optimizers.",
    )
    parser.add_argument(
        "--prodigy_use_bias_correction",
        type=bool,
        default=True,
        help="Turn on Adam's bias correction. True by default. Ignored if optimizer is adamW",
    )
    parser.add_argument(
        "--prodigy_safeguard_warmup",
        type=bool,
        default=True,
        help="Remove lr from the denominator of D estimate to avoid issues during warm-up stage. True by default. "
        "Ignored if optimizer is adamW",
    )
    parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
    # --- Hub / logging / hardware options ---
    parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
    parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.")
    parser.add_argument(
        "--hub_model_id",
        type=str,
        default=None,
        help="The name of the repository to keep in sync with the local `output_dir`.",
    )
    parser.add_argument(
        "--logging_dir",
        type=str,
        default="logs",
        help=(
            "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
            " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
        ),
    )
    parser.add_argument(
        "--allow_tf32",
        action="store_true",
        help=(
            "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see"
            " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices"
        ),
    )
    parser.add_argument(
        "--cache_latents",
        action="store_true",
        default=False,
        help="Cache the VAE latents",
    )
    parser.add_argument(
        "--report_to",
        type=str,
        default="tensorboard",
        help=(
            'The integration to report the results and logs to. Supported platforms are `"tensorboard"`'
            ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.'
        ),
    )
    # NOTE(review): "1.10.and" typo in the two help strings below; left as-is (runtime strings).
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16"],
        help=(
            "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >="
            " 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the"
            " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config."
        ),
    )
    parser.add_argument(
        "--upcast_before_saving",
        action="store_true",
        default=False,
        help=(
            "Whether to upcast the trained transformer layers to float32 before saving (at the end of training). "
            "Defaults to precision dtype used for training to save memory"
        ),
    )
    parser.add_argument(
        "--offload",
        action="store_true",
        help="Whether to offload the VAE and the text encoder to CPU when they are not used.",
    )
    parser.add_argument(
        "--remote_text_encoder",
        action="store_true",
        help="Whether to use a remote text encoder. This means the text encoder will not be loaded locally and instead, the prompt embeddings will be computed remotely using the HuggingFace Inference API.",
    )
    parser.add_argument(
        "--prior_generation_precision",
        type=str,
        default=None,
        choices=["no", "fp32", "fp16", "bf16"],
        help=(
            "Choose prior generation precision between fp32, fp16 and bf16 (bfloat16). Bf16 requires PyTorch >="
            " 1.10.and an Nvidia Ampere GPU. Default to fp16 if a GPU is available else fp32."
        ),
    )
    parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
    # NOTE(review): "Enabla" typo in the help string below; left as-is (runtime string).
    parser.add_argument("--enable_npu_flash_attention", action="store_true", help="Enabla Flash Attention for NPU")
    parser.add_argument("--fsdp_text_encoder", action="store_true", help="Use FSDP for text encoder")

    if input_args is not None:
        args = parser.parse_args(input_args)
    else:
        args = parser.parse_args()

    # Post-parse validation: the two data sources are mutually exclusive and one is required.
    if args.dataset_name is None and args.instance_data_dir is None:
        raise ValueError("Specify either `--dataset_name` or `--instance_data_dir`")

    if args.dataset_name is not None and args.instance_data_dir is not None:
        raise ValueError("Specify only one of `--dataset_name` or `--instance_data_dir`")

    # FP8 training and bitsandbytes quantization are incompatible.
    if args.do_fp8_training and args.bnb_quantization_config_path:
        raise ValueError("Both `do_fp8_training` and `bnb_quantization_config_path` cannot be passed.")

    # Allow the launcher-provided LOCAL_RANK env var to override the CLI flag.
    env_local_rank = int(os.environ.get("LOCAL_RANK", -1))
    if env_local_rank != -1 and env_local_rank != args.local_rank:
        args.local_rank = env_local_rank

    if args.with_prior_preservation:
        if args.class_data_dir is None:
            raise ValueError("You must specify a data directory for class images.")
        if args.class_prompt is None:
            raise ValueError("You must specify prompt for class images.")
    else:
        # logger is not available yet
        if args.class_data_dir is not None:
            warnings.warn("You need not use --class_data_dir without --with_prior_preservation.")
        if args.class_prompt is not None:
            warnings.warn("You need not use --class_prompt without --with_prior_preservation.")

    return args


class DreamBoothDataset(Dataset):
    """
    A dataset to prepare the instance and class images with the prompts for fine-tuning the model.
    It pre-processes the images.
    """

    def __init__(
        self,
        instance_data_root,
        instance_prompt,
        class_prompt,
        class_data_root=None,
        class_num=None,
        size=1024,
        repeats=1,
        center_crop=False,
        buckets=None,
    ):
        # Loads instance images either from a hub dataset (--dataset_name) or a local
        # folder, assigns each image to its nearest aspect-ratio bucket, and eagerly
        # preprocesses all pixel values. NOTE: reads the module-level `args` in
        # addition to the constructor parameters.
        self.size = size
        self.center_crop = center_crop

        self.instance_prompt = instance_prompt
        self.custom_instance_prompts = None
        self.class_prompt = class_prompt
        self.buckets = buckets

        # if --dataset_name is provided or a metadata jsonl file is provided in the local --instance_data directory,
        # we load the training data using load_dataset
        if args.dataset_name is not None:
            try:
                from datasets import load_dataset
            except ImportError:
                raise ImportError(
                    "You are trying to load your data using the datasets library. If you wish to train using custom "
                    "captions please install the datasets library: `pip install datasets`. If you wish to load a "
                    "local folder containing images only, specify --instance_data_dir instead."
                )
            # Downloading and loading a dataset from the hub.
            # See more about loading custom images at
            # https://huggingface.co/docs/datasets/v2.0.0/en/dataset_script
            dataset = load_dataset(
                args.dataset_name,
                args.dataset_config_name,
                cache_dir=args.cache_dir,
            )
            # Preprocessing the datasets.
            column_names = dataset["train"].column_names

            # 6. Get the column names for input/target.
            if args.image_column is None:
                image_column = column_names[0]
                logger.info(f"image column defaulting to {image_column}")
            else:
                image_column = args.image_column
                if image_column not in column_names:
                    raise ValueError(
                        f"`--image_column` value '{args.image_column}' not found in dataset columns. Dataset columns are: {', '.join(column_names)}"
                    )
            instance_images = dataset["train"][image_column]

            if args.caption_column is None:
                logger.info(
                    "No caption column provided, defaulting to instance_prompt for all images. If your dataset "
                    "contains captions/prompts for the images, make sure to specify the "
                    "column as --caption_column"
                )
                self.custom_instance_prompts = None
            else:
                if args.caption_column not in column_names:
                    raise ValueError(
                        f"`--caption_column` value '{args.caption_column}' not found in dataset columns. Dataset columns are: {', '.join(column_names)}"
                    )
                custom_instance_prompts = dataset["train"][args.caption_column]
                # create final list of captions according to --repeats
                self.custom_instance_prompts = []
                for caption in custom_instance_prompts:
                    self.custom_instance_prompts.extend(itertools.repeat(caption, repeats))
        else:
            self.instance_data_root = Path(instance_data_root)
            if not self.instance_data_root.exists():
                raise ValueError("Instance images root doesn't exists.")

            instance_images = [Image.open(path) for path in list(Path(instance_data_root).iterdir())]
            self.custom_instance_prompts = None

        # Duplicate each image --repeats times (mirrors the caption duplication above).
        self.instance_images = []
        for img in instance_images:
            self.instance_images.extend(itertools.repeat(img, repeats))

        # Eagerly preprocess every image into (tensor, bucket_idx) pairs.
        # NOTE(review): `i` and `width, height` below are unused — left as-is.
        self.pixel_values = []
        for i, image in enumerate(self.instance_images):
            image = exif_transpose(image)
            if not image.mode == "RGB":
                image = image.convert("RGB")
            width, height = image.size

            # Find the closest bucket
            bucket_idx = find_nearest_bucket(height, width, self.buckets)
            target_height, target_width = self.buckets[bucket_idx]
            self.size = (target_height, target_width)

            # based on the bucket assignment, define the transformations
            image = self.train_transform(
                image,
                size=self.size,
                center_crop=args.center_crop,
                random_flip=args.random_flip,
            )
            self.pixel_values.append((image, bucket_idx))

        self.num_instance_images = len(self.instance_images)
        self._length = self.num_instance_images

        if class_data_root is not None:
            self.class_data_root = Path(class_data_root)
            self.class_data_root.mkdir(parents=True, exist_ok=True)
            self.class_images_path = list(self.class_data_root.iterdir())
            if class_num is not None:
                self.num_class_images = min(len(self.class_images_path), class_num)
            else:
                self.num_class_images = len(self.class_images_path)
            # Dataset length covers whichever set (instance/class) is larger.
            self._length = max(self.num_class_images, self.num_instance_images)
        else:
            self.class_data_root = None

        # Transform used for class images only (instance images use train_transform above).
        self.image_transforms = transforms.Compose(
            [
                transforms.Resize(size, interpolation=transforms.InterpolationMode.BILINEAR),
                transforms.CenterCrop(size) if center_crop else transforms.RandomCrop(size),
                transforms.ToTensor(),
                transforms.Normalize([0.5], [0.5]),
            ]
        )

    def __len__(self):
        return self._length

    def __getitem__(self, index):
        # Returns a dict with the preprocessed instance image, its bucket index,
        # its prompt, and (when prior preservation is enabled) a class image/prompt.
        example = {}
        instance_image, bucket_idx = self.pixel_values[index % self.num_instance_images]
        example["instance_images"] = instance_image
        example["bucket_idx"] = bucket_idx

        if self.custom_instance_prompts:
            caption = self.custom_instance_prompts[index % self.num_instance_images]
            if caption:
                example["instance_prompt"] = caption
            else:
                # Empty caption entry: fall back to the shared instance prompt.
                example["instance_prompt"] = self.instance_prompt
        else:  # no custom prompts were provided: use the shared instance prompt
            example["instance_prompt"] = self.instance_prompt

        if self.class_data_root:
            class_image = Image.open(self.class_images_path[index % self.num_class_images])
            class_image = exif_transpose(class_image)

            if not class_image.mode == "RGB":
                class_image = class_image.convert("RGB")
            example["class_images"] = self.image_transforms(class_image)
            example["class_prompt"] = self.class_prompt
        return example

    def train_transform(self, image, size=(224, 224), center_crop=False, random_flip=False):
        """Resize/crop/flip/normalize a PIL image into a training tensor.

        Unlike a `transforms.Compose`, the random crop/flip parameters are drawn
        explicitly so the SAME parameters could be applied to paired inputs.
        """
        # 1. Resize (deterministic)
        resize = transforms.Resize(size, interpolation=transforms.InterpolationMode.BILINEAR)
        image = resize(image)

        # 2. Crop: either center or SAME random crop
        if center_crop:
            crop = transforms.CenterCrop(size)
            image = crop(image)
        else:
            # get_params returns (i, j, h, w)
            i, j, h, w = transforms.RandomCrop.get_params(image, output_size=size)
            image = TF.crop(image, i, j, h, w)

        # 3. Random horizontal flip with the SAME coin flip
        if random_flip:
            do_flip = random.random() < 0.5
            if do_flip:
                image = TF.hflip(image)

        # 4. ToTensor + Normalize (deterministic)
        to_tensor = transforms.ToTensor()
        normalize = transforms.Normalize([0.5], [0.5])
        image = normalize(to_tensor(image))

        return image


def collate_fn(examples, with_prior_preservation=False):
    """Stack dataset examples into a batch dict of pixel tensors and prompt strings."""
    pixel_values = [example["instance_images"] for example in examples]
    prompts = [example["instance_prompt"] for example in examples]

    # Concat class and instance examples for prior preservation.
    # We do this to avoid doing two forward passes.
    if with_prior_preservation:
        pixel_values += [example["class_images"] for example in examples]
        prompts += [example["class_prompt"] for example in examples]

    pixel_values = torch.stack(pixel_values)
    pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float()

    batch = {"pixel_values": pixel_values, "prompts": prompts}
    return batch


class BucketBatchSampler(BatchSampler):
    """Batch sampler that yields batches whose images all share one aspect-ratio bucket."""

    def __init__(self, dataset: DreamBoothDataset, batch_size: int, drop_last: bool = False):
        if not isinstance(batch_size, int) or batch_size <= 0:
            raise ValueError("batch_size should be a positive integer value, but got batch_size={}".format(batch_size))
        if not isinstance(drop_last, bool):
            raise ValueError("drop_last should be a boolean value, but got drop_last={}".format(drop_last))

        self.dataset = dataset
        self.batch_size = batch_size
        self.drop_last = drop_last

        # Group indices by bucket
        self.bucket_indices = [[] for _ in range(len(self.dataset.buckets))]
        for idx, (_, bucket_idx) in enumerate(self.dataset.pixel_values):
            self.bucket_indices[bucket_idx].append(idx)

        self.sampler_len = 0
        self.batches = []

        # Pre-generate batches for each bucket
        for indices_in_bucket in self.bucket_indices:
            # Shuffle indices within the bucket
            random.shuffle(indices_in_bucket)
            # Create batches
            for i in range(0, len(indices_in_bucket), self.batch_size):
                batch = indices_in_bucket[i : i + self.batch_size]
                if len(batch) < self.batch_size and self.drop_last:
                    continue  # Skip partial batch if drop_last is True
                self.batches.append(batch)
                self.sampler_len += 1  # Count the number of batches

    def __iter__(self):
        # Shuffle the order of the batches each epoch
        random.shuffle(self.batches)
        for batch in self.batches:
            yield batch

    def __len__(self):
        return self.sampler_len


class PromptDataset(Dataset):
    "A simple dataset to prepare the prompts to generate class images on multiple GPUs."

    def __init__(self, prompt, num_samples):
        self.prompt = prompt
        self.num_samples = num_samples

    def __len__(self):
        return self.num_samples

    def __getitem__(self, index):
        # Every item carries the same prompt plus its index (used to name output files).
        example = {}
        example["prompt"] = self.prompt
        example["index"] = index
        return example


def main(args):
    if args.report_to == "wandb" and args.hub_token is not None:
        raise ValueError(
            "You cannot use both --report_to=wandb and --hub_token due to a security risk of exposing your token."
            " Please use `hf auth login` to authenticate with the Hub."
        )

    if torch.backends.mps.is_available() and args.mixed_precision == "bf16":
        # due to pytorch#99272, MPS does not yet support bfloat16.
        raise ValueError(
            "Mixed precision training with bfloat16 is not supported on MPS. Please use fp16 (recommended) or fp32 instead."
        )
    # Deferred import: torchao is only required when FP8 training is requested.
    if args.do_fp8_training:
        from torchao.float8 import Float8LinearConfig, convert_to_float8_training

    logging_dir = Path(args.output_dir, args.logging_dir)

    accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir)
    kwargs = DistributedDataParallelKwargs(find_unused_parameters=True)
    accelerator = Accelerator(
        gradient_accumulation_steps=args.gradient_accumulation_steps,
        mixed_precision=args.mixed_precision,
        log_with=args.report_to,
        project_config=accelerator_project_config,
        kwargs_handlers=[kwargs],
    )

    # Disable AMP for MPS.
    if torch.backends.mps.is_available():
        accelerator.native_amp = False

    if args.report_to == "wandb":
        if not is_wandb_available():
            raise ImportError("Make sure to install wandb if you want to use it for logging during training.")

    # Make one log on every process with the configuration for debugging.
logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger.info(accelerator.state, main_process_only=False) if accelerator.is_local_main_process: transformers.utils.logging.set_verbosity_warning() diffusers.utils.logging.set_verbosity_info() else: transformers.utils.logging.set_verbosity_error() diffusers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Generate class images if prior preservation is enabled. if args.with_prior_preservation: class_images_dir = Path(args.class_data_dir) if not class_images_dir.exists(): class_images_dir.mkdir(parents=True) cur_class_images = len(list(class_images_dir.iterdir())) if cur_class_images < args.num_class_images: has_supported_fp16_accelerator = torch.cuda.is_available() or torch.backends.mps.is_available() torch_dtype = torch.float16 if has_supported_fp16_accelerator else torch.float32 if args.prior_generation_precision == "fp32": torch_dtype = torch.float32 elif args.prior_generation_precision == "fp16": torch_dtype = torch.float16 elif args.prior_generation_precision == "bf16": torch_dtype = torch.bfloat16 pipeline = Flux2Pipeline.from_pretrained( args.pretrained_model_name_or_path, torch_dtype=torch_dtype, revision=args.revision, variant=args.variant, ) pipeline.set_progress_bar_config(disable=True) num_new_images = args.num_class_images - cur_class_images logger.info(f"Number of class images to sample: {num_new_images}.") sample_dataset = PromptDataset(args.class_prompt, num_new_images) sample_dataloader = torch.utils.data.DataLoader(sample_dataset, batch_size=args.sample_batch_size) sample_dataloader = accelerator.prepare(sample_dataloader) pipeline.to(accelerator.device) for example in tqdm( sample_dataloader, desc="Generating class images", disable=not accelerator.is_local_main_process ): with torch.autocast(device_type=accelerator.device.type, 
dtype=torch_dtype): images = pipeline(prompt=example["prompt"]).images for i, image in enumerate(images): hash_image = insecure_hashlib.sha1(image.tobytes()).hexdigest() image_filename = class_images_dir / f"{example['index'][i] + cur_class_images}-{hash_image}.jpg" image.save(image_filename) del pipeline free_memory() # Handle the repository creation if accelerator.is_main_process: if args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) if args.push_to_hub: repo_id = create_repo( repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, ).repo_id # Load the tokenizers tokenizer = PixtralProcessor.from_pretrained( args.pretrained_model_name_or_path, subfolder="tokenizer", revision=args.revision, ) # For mixed precision training we cast all non-trainable weights (vae, text_encoder and transformer) to half-precision # as these weights are only used for inference, keeping weights in full precision is not required. weight_dtype = torch.float32 if accelerator.mixed_precision == "fp16": weight_dtype = torch.float16 elif accelerator.mixed_precision == "bf16": weight_dtype = torch.bfloat16 # Load scheduler and models noise_scheduler = FlowMatchEulerDiscreteScheduler.from_pretrained( args.pretrained_model_name_or_path, subfolder="scheduler", revision=args.revision, ) noise_scheduler_copy = copy.deepcopy(noise_scheduler) vae = AutoencoderKLFlux2.from_pretrained( args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision, variant=args.variant, ) latents_bn_mean = vae.bn.running_mean.view(1, -1, 1, 1).to(accelerator.device) latents_bn_std = torch.sqrt(vae.bn.running_var.view(1, -1, 1, 1) + vae.config.batch_norm_eps).to( accelerator.device ) quantization_config = None if args.bnb_quantization_config_path is not None: with open(args.bnb_quantization_config_path, "r") as f: config_kwargs = json.load(f) if "load_in_4bit" in config_kwargs and config_kwargs["load_in_4bit"]: config_kwargs["bnb_4bit_compute_dtype"] = weight_dtype 
quantization_config = BitsAndBytesConfig(**config_kwargs) transformer = Flux2Transformer2DModel.from_pretrained( args.pretrained_model_name_or_path, subfolder="transformer", revision=args.revision, variant=args.variant, quantization_config=quantization_config, torch_dtype=weight_dtype, ) if args.bnb_quantization_config_path is not None: transformer = prepare_model_for_kbit_training(transformer, use_gradient_checkpointing=False) if not args.remote_text_encoder: text_encoder = Mistral3ForConditionalGeneration.from_pretrained( args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision, variant=args.variant ) text_encoder.requires_grad_(False) # We only train the additional adapter LoRA layers transformer.requires_grad_(False) vae.requires_grad_(False) if args.enable_npu_flash_attention: if is_torch_npu_available(): logger.info("npu flash attention enabled.") transformer.set_attention_backend("_native_npu") else: raise ValueError("npu flash attention requires torch_npu extensions and is supported only on npu device ") if torch.backends.mps.is_available() and weight_dtype == torch.bfloat16: # due to pytorch#99272, MPS does not yet support bfloat16. raise ValueError( "Mixed precision training with bfloat16 is not supported on MPS. Please use fp16 (recommended) or fp32 instead." 
) to_kwargs = {"dtype": weight_dtype, "device": accelerator.device} if not args.offload else {"dtype": weight_dtype} # flux vae is stable in bf16 so load it in weight_dtype to reduce memory vae.to(**to_kwargs) # we never offload the transformer to CPU, so we can just use the accelerator device transformer_to_kwargs = ( {"device": accelerator.device} if args.bnb_quantization_config_path is not None else {"device": accelerator.device, "dtype": weight_dtype} ) is_fsdp = getattr(accelerator.state, "fsdp_plugin", None) is not None if not is_fsdp: transformer.to(**transformer_to_kwargs) if args.do_fp8_training: convert_to_float8_training( transformer, module_filter_fn=module_filter_fn, config=Float8LinearConfig(pad_inner_dim=True) ) if not args.remote_text_encoder: text_encoder.to(**to_kwargs) # Initialize a text encoding pipeline and keep it to CPU for now. text_encoding_pipeline = Flux2Pipeline.from_pretrained( args.pretrained_model_name_or_path, vae=None, transformer=None, tokenizer=tokenizer, text_encoder=text_encoder, scheduler=None, revision=args.revision, ) if args.gradient_checkpointing: transformer.enable_gradient_checkpointing() if args.lora_layers is not None: target_modules = [layer.strip() for layer in args.lora_layers.split(",")] else: target_modules = ["to_k", "to_q", "to_v", "to_out.0"] # now we will add new LoRA weights the transformer layers transformer_lora_config = LoraConfig( r=args.rank, lora_alpha=args.lora_alpha, lora_dropout=args.lora_dropout, init_lora_weights="gaussian", target_modules=target_modules, ) transformer.add_adapter(transformer_lora_config) def unwrap_model(model): model = accelerator.unwrap_model(model) model = model._orig_mod if is_compiled_module(model) else model return model # create custom saving & loading hooks so that `accelerator.save_state(...)` serializes in a nice format def save_model_hook(models, weights, output_dir): transformer_cls = type(unwrap_model(transformer)) # 1) Validate and pick the transformer model 
modules_to_save: dict[str, Any] = {} transformer_model = None for model in models: if isinstance(unwrap_model(model), transformer_cls): transformer_model = model modules_to_save["transformer"] = model else: raise ValueError(f"unexpected save model: {model.__class__}") if transformer_model is None: raise ValueError("No transformer model found in 'models'") # 2) Optionally gather FSDP state dict once state_dict = accelerator.get_state_dict(model) if is_fsdp else None # 3) Only main process materializes the LoRA state dict transformer_lora_layers_to_save = None if accelerator.is_main_process: peft_kwargs = {} if is_fsdp: peft_kwargs["state_dict"] = state_dict transformer_lora_layers_to_save = get_peft_model_state_dict( unwrap_model(transformer_model) if is_fsdp else transformer_model, **peft_kwargs, ) if is_fsdp: transformer_lora_layers_to_save = _to_cpu_contiguous(transformer_lora_layers_to_save) # make sure to pop weight so that corresponding model is not saved again if weights: weights.pop() Flux2Pipeline.save_lora_weights( output_dir, transformer_lora_layers=transformer_lora_layers_to_save, **_collate_lora_metadata(modules_to_save), ) def load_model_hook(models, input_dir): transformer_ = None if not is_fsdp: while len(models) > 0: model = models.pop() if isinstance(unwrap_model(model), type(unwrap_model(transformer))): transformer_ = unwrap_model(model) else: raise ValueError(f"unexpected save model: {model.__class__}") else: transformer_ = Flux2Transformer2DModel.from_pretrained( args.pretrained_model_name_or_path, subfolder="transformer", ) transformer_.add_adapter(transformer_lora_config) lora_state_dict = Flux2Pipeline.lora_state_dict(input_dir) transformer_state_dict = { f"{k.replace('transformer.', '')}": v for k, v in lora_state_dict.items() if k.startswith("transformer.") } transformer_state_dict = convert_unet_state_dict_to_peft(transformer_state_dict) incompatible_keys = set_peft_model_state_dict(transformer_, transformer_state_dict, 
adapter_name="default") if incompatible_keys is not None: # check only for unexpected keys unexpected_keys = getattr(incompatible_keys, "unexpected_keys", None) if unexpected_keys: logger.warning( f"Loading adapter weights from state_dict led to unexpected keys not found in the model: " f" {unexpected_keys}. " ) # Make sure the trainable params are in float32. This is again needed since the base models # are in `weight_dtype`. More details: # https://github.com/huggingface/diffusers/pull/6514#discussion_r1449796804 if args.mixed_precision == "fp16": models = [transformer_] # only upcast trainable parameters (LoRA) into fp32 cast_training_params(models) accelerator.register_save_state_pre_hook(save_model_hook) accelerator.register_load_state_pre_hook(load_model_hook) # Enable TF32 for faster training on Ampere GPUs, # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices if args.allow_tf32 and torch.cuda.is_available(): torch.backends.cuda.matmul.allow_tf32 = True if args.scale_lr: args.learning_rate = ( args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes ) # Make sure the trainable params are in float32. if args.mixed_precision == "fp16": models = [transformer] # only upcast trainable parameters (LoRA) into fp32 cast_training_params(models, dtype=torch.float32) transformer_lora_parameters = list(filter(lambda p: p.requires_grad, transformer.parameters())) # Optimization parameters transformer_parameters_with_lr = {"params": transformer_lora_parameters, "lr": args.learning_rate} params_to_optimize = [transformer_parameters_with_lr] # Optimizer creation if not (args.optimizer.lower() == "prodigy" or args.optimizer.lower() == "adamw"): logger.warning( f"Unsupported choice of optimizer: {args.optimizer}.Supported optimizers include [adamW, prodigy]." 
"Defaulting to adamW" ) args.optimizer = "adamw" if args.use_8bit_adam and not args.optimizer.lower() == "adamw": logger.warning( f"use_8bit_adam is ignored when optimizer is not set to 'AdamW'. Optimizer was " f"set to {args.optimizer.lower()}" ) if args.optimizer.lower() == "adamw": if args.use_8bit_adam: try: import bitsandbytes as bnb except ImportError: raise ImportError( "To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`." ) optimizer_class = bnb.optim.AdamW8bit else: optimizer_class = torch.optim.AdamW optimizer = optimizer_class( params_to_optimize, betas=(args.adam_beta1, args.adam_beta2), weight_decay=args.adam_weight_decay, eps=args.adam_epsilon, ) if args.optimizer.lower() == "prodigy": try: import prodigyopt except ImportError: raise ImportError("To use Prodigy, please install the prodigyopt library: `pip install prodigyopt`") optimizer_class = prodigyopt.Prodigy if args.learning_rate <= 0.1: logger.warning( "Learning rate is too low. When using prodigy, it's generally better to set learning rate around 1.0" ) optimizer = optimizer_class( params_to_optimize, betas=(args.adam_beta1, args.adam_beta2), beta3=args.prodigy_beta3, weight_decay=args.adam_weight_decay, eps=args.adam_epsilon, decouple=args.prodigy_decouple, use_bias_correction=args.prodigy_use_bias_correction, safeguard_warmup=args.prodigy_safeguard_warmup, ) if args.aspect_ratio_buckets is not None: buckets = parse_buckets_string(args.aspect_ratio_buckets) else: buckets = [(args.resolution, args.resolution)] logger.info(f"Using parsed aspect ratio buckets: {buckets}") # Dataset and DataLoaders creation: train_dataset = DreamBoothDataset( instance_data_root=args.instance_data_dir, instance_prompt=args.instance_prompt, class_prompt=args.class_prompt, class_data_root=args.class_data_dir if args.with_prior_preservation else None, class_num=args.num_class_images, size=args.resolution, repeats=args.repeats, center_crop=args.center_crop, buckets=buckets, ) 
# DataLoader driven by BucketBatchSampler: each batch is drawn from a single
# aspect-ratio bucket so images in the batch share a resolution and can be stacked.
batch_sampler = BucketBatchSampler(train_dataset, batch_size=args.train_batch_size, drop_last=True)
train_dataloader = torch.utils.data.DataLoader(
    train_dataset,
    batch_sampler=batch_sampler,
    collate_fn=lambda examples: collate_fn(examples, args.with_prior_preservation),
    num_workers=args.dataloader_num_workers,
)

def compute_text_embeddings(prompt, text_encoding_pipeline):
    """Encode `prompt` with the local text-encoding pipeline under no_grad.

    Returns (prompt_embeds, text_ids) as produced by `encode_prompt`.
    """
    with torch.no_grad():
        prompt_embeds, text_ids = text_encoding_pipeline.encode_prompt(
            prompt=prompt,
            max_sequence_length=args.max_sequence_length,
            text_encoder_out_layers=args.text_encoder_out_layers,
        )
    return prompt_embeds, text_ids

def compute_remote_text_embeddings(prompts):
    """Encode prompt(s) via the hosted FLUX.2 remote text encoder.

    Accepts a single prompt string or a list/tuple of prompts. Returns
    (prompt_embeds, text_ids) moved to `accelerator.device`. Raises
    RuntimeError if the remote inference fails for any prompt.
    """
    import io

    import requests

    if args.hub_token is not None:
        hf_token = args.hub_token
    else:
        from huggingface_hub import get_token

        hf_token = get_token()
    if hf_token is None:
        raise ValueError(
            "No HuggingFace token found. To use the remote text encoder please login using `hf auth login` or provide a token using --hub_token"
        )

    def _encode_single(prompt: str):
        response = requests.post(
            "https://remote-text-encoder-flux-2.huggingface.co/predict",
            json={"prompt": prompt},
            headers={"Authorization": f"Bearer {hf_token}", "Content-Type": "application/json"},
        )
        # Fix: the original used `assert response.status_code == 200`, which is
        # stripped under `python -O` and would let an error payload reach
        # torch.load below. Validate the HTTP status explicitly instead.
        if response.status_code != 200:
            raise RuntimeError(f"Remote text encoder request failed: {response.status_code=}")
        # NOTE(review): torch.load unpickles the response body; this is only
        # acceptable because the endpoint is a trusted, authenticated HF service.
        return torch.load(io.BytesIO(response.content))

    try:
        if isinstance(prompts, (list, tuple)):
            embeds = [_encode_single(p) for p in prompts]
            prompt_embeds = torch.cat(embeds, dim=0)
        else:
            prompt_embeds = _encode_single(prompts)
        text_ids = Flux2Pipeline._prepare_text_ids(prompt_embeds).to(accelerator.device)
        prompt_embeds = prompt_embeds.to(accelerator.device)
        return prompt_embeds, text_ids
    except Exception as e:
        raise RuntimeError("Remote text encoder inference failed.") from e

# If no type of tuning is done on the text_encoder and custom instance prompts are NOT
# provided (i.e. the --instance_prompt is used for all images), we encode the instance prompt once to avoid
# the redundant encoding.
# (review) One-time prompt preprocessing before training:
#   * when a single shared --instance_prompt is used (no per-image custom prompts),
#     encode it once (remotely or via the offloadable local pipeline);
#   * for prior preservation, likewise pre-encode the class prompt;
#   * pre-encode the validation prompt into `validation_embeddings`;
#   * optionally wrap the text encoder with FSDP and barrier all ranks.
# NOTE(review): exact nesting of the prior-preservation branch relative to the
# outer `custom_instance_prompts` check is preserved verbatim below.
if not train_dataset.custom_instance_prompts: if args.remote_text_encoder: instance_prompt_hidden_states, instance_text_ids = compute_remote_text_embeddings(args.instance_prompt) else: with offload_models(text_encoding_pipeline, device=accelerator.device, offload=args.offload): instance_prompt_hidden_states, instance_text_ids = compute_text_embeddings( args.instance_prompt, text_encoding_pipeline ) # Handle class prompt for prior-preservation. if args.with_prior_preservation: if args.remote_text_encoder: class_prompt_hidden_states, class_text_ids = compute_remote_text_embeddings(args.class_prompt) else: with offload_models(text_encoding_pipeline, device=accelerator.device, offload=args.offload): class_prompt_hidden_states, class_text_ids = compute_text_embeddings( args.class_prompt, text_encoding_pipeline ) validation_embeddings = {} if args.validation_prompt is not None: if args.remote_text_encoder: (validation_embeddings["prompt_embeds"], validation_embeddings["text_ids"]) = ( compute_remote_text_embeddings(args.validation_prompt) ) else: with offload_models(text_encoding_pipeline, device=accelerator.device, offload=args.offload): (validation_embeddings["prompt_embeds"], validation_embeddings["text_ids"]) = compute_text_embeddings( args.validation_prompt, text_encoding_pipeline ) # Init FSDP for text encoder if args.fsdp_text_encoder: fsdp_kwargs = get_fsdp_kwargs_from_accelerator(accelerator) text_encoder_fsdp = wrap_with_fsdp( model=text_encoding_pipeline.text_encoder, device=accelerator.device, offload=args.offload, limit_all_gathers=True, use_orig_params=True, fsdp_kwargs=fsdp_kwargs, ) text_encoding_pipeline.text_encoder = text_encoder_fsdp dist.barrier() # If custom instance prompts are NOT provided (i.e. the instance prompt is used for all images), # pack the statically computed variables appropriately here. This is so that we don't # have to pass them to the dataloader.
if not train_dataset.custom_instance_prompts: prompt_embeds = instance_prompt_hidden_states text_ids = instance_text_ids if args.with_prior_preservation: prompt_embeds = torch.cat([prompt_embeds, class_prompt_hidden_states], dim=0) text_ids = torch.cat([text_ids, class_text_ids], dim=0) # if cache_latents is set to True, we encode images to latents and store them. # Similar to pre-encoding in the case of a single instance prompt, if custom prompts are provided # we encode them in advance as well. precompute_latents = args.cache_latents or train_dataset.custom_instance_prompts if precompute_latents: prompt_embeds_cache = [] text_ids_cache = [] latents_cache = [] for batch in tqdm(train_dataloader, desc="Caching latents"): with torch.no_grad(): if args.cache_latents: with offload_models(vae, device=accelerator.device, offload=args.offload): batch["pixel_values"] = batch["pixel_values"].to( accelerator.device, non_blocking=True, dtype=vae.dtype ) latents_cache.append(vae.encode(batch["pixel_values"]).latent_dist) if train_dataset.custom_instance_prompts: if args.remote_text_encoder: prompt_embeds, text_ids = compute_remote_text_embeddings(batch["prompts"]) elif args.fsdp_text_encoder: prompt_embeds, text_ids = compute_text_embeddings(batch["prompts"], text_encoding_pipeline) else: with offload_models(text_encoding_pipeline, device=accelerator.device, offload=args.offload): prompt_embeds, text_ids = compute_text_embeddings(batch["prompts"], text_encoding_pipeline) prompt_embeds_cache.append(prompt_embeds) text_ids_cache.append(text_ids) # move back to cpu before deleting to ensure memory is freed see: https://github.com/huggingface/diffusers/issues/11376#issue-3008144624 if args.cache_latents: vae = vae.to("cpu") del vae # move back to cpu before deleting to ensure memory is freed see: https://github.com/huggingface/diffusers/issues/11376#issue-3008144624 if not args.remote_text_encoder: text_encoding_pipeline = text_encoding_pipeline.to("cpu") del text_encoder, 
tokenizer free_memory() # Scheduler and math around the number of training steps. # Check the PR https://github.com/huggingface/diffusers/pull/8312 for detailed explanation. num_warmup_steps_for_scheduler = args.lr_warmup_steps * accelerator.num_processes if args.max_train_steps is None: len_train_dataloader_after_sharding = math.ceil(len(train_dataloader) / accelerator.num_processes) num_update_steps_per_epoch = math.ceil(len_train_dataloader_after_sharding / args.gradient_accumulation_steps) num_training_steps_for_scheduler = ( args.num_train_epochs * accelerator.num_processes * num_update_steps_per_epoch ) else: num_training_steps_for_scheduler = args.max_train_steps * accelerator.num_processes lr_scheduler = get_scheduler( args.lr_scheduler, optimizer=optimizer, num_warmup_steps=num_warmup_steps_for_scheduler, num_training_steps=num_training_steps_for_scheduler, num_cycles=args.lr_num_cycles, power=args.lr_power, ) # Prepare everything with our `accelerator`. transformer, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( transformer, optimizer, train_dataloader, lr_scheduler ) # We need to recalculate our total training steps as the size of the training dataloader may have changed. num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if args.max_train_steps is None: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch if num_training_steps_for_scheduler != args.max_train_steps: logger.warning( f"The length of the 'train_dataloader' after 'accelerator.prepare' ({len(train_dataloader)}) does not match " f"the expected length ({len_train_dataloader_after_sharding}) when the learning rate scheduler was created. " f"This inconsistency may result in the learning rate scheduler not functioning properly." 
) # Afterwards we recalculate our number of training epochs args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) # We need to initialize the trackers we use, and also store our configuration. # The trackers initializes automatically on the main process. if accelerator.is_main_process: tracker_name = "dreambooth-flux2-lora" args_cp = vars(args).copy() args_cp["text_encoder_out_layers"] = str(args_cp["text_encoder_out_layers"]) accelerator.init_trackers(tracker_name, config=args_cp) # Train! total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps logger.info("***** Running training *****") logger.info(f" Num examples = {len(train_dataset)}") logger.info(f" Num batches each epoch = {len(train_dataloader)}") logger.info(f" Num Epochs = {args.num_train_epochs}") logger.info(f" Instantaneous batch size per device = {args.train_batch_size}") logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}") logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") logger.info(f" Total optimization steps = {args.max_train_steps}") global_step = 0 first_epoch = 0 # Potentially load in the weights and states from a previous save if args.resume_from_checkpoint: if args.resume_from_checkpoint != "latest": path = os.path.basename(args.resume_from_checkpoint) else: # Get the mos recent checkpoint dirs = os.listdir(args.output_dir) dirs = [d for d in dirs if d.startswith("checkpoint")] dirs = sorted(dirs, key=lambda x: int(x.split("-")[1])) path = dirs[-1] if len(dirs) > 0 else None if path is None: accelerator.print( f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run." 
) args.resume_from_checkpoint = None initial_global_step = 0 else: accelerator.print(f"Resuming from checkpoint {path}") accelerator.load_state(os.path.join(args.output_dir, path)) global_step = int(path.split("-")[1]) initial_global_step = global_step first_epoch = global_step // num_update_steps_per_epoch else: initial_global_step = 0 progress_bar = tqdm( range(0, args.max_train_steps), initial=initial_global_step, desc="Steps", # Only show the progress bar once on each machine. disable=not accelerator.is_local_main_process, ) def get_sigmas(timesteps, n_dim=4, dtype=torch.float32): sigmas = noise_scheduler_copy.sigmas.to(device=accelerator.device, dtype=dtype) schedule_timesteps = noise_scheduler_copy.timesteps.to(accelerator.device) timesteps = timesteps.to(accelerator.device) step_indices = [(schedule_timesteps == t).nonzero().item() for t in timesteps] sigma = sigmas[step_indices].flatten() while len(sigma.shape) < n_dim: sigma = sigma.unsqueeze(-1) return sigma for epoch in range(first_epoch, args.num_train_epochs): transformer.train() for step, batch in enumerate(train_dataloader): models_to_accumulate = [transformer] prompts = batch["prompts"] with accelerator.accumulate(models_to_accumulate): if train_dataset.custom_instance_prompts: prompt_embeds = prompt_embeds_cache[step] text_ids = text_ids_cache[step] else: num_repeat_elements = len(prompts) prompt_embeds = prompt_embeds.repeat(num_repeat_elements, 1, 1) text_ids = text_ids.repeat(num_repeat_elements, 1, 1) # Convert images to latent space if args.cache_latents: model_input = latents_cache[step].mode() else: with offload_models(vae, device=accelerator.device, offload=args.offload): pixel_values = batch["pixel_values"].to(dtype=vae.dtype) model_input = vae.encode(pixel_values).latent_dist.mode() model_input = Flux2Pipeline._patchify_latents(model_input) model_input = (model_input - latents_bn_mean) / latents_bn_std model_input_ids = 
Flux2Pipeline._prepare_latent_ids(model_input).to(device=model_input.device) # Sample noise that we'll add to the latents noise = torch.randn_like(model_input) bsz = model_input.shape[0] # Sample a random timestep for each image # for weighting schemes where we sample timesteps non-uniformly u = compute_density_for_timestep_sampling( weighting_scheme=args.weighting_scheme, batch_size=bsz, logit_mean=args.logit_mean, logit_std=args.logit_std, mode_scale=args.mode_scale, ) indices = (u * noise_scheduler_copy.config.num_train_timesteps).long() timesteps = noise_scheduler_copy.timesteps[indices].to(device=model_input.device) # Add noise according to flow matching. # zt = (1 - texp) * x + texp * z1 sigmas = get_sigmas(timesteps, n_dim=model_input.ndim, dtype=model_input.dtype) noisy_model_input = (1.0 - sigmas) * model_input + sigmas * noise # [B, C, H, W] -> [B, H*W, C] packed_noisy_model_input = Flux2Pipeline._pack_latents(noisy_model_input) # handle guidance guidance = torch.full([1], args.guidance_scale, device=accelerator.device) guidance = guidance.expand(model_input.shape[0]) # Predict the noise residual model_pred = transformer( hidden_states=packed_noisy_model_input, # (B, image_seq_len, C) timestep=timesteps / 1000, guidance=guidance, encoder_hidden_states=prompt_embeds, txt_ids=text_ids, # B, text_seq_len, 4 img_ids=model_input_ids, # B, image_seq_len, 4 return_dict=False, )[0] model_pred = model_pred[:, : packed_noisy_model_input.size(1) :] model_pred = Flux2Pipeline._unpack_latents_with_ids(model_pred, model_input_ids) # these weighting schemes use a uniform timestep sampling # and instead post-weight the loss weighting = compute_loss_weighting_for_sd3(weighting_scheme=args.weighting_scheme, sigmas=sigmas) # flow matching loss target = noise - model_input if args.with_prior_preservation: # Chunk the noise and model_pred into two parts and compute the loss on each part separately. 
model_pred, model_pred_prior = torch.chunk(model_pred, 2, dim=0) target, target_prior = torch.chunk(target, 2, dim=0) # Compute prior loss prior_loss = torch.mean( (weighting.float() * (model_pred_prior.float() - target_prior.float()) ** 2).reshape( target_prior.shape[0], -1 ), 1, ) prior_loss = prior_loss.mean() # Compute regular loss. loss = torch.mean( (weighting.float() * (model_pred.float() - target.float()) ** 2).reshape(target.shape[0], -1), 1, ) loss = loss.mean() if args.with_prior_preservation: # Add the prior loss to the instance loss. loss = loss + args.prior_loss_weight * prior_loss accelerator.backward(loss) if accelerator.sync_gradients: params_to_clip = transformer.parameters() accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm) optimizer.step() lr_scheduler.step() optimizer.zero_grad() # Checks if the accelerator has performed an optimization step behind the scenes if accelerator.sync_gradients: progress_bar.update(1) global_step += 1 if accelerator.is_main_process or is_fsdp: if global_step % args.checkpointing_steps == 0: # _before_ saving state, check if this save would set us over the `checkpoints_total_limit` if args.checkpoints_total_limit is not None: checkpoints = os.listdir(args.output_dir) checkpoints = [d for d in checkpoints if d.startswith("checkpoint")] checkpoints = sorted(checkpoints, key=lambda x: int(x.split("-")[1])) # before we save the new checkpoint, we need to have at _most_ `checkpoints_total_limit - 1` checkpoints if len(checkpoints) >= args.checkpoints_total_limit: num_to_remove = len(checkpoints) - args.checkpoints_total_limit + 1 removing_checkpoints = checkpoints[0:num_to_remove] logger.info( f"{len(checkpoints)} checkpoints already exist, removing {len(removing_checkpoints)} checkpoints" ) logger.info(f"removing checkpoints: {', '.join(removing_checkpoints)}") for removing_checkpoint in removing_checkpoints: removing_checkpoint = os.path.join(args.output_dir, removing_checkpoint) 
shutil.rmtree(removing_checkpoint) save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}") accelerator.save_state(save_path) logger.info(f"Saved state to {save_path}") logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]} progress_bar.set_postfix(**logs) accelerator.log(logs, step=global_step) if global_step >= args.max_train_steps: break if accelerator.is_main_process: if args.validation_prompt is not None and epoch % args.validation_epochs == 0: # create pipeline pipeline = Flux2Pipeline.from_pretrained( args.pretrained_model_name_or_path, text_encoder=None, tokenizer=None, transformer=unwrap_model(transformer), revision=args.revision, variant=args.variant, torch_dtype=weight_dtype, ) images = log_validation( pipeline=pipeline, args=args, accelerator=accelerator, pipeline_args=validation_embeddings, epoch=epoch, torch_dtype=weight_dtype, ) del pipeline free_memory() # Save the lora layers accelerator.wait_for_everyone() if is_fsdp: transformer = unwrap_model(transformer) state_dict = accelerator.get_state_dict(transformer) if accelerator.is_main_process: modules_to_save = {} if is_fsdp: if args.bnb_quantization_config_path is None: if args.upcast_before_saving: state_dict = { k: v.to(torch.float32) if isinstance(v, torch.Tensor) else v for k, v in state_dict.items() } else: state_dict = { k: v.to(weight_dtype) if isinstance(v, torch.Tensor) else v for k, v in state_dict.items() } transformer_lora_layers = get_peft_model_state_dict( transformer, state_dict=state_dict, ) transformer_lora_layers = { k: v.detach().cpu().contiguous() if isinstance(v, torch.Tensor) else v for k, v in transformer_lora_layers.items() } else: transformer = unwrap_model(transformer) if args.bnb_quantization_config_path is None: if args.upcast_before_saving: transformer.to(torch.float32) else: transformer = transformer.to(weight_dtype) transformer_lora_layers = get_peft_model_state_dict(transformer) modules_to_save["transformer"] = transformer 
Flux2Pipeline.save_lora_weights( save_directory=args.output_dir, transformer_lora_layers=transformer_lora_layers, **_collate_lora_metadata(modules_to_save), ) images = [] run_validation = (args.validation_prompt and args.num_validation_images > 0) or (args.final_validation_prompt) should_run_final_inference = not args.skip_final_inference and run_validation if should_run_final_inference: pipeline = Flux2Pipeline.from_pretrained( args.pretrained_model_name_or_path, revision=args.revision, variant=args.variant, torch_dtype=weight_dtype, ) # load attention processors pipeline.load_lora_weights(args.output_dir) # run inference images = [] if args.validation_prompt and args.num_validation_images > 0: images = log_validation( pipeline=pipeline, args=args, accelerator=accelerator, pipeline_args=validation_embeddings, epoch=epoch, is_final_validation=True, torch_dtype=weight_dtype, ) images = None del pipeline free_memory() validation_prompt = args.validation_prompt if args.validation_prompt else args.final_validation_prompt quant_training = None if args.do_fp8_training: quant_training = "FP8 TorchAO" elif args.bnb_quantization_config_path: quant_training = "BitsandBytes" save_model_card( (args.hub_model_id or Path(args.output_dir).name) if not args.push_to_hub else repo_id, images=images, base_model=args.pretrained_model_name_or_path, instance_prompt=args.instance_prompt, validation_prompt=validation_prompt, repo_folder=args.output_dir, quant_training=quant_training, ) if args.push_to_hub: upload_folder( repo_id=repo_id, folder_path=args.output_dir, commit_message="End of training", ignore_patterns=["step_*", "epoch_*"], ) accelerator.end_training() if __name__ == "__main__": args = parse_args() main(args)
{ "repo_id": "huggingface/diffusers", "file_path": "examples/dreambooth/train_dreambooth_lora_flux2.py", "license": "Apache License 2.0", "lines": 1755, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:examples/dreambooth/train_dreambooth_lora_flux2_img2img.py
#!/usr/bin/env python # coding=utf-8 # Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # /// script # dependencies = [ # "diffusers @ git+https://github.com/huggingface/diffusers.git", # "torch>=2.0.0", # "accelerate>=0.31.0", # "transformers>=4.41.2", # "ftfy", # "tensorboard", # "Jinja2", # "peft>=0.11.1", # "sentencepiece", # "torchvision", # "datasets", # "bitsandbytes", # "prodigyopt", # ] # /// import argparse import copy import itertools import json import logging import math import os import random import shutil from contextlib import nullcontext from pathlib import Path from typing import Any import numpy as np import torch import transformers from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import DistributedDataParallelKwargs, ProjectConfiguration, set_seed from huggingface_hub import create_repo, upload_folder from peft import LoraConfig, prepare_model_for_kbit_training, set_peft_model_state_dict from peft.utils import get_peft_model_state_dict from PIL import Image from PIL.ImageOps import exif_transpose from torch.utils.data import Dataset from torch.utils.data.sampler import BatchSampler from torchvision import transforms from torchvision.transforms import functional as TF from tqdm.auto import tqdm from transformers import Mistral3ForConditionalGeneration, PixtralProcessor import diffusers from diffusers import ( AutoencoderKLFlux2, BitsAndBytesConfig, 
FlowMatchEulerDiscreteScheduler, Flux2Pipeline, Flux2Transformer2DModel, ) from diffusers.optimization import get_scheduler from diffusers.pipelines.flux2.image_processor import Flux2ImageProcessor from diffusers.training_utils import ( _collate_lora_metadata, _to_cpu_contiguous, cast_training_params, compute_density_for_timestep_sampling, compute_loss_weighting_for_sd3, find_nearest_bucket, free_memory, get_fsdp_kwargs_from_accelerator, offload_models, parse_buckets_string, wrap_with_fsdp, ) from diffusers.utils import ( check_min_version, convert_unet_state_dict_to_peft, is_wandb_available, load_image, ) from diffusers.utils.hub_utils import load_or_create_model_card, populate_model_card from diffusers.utils.import_utils import is_torch_npu_available from diffusers.utils.torch_utils import is_compiled_module if getattr(torch, "distributed", None) is not None: import torch.distributed as dist if is_wandb_available(): import wandb # Will error if the minimal version of diffusers is not installed. Remove at your own risks. check_min_version("0.37.0.dev0") logger = get_logger(__name__) def save_model_card( repo_id: str, images=None, base_model: str = None, instance_prompt=None, validation_prompt=None, repo_folder=None, fp8_training=False, ): widget_dict = [] if images is not None: for i, image in enumerate(images): image.save(os.path.join(repo_folder, f"image_{i}.png")) widget_dict.append( {"text": validation_prompt if validation_prompt else " ", "output": {"url": f"image_{i}.png"}} ) model_description = f""" # Flux.2 DreamBooth LoRA - {repo_id} <Gallery /> ## Model description These are {repo_id} DreamBooth LoRA weights for {base_model}. The weights were trained using [DreamBooth](https://dreambooth.github.io/) with the [Flux2 diffusers trainer](https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/README_flux2.md). FP8 training? {fp8_training} ## Trigger words You should use `{instance_prompt}` to trigger the image generation. 
## Download model [Download the *.safetensors LoRA]({repo_id}/tree/main) in the Files & versions tab. ## Use it with the [🧨 diffusers library](https://github.com/huggingface/diffusers) ```py from diffusers import AutoPipelineForText2Image import torch pipeline = AutoPipelineForText2Image.from_pretrained("black-forest-labs/FLUX.2", torch_dtype=torch.bfloat16).to('cuda') pipeline.load_lora_weights('{repo_id}', weight_name='pytorch_lora_weights.safetensors') image = pipeline('{validation_prompt if validation_prompt else instance_prompt}').images[0] ``` For more details, including weighting, merging and fusing LoRAs, check the [documentation on loading LoRAs in diffusers](https://huggingface.co/docs/diffusers/main/en/using-diffusers/loading_adapters) ## License Please adhere to the licensing terms as described [here](https://huggingface.co/black-forest-labs/FLUX.2/blob/main/LICENSE.md). """ model_card = load_or_create_model_card( repo_id_or_path=repo_id, from_training=True, license="other", base_model=base_model, prompt=instance_prompt, model_description=model_description, widget=widget_dict, ) tags = [ "text-to-image", "diffusers-training", "diffusers", "lora", "flux2", "flux2-diffusers", "template:sd-lora", ] model_card = populate_model_card(model_card, tags=tags) model_card.save(os.path.join(repo_folder, "README.md")) def log_validation( pipeline, args, accelerator, pipeline_args, epoch, torch_dtype, is_final_validation=False, ): args.num_validation_images = args.num_validation_images if args.num_validation_images else 1 logger.info( f"Running validation... \n Generating {args.num_validation_images} images with prompt:" f" {args.validation_prompt}." 
) pipeline = pipeline.to(dtype=torch_dtype) pipeline.enable_model_cpu_offload() pipeline.set_progress_bar_config(disable=True) # run inference generator = torch.Generator(device=accelerator.device).manual_seed(args.seed) if args.seed is not None else None autocast_ctx = torch.autocast(accelerator.device.type) if not is_final_validation else nullcontext() images = [] for _ in range(args.num_validation_images): with autocast_ctx: image = pipeline( image=pipeline_args["image"], prompt_embeds=pipeline_args["prompt_embeds"], generator=generator, ).images[0] images.append(image) for tracker in accelerator.trackers: phase_name = "test" if is_final_validation else "validation" if tracker.name == "tensorboard": np_images = np.stack([np.asarray(img) for img in images]) tracker.writer.add_images(phase_name, np_images, epoch, dataformats="NHWC") if tracker.name == "wandb": tracker.log( { phase_name: [ wandb.Image(image, caption=f"{i}: {args.validation_prompt}") for i, image in enumerate(images) ] } ) del pipeline free_memory() return images def module_filter_fn(mod: torch.nn.Module, fqn: str): # don't convert the output module if fqn == "proj_out": return False # don't convert linear modules with weight dimensions not divisible by 16 if isinstance(mod, torch.nn.Linear): if mod.in_features % 16 != 0 or mod.out_features % 16 != 0: return False return True def parse_args(input_args=None): parser = argparse.ArgumentParser(description="Simple example of a training script.") parser.add_argument( "--pretrained_model_name_or_path", type=str, default=None, required=True, help="Path to pretrained model or model identifier from huggingface.co/models.", ) parser.add_argument( "--revision", type=str, default=None, required=False, help="Revision of pretrained model identifier from huggingface.co/models.", ) parser.add_argument( "--bnb_quantization_config_path", type=str, default=None, help="Quantization config in a JSON file that will be used to define the bitsandbytes quant config of the 
DiT.", ) parser.add_argument( "--do_fp8_training", action="store_true", help="if we are doing FP8 training.", ) parser.add_argument( "--variant", type=str, default=None, help="Variant of the model files of the pretrained model identifier from huggingface.co/models, 'e.g.' fp16", ) parser.add_argument( "--dataset_name", type=str, default=None, help=( "The name of the Dataset (from the HuggingFace hub) containing the training data of instance images (could be your own, possibly private," " dataset). It can also be a path pointing to a local copy of a dataset in your filesystem," " or to a folder containing files that 🤗 Datasets can understand." ), ) parser.add_argument( "--dataset_config_name", type=str, default=None, help="The config of the Dataset, leave as None if there's only one config.", ) parser.add_argument( "--instance_data_dir", type=str, default=None, help=("A folder containing the training data. "), ) parser.add_argument( "--cache_dir", type=str, default=None, help="The directory where the downloaded models and datasets will be stored.", ) parser.add_argument( "--image_column", type=str, default="image", help="The column of the dataset containing the target image. By " "default, the standard Image Dataset maps out 'file_name' " "to 'image'.", ) parser.add_argument( "--cond_image_column", type=str, default=None, help="Column in the dataset containing the condition image. 
Must be specified when performing I2I fine-tuning", ) parser.add_argument( "--caption_column", type=str, default=None, help="The column of the dataset containing the instance prompt for each image", ) parser.add_argument("--repeats", type=int, default=1, help="How many times to repeat the training data.") parser.add_argument( "--class_data_dir", type=str, default=None, required=False, help="A folder containing the training data of class images.", ) parser.add_argument( "--instance_prompt", type=str, default=None, required=False, help="The prompt with identifier specifying the instance, e.g. 'photo of a TOK dog', 'in the style of TOK'", ) parser.add_argument( "--max_sequence_length", type=int, default=512, help="Maximum sequence length to use with with the T5 text encoder", ) parser.add_argument( "--validation_prompt", type=str, default=None, help="A prompt that is used during validation to verify that the model is learning.", ) parser.add_argument( "--validation_image", type=str, default=None, help="path to an image that is used during validation as the condition image to verify that the model is learning.", ) parser.add_argument( "--skip_final_inference", default=False, action="store_true", help="Whether to skip the final inference step with loaded lora weights upon training completion. This will run intermediate validation inference if `validation_prompt` is provided. Specify to reduce memory.", ) parser.add_argument( "--final_validation_prompt", type=str, default=None, help="A prompt that is used during a final validation to verify that the model is learning. Ignored if `--validation_prompt` is provided.", ) parser.add_argument( "--num_validation_images", type=int, default=4, help="Number of images that should be generated during validation with `validation_prompt`.", ) parser.add_argument( "--validation_epochs", type=int, default=50, help=( "Run dreambooth validation every X epochs. 
Dreambooth validation consists of running the prompt" " `args.validation_prompt` multiple times: `args.num_validation_images`." ), ) parser.add_argument( "--rank", type=int, default=4, help=("The dimension of the LoRA update matrices."), ) parser.add_argument( "--lora_alpha", type=int, default=4, help="LoRA alpha to be used for additional scaling.", ) parser.add_argument("--lora_dropout", type=float, default=0.0, help="Dropout probability for LoRA layers") parser.add_argument( "--output_dir", type=str, default="flux-dreambooth-lora", help="The output directory where the model predictions and checkpoints will be written.", ) parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") parser.add_argument( "--resolution", type=int, default=512, help=( "The resolution for input images, all the images in the train/validation dataset will be resized to this" " resolution" ), ) parser.add_argument( "--aspect_ratio_buckets", type=str, default=None, help=( "Aspect ratio buckets to use for training. Define as a string of 'h1,w1;h2,w2;...'. " "e.g. '1024,1024;768,1360;1360,768;880,1168;1168,880;1248,832;832,1248'" "Images will be resized and cropped to fit the nearest bucket. If provided, --resolution is ignored." ), ) parser.add_argument( "--center_crop", default=False, action="store_true", help=( "Whether to center crop the input images to the resolution. If not set, the images will be randomly" " cropped. The images will be resized to the resolution first before cropping." ), ) parser.add_argument( "--random_flip", action="store_true", help="whether to randomly flip images horizontally", ) parser.add_argument( "--train_batch_size", type=int, default=4, help="Batch size (per device) for the training dataloader." ) parser.add_argument( "--sample_batch_size", type=int, default=4, help="Batch size (per device) for sampling images." 
) parser.add_argument("--num_train_epochs", type=int, default=1) parser.add_argument( "--max_train_steps", type=int, default=None, help="Total number of training steps to perform. If provided, overrides num_train_epochs.", ) parser.add_argument( "--checkpointing_steps", type=int, default=500, help=( "Save a checkpoint of the training state every X updates. These checkpoints can be used both as final" " checkpoints in case they are better than the last checkpoint, and are also suitable for resuming" " training using `--resume_from_checkpoint`." ), ) parser.add_argument( "--checkpoints_total_limit", type=int, default=None, help=("Max number of checkpoints to store."), ) parser.add_argument( "--resume_from_checkpoint", type=str, default=None, help=( "Whether training should be resumed from a previous checkpoint. Use a path saved by" ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.' ), ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument( "--gradient_checkpointing", action="store_true", help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.", ) parser.add_argument( "--learning_rate", type=float, default=1e-4, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument( "--guidance_scale", type=float, default=3.5, help="the FLUX.1 dev variant is a guidance distilled model", ) parser.add_argument( "--scale_lr", action="store_true", default=False, help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.", ) parser.add_argument( "--lr_scheduler", type=str, default="constant", help=( 'The scheduler type to use. 
Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' ' "constant", "constant_with_warmup"]' ), ) parser.add_argument( "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler." ) parser.add_argument( "--lr_num_cycles", type=int, default=1, help="Number of hard resets of the lr in cosine_with_restarts scheduler.", ) parser.add_argument("--lr_power", type=float, default=1.0, help="Power factor of the polynomial scheduler.") parser.add_argument( "--dataloader_num_workers", type=int, default=0, help=( "Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process." ), ) parser.add_argument( "--weighting_scheme", type=str, default="none", choices=["sigma_sqrt", "logit_normal", "mode", "cosmap", "none"], help=('We default to the "none" weighting scheme for uniform sampling and uniform loss'), ) parser.add_argument( "--logit_mean", type=float, default=0.0, help="mean to use when using the `'logit_normal'` weighting scheme." ) parser.add_argument( "--logit_std", type=float, default=1.0, help="std to use when using the `'logit_normal'` weighting scheme." ) parser.add_argument( "--mode_scale", type=float, default=1.29, help="Scale of mode weighting scheme. Only effective when using the `'mode'` as the `weighting_scheme`.", ) parser.add_argument( "--optimizer", type=str, default="AdamW", help=('The optimizer type to use. Choose between ["AdamW", "prodigy"]'), ) parser.add_argument( "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes. Ignored if optimizer is not set to AdamW", ) parser.add_argument( "--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam and Prodigy optimizers." ) parser.add_argument( "--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam and Prodigy optimizers." 
) parser.add_argument( "--prodigy_beta3", type=float, default=None, help="coefficients for computing the Prodigy stepsize using running averages. If set to None, " "uses the value of square root of beta2. Ignored if optimizer is adamW", ) parser.add_argument("--prodigy_decouple", type=bool, default=True, help="Use AdamW style decoupled weight decay") parser.add_argument("--adam_weight_decay", type=float, default=1e-04, help="Weight decay to use for unet params") parser.add_argument( "--adam_weight_decay_text_encoder", type=float, default=1e-03, help="Weight decay to use for text_encoder" ) parser.add_argument( "--lora_layers", type=str, default=None, help=( 'The transformer modules to apply LoRA training on. Please specify the layers in a comma separated. E.g. - "to_k,to_q,to_v,to_out.0" will result in lora training of attention layers only' ), ) parser.add_argument( "--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer and Prodigy optimizers.", ) parser.add_argument( "--prodigy_use_bias_correction", type=bool, default=True, help="Turn on Adam's bias correction. True by default. Ignored if optimizer is adamW", ) parser.add_argument( "--prodigy_safeguard_warmup", type=bool, default=True, help="Remove lr from the denominator of D estimate to avoid issues during warm-up stage. True by default. " "Ignored if optimizer is adamW", ) parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.") parser.add_argument( "--hub_model_id", type=str, default=None, help="The name of the repository to keep in sync with the local `output_dir`.", ) parser.add_argument( "--logging_dir", type=str, default="logs", help=( "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. 
Will default to" " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***." ), ) parser.add_argument( "--allow_tf32", action="store_true", help=( "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see" " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices" ), ) parser.add_argument( "--cache_latents", action="store_true", default=False, help="Cache the VAE latents", ) parser.add_argument( "--report_to", type=str, default="tensorboard", help=( 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`' ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.' ), ) parser.add_argument( "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16"], help=( "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >=" " 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the" " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config." ), ) parser.add_argument( "--upcast_before_saving", action="store_true", default=False, help=( "Whether to upcast the trained transformer layers to float32 before saving (at the end of training). " "Defaults to precision dtype used for training to save memory" ), ) parser.add_argument( "--offload", action="store_true", help="Whether to offload the VAE and the text encoder to CPU when they are not used.", ) parser.add_argument( "--remote_text_encoder", action="store_true", help="Whether to use a remote text encoder. 
This means the text encoder will not be loaded locally and instead, the prompt embeddings will be computed remotely using the HuggingFace Inference API.", ) parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") parser.add_argument("--enable_npu_flash_attention", action="store_true", help="Enabla Flash Attention for NPU") parser.add_argument("--fsdp_text_encoder", action="store_true", help="Use FSDP for text encoder") if input_args is not None: args = parser.parse_args(input_args) else: args = parser.parse_args() if args.cond_image_column is None: raise ValueError( "you must provide --cond_image_column for image-to-image training. Otherwise please see Flux2 text-to-image training example." ) else: assert args.image_column is not None assert args.caption_column is not None if args.dataset_name is None and args.instance_data_dir is None: raise ValueError("Specify either `--dataset_name` or `--instance_data_dir`") if args.dataset_name is not None and args.instance_data_dir is not None: raise ValueError("Specify only one of `--dataset_name` or `--instance_data_dir`") env_local_rank = int(os.environ.get("LOCAL_RANK", -1)) if env_local_rank != -1 and env_local_rank != args.local_rank: args.local_rank = env_local_rank return args class DreamBoothDataset(Dataset): """ A dataset to prepare the instance and class images with the prompts for fine-tuning the model. It pre-processes the images. 
""" def __init__( self, instance_data_root, instance_prompt, size=1024, repeats=1, center_crop=False, buckets=None, ): self.size = size self.center_crop = center_crop self.instance_prompt = instance_prompt self.custom_instance_prompts = None self.buckets = buckets # if --dataset_name is provided or a metadata jsonl file is provided in the local --instance_data directory, # we load the training data using load_dataset if args.dataset_name is not None: try: from datasets import load_dataset except ImportError: raise ImportError( "You are trying to load your data using the datasets library. If you wish to train using custom " "captions please install the datasets library: `pip install datasets`. If you wish to load a " "local folder containing images only, specify --instance_data_dir instead." ) # Downloading and loading a dataset from the hub. # See more about loading custom images at # https://huggingface.co/docs/datasets/v2.0.0/en/dataset_script dataset = load_dataset( args.dataset_name, args.dataset_config_name, cache_dir=args.cache_dir, ) # Preprocessing the datasets. column_names = dataset["train"].column_names # 6. Get the column names for input/target. if args.cond_image_column is not None and args.cond_image_column not in column_names: raise ValueError( f"`--cond_image_column` value '{args.cond_image_column}' not found in dataset columns. Dataset columns are: {', '.join(column_names)}" ) if args.image_column is None: image_column = column_names[0] logger.info(f"image column defaulting to {image_column}") else: image_column = args.image_column if image_column not in column_names: raise ValueError( f"`--image_column` value '{args.image_column}' not found in dataset columns. 
Dataset columns are: {', '.join(column_names)}" ) instance_images = dataset["train"][image_column] cond_images = None cond_image_column = args.cond_image_column if cond_image_column is not None: cond_images = [dataset["train"][i][cond_image_column] for i in range(len(dataset["train"]))] assert len(instance_images) == len(cond_images) if args.caption_column is None: logger.info( "No caption column provided, defaulting to instance_prompt for all images. If your dataset " "contains captions/prompts for the images, make sure to specify the " "column as --caption_column" ) self.custom_instance_prompts = None else: if args.caption_column not in column_names: raise ValueError( f"`--caption_column` value '{args.caption_column}' not found in dataset columns. Dataset columns are: {', '.join(column_names)}" ) custom_instance_prompts = dataset["train"][args.caption_column] # create final list of captions according to --repeats self.custom_instance_prompts = [] for caption in custom_instance_prompts: self.custom_instance_prompts.extend(itertools.repeat(caption, repeats)) else: self.instance_data_root = Path(instance_data_root) if not self.instance_data_root.exists(): raise ValueError("Instance images root doesn't exists.") instance_images = [Image.open(path) for path in list(Path(instance_data_root).iterdir())] self.custom_instance_prompts = None self.instance_images = [] self.cond_images = [] for i, img in enumerate(instance_images): self.instance_images.extend(itertools.repeat(img, repeats)) if args.dataset_name is not None and cond_images is not None: self.cond_images.extend(itertools.repeat(cond_images[i], repeats)) self.pixel_values = [] self.cond_pixel_values = [] for i, image in enumerate(self.instance_images): image = exif_transpose(image) if not image.mode == "RGB": image = image.convert("RGB") dest_image = None if self.cond_images: # todo: take care of max area for buckets dest_image = self.cond_images[i] image_width, image_height = dest_image.size if image_width * 
image_height > 1024 * 1024: dest_image = Flux2ImageProcessor._resize_to_target_area(dest_image, 1024 * 1024) image_width, image_height = dest_image.size multiple_of = 2 ** (4 - 1) # 2 ** (len(vae.config.block_out_channels) - 1), temp! image_width = (image_width // multiple_of) * multiple_of image_height = (image_height // multiple_of) * multiple_of image_processor = Flux2ImageProcessor() dest_image = image_processor.preprocess( dest_image, height=image_height, width=image_width, resize_mode="crop" ) # Convert back to PIL dest_image = dest_image.squeeze(0) if dest_image.min() < 0: dest_image = (dest_image + 1) / 2 dest_image = (torch.clamp(dest_image, 0, 1) * 255).byte().cpu() if dest_image.shape[0] == 1: # Gray scale image dest_image = Image.fromarray(dest_image.squeeze().numpy(), mode="L") else: # RGB scale image: (C, H, W) -> (H, W, C) dest_image = TF.to_pil_image(dest_image) dest_image = exif_transpose(dest_image) if not dest_image.mode == "RGB": dest_image = dest_image.convert("RGB") width, height = image.size # Find the closest bucket bucket_idx = find_nearest_bucket(height, width, self.buckets) target_height, target_width = self.buckets[bucket_idx] self.size = (target_height, target_width) # based on the bucket assignment, define the transformations image, dest_image = self.paired_transform( image, dest_image=dest_image, size=self.size, center_crop=args.center_crop, random_flip=args.random_flip, ) self.pixel_values.append((image, bucket_idx)) if dest_image is not None: self.cond_pixel_values.append((dest_image, bucket_idx)) self.num_instance_images = len(self.instance_images) self._length = self.num_instance_images self.image_transforms = transforms.Compose( [ transforms.Resize(size, interpolation=transforms.InterpolationMode.BILINEAR), transforms.CenterCrop(size) if center_crop else transforms.RandomCrop(size), transforms.ToTensor(), transforms.Normalize([0.5], [0.5]), ] ) def __len__(self): return self._length def __getitem__(self, index): example = {} 
        # Index modulo num_instance_images so --repeats / epoch-length mismatch
        # never raises IndexError; every example carries its bucket id so the
        # BucketBatchSampler can group same-resolution images.
        instance_image, bucket_idx = self.pixel_values[index % self.num_instance_images]
        example["instance_images"] = instance_image
        example["bucket_idx"] = bucket_idx

        if self.cond_pixel_values:
            # Condition image was pre-processed in __init__ with the SAME
            # crop/flip as the target image (paired transform).
            dest_image, _ = self.cond_pixel_values[index % self.num_instance_images]
            example["cond_images"] = dest_image

        if self.custom_instance_prompts:
            caption = self.custom_instance_prompts[index % self.num_instance_images]
            if caption:
                example["instance_prompt"] = caption
            else:
                # Empty caption entry: fall back to the global instance prompt.
                example["instance_prompt"] = self.instance_prompt
        else:  # custom prompts were provided, but length does not match size of image dataset
            example["instance_prompt"] = self.instance_prompt

        return example

    def paired_transform(self, image, dest_image=None, size=(224, 224), center_crop=False, random_flip=False):
        """Apply identical geometric transforms to an image and its optional
        condition image so the pair stays pixel-aligned.

        Returns a tuple ``(image, dest_image)``; ``dest_image`` is None when
        no condition image was given.
        """
        # 1. Resize (deterministic)
        resize = transforms.Resize(size, interpolation=transforms.InterpolationMode.BILINEAR)
        image = resize(image)
        if dest_image is not None:
            dest_image = resize(dest_image)

        # 2. Crop: either center or SAME random crop
        if center_crop:
            crop = transforms.CenterCrop(size)
            image = crop(image)
            if dest_image is not None:
                dest_image = crop(dest_image)
        else:
            # get_params returns (i, j, h, w)
            # Sampling the crop box once and applying it with TF.crop keeps
            # both images cropped to the exact same region.
            i, j, h, w = transforms.RandomCrop.get_params(image, output_size=size)
            image = TF.crop(image, i, j, h, w)
            if dest_image is not None:
                dest_image = TF.crop(dest_image, i, j, h, w)

        # 3. Random horizontal flip with the SAME coin flip
        if random_flip:
            do_flip = random.random() < 0.5
            if do_flip:
                image = TF.hflip(image)
                if dest_image is not None:
                    dest_image = TF.hflip(dest_image)

        # 4. 
ToTensor + Normalize (deterministic)
        # Normalize([0.5], [0.5]) maps pixel values from [0, 1] into [-1, 1],
        # the range the VAE expects.
        to_tensor = transforms.ToTensor()
        normalize = transforms.Normalize([0.5], [0.5])
        image = normalize(to_tensor(image))
        if dest_image is not None:
            dest_image = normalize(to_tensor(dest_image))

        return (image, dest_image) if dest_image is not None else (image, None)


def collate_fn(examples):
    """Collate dataset examples into a training batch.

    Stacks the (already-tensorized) instance images into a contiguous float
    tensor and collects their prompts; condition images are stacked under
    "cond_pixel_values" only when present in the examples.
    """
    pixel_values = [example["instance_images"] for example in examples]
    prompts = [example["instance_prompt"] for example in examples]

    pixel_values = torch.stack(pixel_values)
    pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float()
    batch = {"pixel_values": pixel_values, "prompts": prompts}
    if any("cond_images" in example for example in examples):
        # NOTE(review): torch.stack assumes EVERY example has "cond_images"
        # when any does — a mixed batch would raise KeyError. The bucket
        # sampler appears to guarantee homogeneous batches; confirm.
        cond_pixel_values = [example["cond_images"] for example in examples]
        cond_pixel_values = torch.stack(cond_pixel_values)
        cond_pixel_values = cond_pixel_values.to(memory_format=torch.contiguous_format).float()
        batch.update({"cond_pixel_values": cond_pixel_values})
    return batch


class BucketBatchSampler(BatchSampler):
    """Batch sampler that only groups images from the same aspect-ratio bucket,
    so every batch contains images of a single resolution."""

    def __init__(self, dataset: DreamBoothDataset, batch_size: int, drop_last: bool = False):
        if not isinstance(batch_size, int) or batch_size <= 0:
            raise ValueError("batch_size should be a positive integer value, but got batch_size={}".format(batch_size))
        if not isinstance(drop_last, bool):
            raise ValueError("drop_last should be a boolean value, but got drop_last={}".format(drop_last))

        self.dataset = dataset
        self.batch_size = batch_size
        self.drop_last = drop_last

        # Group indices by bucket
        self.bucket_indices = [[] for _ in range(len(self.dataset.buckets))]
        for idx, (_, bucket_idx) in enumerate(self.dataset.pixel_values):
            self.bucket_indices[bucket_idx].append(idx)

        self.sampler_len = 0
        self.batches = []

        # Pre-generate batches for each bucket
        for indices_in_bucket in self.bucket_indices:
            # Shuffle indices within the bucket
            random.shuffle(indices_in_bucket)
            # Create batches
            for i in range(0, len(indices_in_bucket), self.batch_size):
                batch = indices_in_bucket[i : i + self.batch_size]
                if len(batch) < self.batch_size and self.drop_last:
                    continue  # Skip partial batch if drop_last is True
                self.batches.append(batch)
                self.sampler_len += 1  # Count the number of batches

    def __iter__(self):
        """Yield pre-built batches of dataset indices, one batch at a time."""
        # Shuffle the order of the batches each epoch
        # NOTE(review): batches are built once in __init__; only their ORDER is
        # reshuffled per epoch, not the index-to-batch assignment — confirm
        # this is intended.
        random.shuffle(self.batches)
        for batch in self.batches:
            yield batch

    def __len__(self):
        # Number of batches produced per epoch (set in __init__).
        return self.sampler_len


class PromptDataset(Dataset):
    "A simple dataset to prepare the prompts to generate class images on multiple GPUs."

    def __init__(self, prompt, num_samples):
        self.prompt = prompt
        self.num_samples = num_samples

    def __len__(self):
        return self.num_samples

    def __getitem__(self, index):
        # Every item carries the same prompt plus its index, so each worker
        # knows which sample number it is generating.
        example = {}
        example["prompt"] = self.prompt
        example["index"] = index
        return example


def main(args):
    """Entry point: validate args, set up the Accelerator, then run training."""
    if args.report_to == "wandb" and args.hub_token is not None:
        # A token passed on the CLI would be captured in the wandb run config.
        raise ValueError(
            "You cannot use both --report_to=wandb and --hub_token due to a security risk of exposing your token."
            " Please use `hf auth login` to authenticate with the Hub."
        )

    if torch.backends.mps.is_available() and args.mixed_precision == "bf16":
        # due to pytorch#99272, MPS does not yet support bfloat16.
        raise ValueError(
            "Mixed precision training with bfloat16 is not supported on MPS. Please use fp16 (recommended) or fp32 instead."
        )
    if args.do_fp8_training:
        # Imported lazily so torchao is only required when FP8 training is on.
        from torchao.float8 import Float8LinearConfig, convert_to_float8_training

    logging_dir = Path(args.output_dir, args.logging_dir)

    accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir)
    # find_unused_parameters=True: with LoRA only a subset of parameters gets
    # gradients, which DDP must be told to tolerate.
    kwargs = DistributedDataParallelKwargs(find_unused_parameters=True)
    accelerator = Accelerator(
        gradient_accumulation_steps=args.gradient_accumulation_steps,
        mixed_precision=args.mixed_precision,
        log_with=args.report_to,
        project_config=accelerator_project_config,
        kwargs_handlers=[kwargs],
    )

    # Disable AMP for MPS. 
if torch.backends.mps.is_available(): accelerator.native_amp = False if args.report_to == "wandb": if not is_wandb_available(): raise ImportError("Make sure to install wandb if you want to use it for logging during training.") # Make one log on every process with the configuration for debugging. logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger.info(accelerator.state, main_process_only=False) if accelerator.is_local_main_process: transformers.utils.logging.set_verbosity_warning() diffusers.utils.logging.set_verbosity_info() else: transformers.utils.logging.set_verbosity_error() diffusers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Handle the repository creation if accelerator.is_main_process: if args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) if args.push_to_hub: repo_id = create_repo( repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, ).repo_id # Load the tokenizers tokenizer = PixtralProcessor.from_pretrained( args.pretrained_model_name_or_path, subfolder="tokenizer", revision=args.revision, ) # For mixed precision training we cast all non-trainable weights (vae, text_encoder and transformer) to half-precision # as these weights are only used for inference, keeping weights in full precision is not required. 
weight_dtype = torch.float32 if accelerator.mixed_precision == "fp16": weight_dtype = torch.float16 elif accelerator.mixed_precision == "bf16": weight_dtype = torch.bfloat16 # Load scheduler and models noise_scheduler = FlowMatchEulerDiscreteScheduler.from_pretrained( args.pretrained_model_name_or_path, subfolder="scheduler", revision=args.revision, ) noise_scheduler_copy = copy.deepcopy(noise_scheduler) vae = AutoencoderKLFlux2.from_pretrained( args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision, variant=args.variant, ) latents_bn_mean = vae.bn.running_mean.view(1, -1, 1, 1).to(accelerator.device) latents_bn_std = torch.sqrt(vae.bn.running_var.view(1, -1, 1, 1) + vae.config.batch_norm_eps).to( accelerator.device ) quantization_config = None if args.bnb_quantization_config_path is not None: with open(args.bnb_quantization_config_path, "r") as f: config_kwargs = json.load(f) if "load_in_4bit" in config_kwargs and config_kwargs["load_in_4bit"]: config_kwargs["bnb_4bit_compute_dtype"] = weight_dtype quantization_config = BitsAndBytesConfig(**config_kwargs) transformer = Flux2Transformer2DModel.from_pretrained( args.pretrained_model_name_or_path, subfolder="transformer", revision=args.revision, variant=args.variant, quantization_config=quantization_config, torch_dtype=weight_dtype, ) if args.bnb_quantization_config_path is not None: transformer = prepare_model_for_kbit_training(transformer, use_gradient_checkpointing=False) if not args.remote_text_encoder: text_encoder = Mistral3ForConditionalGeneration.from_pretrained( args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision, variant=args.variant ) text_encoder.requires_grad_(False) # We only train the additional adapter LoRA layers transformer.requires_grad_(False) vae.requires_grad_(False) if args.enable_npu_flash_attention: if is_torch_npu_available(): logger.info("npu flash attention enabled.") transformer.set_attention_backend("_native_npu") else: raise 
ValueError("npu flash attention requires torch_npu extensions and is supported only on npu device ") if torch.backends.mps.is_available() and weight_dtype == torch.bfloat16: # due to pytorch#99272, MPS does not yet support bfloat16. raise ValueError( "Mixed precision training with bfloat16 is not supported on MPS. Please use fp16 (recommended) or fp32 instead." ) to_kwargs = {"dtype": weight_dtype, "device": accelerator.device} if not args.offload else {"dtype": weight_dtype} # flux vae is stable in bf16 so load it in weight_dtype to reduce memory vae.to(**to_kwargs) # we never offload the transformer to CPU, so we can just use the accelerator device transformer_to_kwargs = ( {"device": accelerator.device} if args.bnb_quantization_config_path is not None else {"device": accelerator.device, "dtype": weight_dtype} ) is_fsdp = getattr(accelerator.state, "fsdp_plugin", None) is not None if not is_fsdp: transformer.to(**transformer_to_kwargs) if args.do_fp8_training: convert_to_float8_training( transformer, module_filter_fn=module_filter_fn, config=Float8LinearConfig(pad_inner_dim=True) ) if not args.remote_text_encoder: text_encoder.to(**to_kwargs) # Initialize a text encoding pipeline and keep it to CPU for now. 
text_encoding_pipeline = Flux2Pipeline.from_pretrained( args.pretrained_model_name_or_path, vae=None, transformer=None, tokenizer=tokenizer, text_encoder=text_encoder, scheduler=None, revision=args.revision, ) if args.gradient_checkpointing: transformer.enable_gradient_checkpointing() if args.lora_layers is not None: target_modules = [layer.strip() for layer in args.lora_layers.split(",")] else: target_modules = ["to_k", "to_q", "to_v", "to_out.0"] # now we will add new LoRA weights the transformer layers transformer_lora_config = LoraConfig( r=args.rank, lora_alpha=args.lora_alpha, lora_dropout=args.lora_dropout, init_lora_weights="gaussian", target_modules=target_modules, ) transformer.add_adapter(transformer_lora_config) def unwrap_model(model): model = accelerator.unwrap_model(model) model = model._orig_mod if is_compiled_module(model) else model return model # create custom saving & loading hooks so that `accelerator.save_state(...)` serializes in a nice format def save_model_hook(models, weights, output_dir): transformer_cls = type(unwrap_model(transformer)) # 1) Validate and pick the transformer model modules_to_save: dict[str, Any] = {} transformer_model = None for model in models: if isinstance(unwrap_model(model), transformer_cls): transformer_model = model modules_to_save["transformer"] = model else: raise ValueError(f"unexpected save model: {model.__class__}") if transformer_model is None: raise ValueError("No transformer model found in 'models'") # 2) Optionally gather FSDP state dict once state_dict = accelerator.get_state_dict(model) if is_fsdp else None # 3) Only main process materializes the LoRA state dict transformer_lora_layers_to_save = None if accelerator.is_main_process: peft_kwargs = {} if is_fsdp: peft_kwargs["state_dict"] = state_dict transformer_lora_layers_to_save = get_peft_model_state_dict( unwrap_model(transformer_model) if is_fsdp else transformer_model, **peft_kwargs, ) if is_fsdp: transformer_lora_layers_to_save = 
_to_cpu_contiguous(transformer_lora_layers_to_save) # make sure to pop weight so that corresponding model is not saved again if weights: weights.pop() Flux2Pipeline.save_lora_weights( output_dir, transformer_lora_layers=transformer_lora_layers_to_save, **_collate_lora_metadata(modules_to_save), ) def load_model_hook(models, input_dir): transformer_ = None if not is_fsdp: while len(models) > 0: model = models.pop() if isinstance(unwrap_model(model), type(unwrap_model(transformer))): transformer_ = unwrap_model(model) else: raise ValueError(f"unexpected save model: {model.__class__}") else: transformer_ = Flux2Transformer2DModel.from_pretrained( args.pretrained_model_name_or_path, subfolder="transformer", ) transformer_.add_adapter(transformer_lora_config) lora_state_dict = Flux2Pipeline.lora_state_dict(input_dir) transformer_state_dict = { f"{k.replace('transformer.', '')}": v for k, v in lora_state_dict.items() if k.startswith("transformer.") } transformer_state_dict = convert_unet_state_dict_to_peft(transformer_state_dict) incompatible_keys = set_peft_model_state_dict(transformer_, transformer_state_dict, adapter_name="default") if incompatible_keys is not None: # check only for unexpected keys unexpected_keys = getattr(incompatible_keys, "unexpected_keys", None) if unexpected_keys: logger.warning( f"Loading adapter weights from state_dict led to unexpected keys not found in the model: " f" {unexpected_keys}. " ) # Make sure the trainable params are in float32. This is again needed since the base models # are in `weight_dtype`. 
More details: # https://github.com/huggingface/diffusers/pull/6514#discussion_r1449796804 if args.mixed_precision == "fp16": models = [transformer_] # only upcast trainable parameters (LoRA) into fp32 cast_training_params(models) accelerator.register_save_state_pre_hook(save_model_hook) accelerator.register_load_state_pre_hook(load_model_hook) # Enable TF32 for faster training on Ampere GPUs, # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices if args.allow_tf32 and torch.cuda.is_available(): torch.backends.cuda.matmul.allow_tf32 = True if args.scale_lr: args.learning_rate = ( args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes ) # Make sure the trainable params are in float32. if args.mixed_precision == "fp16": models = [transformer] # only upcast trainable parameters (LoRA) into fp32 cast_training_params(models, dtype=torch.float32) transformer_lora_parameters = list(filter(lambda p: p.requires_grad, transformer.parameters())) # Optimization parameters transformer_parameters_with_lr = {"params": transformer_lora_parameters, "lr": args.learning_rate} params_to_optimize = [transformer_parameters_with_lr] # Optimizer creation if not (args.optimizer.lower() == "prodigy" or args.optimizer.lower() == "adamw"): logger.warning( f"Unsupported choice of optimizer: {args.optimizer}.Supported optimizers include [adamW, prodigy]." "Defaulting to adamW" ) args.optimizer = "adamw" if args.use_8bit_adam and not args.optimizer.lower() == "adamw": logger.warning( f"use_8bit_adam is ignored when optimizer is not set to 'AdamW'. Optimizer was " f"set to {args.optimizer.lower()}" ) if args.optimizer.lower() == "adamw": if args.use_8bit_adam: try: import bitsandbytes as bnb except ImportError: raise ImportError( "To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`." 
) optimizer_class = bnb.optim.AdamW8bit else: optimizer_class = torch.optim.AdamW optimizer = optimizer_class( params_to_optimize, betas=(args.adam_beta1, args.adam_beta2), weight_decay=args.adam_weight_decay, eps=args.adam_epsilon, ) if args.optimizer.lower() == "prodigy": try: import prodigyopt except ImportError: raise ImportError("To use Prodigy, please install the prodigyopt library: `pip install prodigyopt`") optimizer_class = prodigyopt.Prodigy if args.learning_rate <= 0.1: logger.warning( "Learning rate is too low. When using prodigy, it's generally better to set learning rate around 1.0" ) optimizer = optimizer_class( params_to_optimize, betas=(args.adam_beta1, args.adam_beta2), beta3=args.prodigy_beta3, weight_decay=args.adam_weight_decay, eps=args.adam_epsilon, decouple=args.prodigy_decouple, use_bias_correction=args.prodigy_use_bias_correction, safeguard_warmup=args.prodigy_safeguard_warmup, ) if args.aspect_ratio_buckets is not None: buckets = parse_buckets_string(args.aspect_ratio_buckets) else: buckets = [(args.resolution, args.resolution)] logger.info(f"Using parsed aspect ratio buckets: {buckets}") # Dataset and DataLoaders creation: train_dataset = DreamBoothDataset( instance_data_root=args.instance_data_dir, instance_prompt=args.instance_prompt, size=args.resolution, repeats=args.repeats, center_crop=args.center_crop, buckets=buckets, ) batch_sampler = BucketBatchSampler(train_dataset, batch_size=args.train_batch_size, drop_last=True) train_dataloader = torch.utils.data.DataLoader( train_dataset, batch_sampler=batch_sampler, collate_fn=lambda examples: collate_fn(examples), num_workers=args.dataloader_num_workers, ) def compute_text_embeddings(prompt, text_encoding_pipeline): with torch.no_grad(): prompt_embeds, text_ids = text_encoding_pipeline.encode_prompt( prompt=prompt, max_sequence_length=args.max_sequence_length ) # prompt_embeds = prompt_embeds.to(accelerator.device) # text_ids = text_ids.to(accelerator.device) return prompt_embeds, 
text_ids def compute_remote_text_embeddings(prompts: str | list[str]): import io import requests if args.hub_token is not None: hf_token = args.hub_token else: from huggingface_hub import get_token hf_token = get_token() if hf_token is None: raise ValueError( "No HuggingFace token found. To use the remote text encoder please login using `hf auth login` or provide a token using --hub_token" ) def _encode_single(prompt: str): response = requests.post( "https://remote-text-encoder-flux-2.huggingface.co/predict", json={"prompt": prompt}, headers={"Authorization": f"Bearer {hf_token}", "Content-Type": "application/json"}, ) assert response.status_code == 200, f"{response.status_code=}" return torch.load(io.BytesIO(response.content)) try: if isinstance(prompts, (list, tuple)): embeds = [_encode_single(p) for p in prompts] prompt_embeds = torch.cat(embeds, dim=0).to(accelerator.device) else: prompt_embeds = _encode_single(prompts).to(accelerator.device) text_ids = Flux2Pipeline._prepare_text_ids(prompt_embeds).to(accelerator.device) return prompt_embeds, text_ids except Exception as e: raise RuntimeError("Remote text encoder inference failed.") from e # If no type of tuning is done on the text_encoder and custom instance prompts are NOT # provided (i.e. the --instance_prompt is used for all images), we encode the instance prompt once to avoid # the redundant encoding. 
    # Single shared instance prompt (no per-image captions): encode it once up front
    # so we don't re-run the text encoder for every training batch.
    if not train_dataset.custom_instance_prompts:
        if args.remote_text_encoder:
            instance_prompt_hidden_states, instance_text_ids = compute_remote_text_embeddings(args.instance_prompt)
        else:
            # `offload_models` temporarily places the text-encoding pipeline on the
            # accelerator device for the duration of the call when offloading is enabled.
            with offload_models(text_encoding_pipeline, device=accelerator.device, offload=args.offload):
                instance_prompt_hidden_states, instance_text_ids = compute_text_embeddings(
                    args.instance_prompt, text_encoding_pipeline
                )

    # Pre-compute the validation inputs (conditioning image + prompt embeddings) once,
    # so validation runs don't need the text encoder.
    if args.validation_prompt is not None:
        validation_image = load_image(args.validation_image_path).convert("RGB")
        validation_kwargs = {"image": validation_image}
        # NOTE(review): both compute_remote_text_embeddings and compute_text_embeddings
        # return a (prompt_embeds, text_ids) tuple; storing the whole tuple under the
        # single "prompt_embeds" key looks suspicious — confirm the pipeline call
        # downstream actually accepts/unpacks this.
        if args.remote_text_encoder:
            validation_kwargs["prompt_embeds"] = compute_remote_text_embeddings(args.validation_prompt)
        else:
            with offload_models(text_encoding_pipeline, device=accelerator.device, offload=args.offload):
                validation_kwargs["prompt_embeds"] = compute_text_embeddings(
                    args.validation_prompt, text_encoding_pipeline
                )

    # Init FSDP for text encoder
    if args.fsdp_text_encoder:
        fsdp_kwargs = get_fsdp_kwargs_from_accelerator(accelerator)
        text_encoder_fsdp = wrap_with_fsdp(
            model=text_encoding_pipeline.text_encoder,
            device=accelerator.device,
            offload=args.offload,
            limit_all_gathers=True,
            use_orig_params=True,
            fsdp_kwargs=fsdp_kwargs,
        )
        text_encoding_pipeline.text_encoder = text_encoder_fsdp
        # Synchronize all ranks before continuing past the FSDP wrap.
        dist.barrier()

    # If custom instance prompts are NOT provided (i.e. the instance prompt is used for all images),
    # pack the statically computed variables appropriately here. This is so that we don't
    # have to pass them to the dataloader.
    if not train_dataset.custom_instance_prompts:
        prompt_embeds = instance_prompt_hidden_states
        text_ids = instance_text_ids

    # if cache_latents is set to True, we encode images to latents and store them.
    # Similar to pre-encoding in the case of a single instance prompt, if custom prompts are provided
    # we encode them in advance as well.
    # Pre-encode heavy inputs before training: VAE latents when --cache_latents is set,
    # and per-image prompt embeddings when the dataset carries custom captions.
    precompute_latents = args.cache_latents or train_dataset.custom_instance_prompts
    if precompute_latents:
        prompt_embeds_cache = []
        text_ids_cache = []
        latents_cache = []      # target-image latent distributions, one entry per batch
        cond_latents_cache = []  # conditioning-image latent distributions, one entry per batch
        for batch in tqdm(train_dataloader, desc="Caching latents"):
            with torch.no_grad():
                if args.cache_latents:
                    with offload_models(vae, device=accelerator.device, offload=args.offload):
                        batch["pixel_values"] = batch["pixel_values"].to(
                            accelerator.device, non_blocking=True, dtype=vae.dtype
                        )
                        # The full latent distribution is cached (not a sample), so the
                        # training loop can take .mode() later.
                        latents_cache.append(vae.encode(batch["pixel_values"]).latent_dist)
                        batch["cond_pixel_values"] = batch["cond_pixel_values"].to(
                            accelerator.device, non_blocking=True, dtype=vae.dtype
                        )
                        cond_latents_cache.append(vae.encode(batch["cond_pixel_values"]).latent_dist)
                if train_dataset.custom_instance_prompts:
                    if args.remote_text_encoder:
                        prompt_embeds, text_ids = compute_remote_text_embeddings(batch["prompts"])
                    elif args.fsdp_text_encoder:
                        # FSDP-wrapped encoder stays resident; no offload context needed.
                        prompt_embeds, text_ids = compute_text_embeddings(batch["prompts"], text_encoding_pipeline)
                    else:
                        with offload_models(text_encoding_pipeline, device=accelerator.device, offload=args.offload):
                            prompt_embeds, text_ids = compute_text_embeddings(batch["prompts"], text_encoding_pipeline)
                    prompt_embeds_cache.append(prompt_embeds)
                    text_ids_cache.append(text_ids)

    # move back to cpu before deleting to ensure memory is freed see: https://github.com/huggingface/diffusers/issues/11376#issue-3008144624
    if args.cache_latents:
        vae = vae.to("cpu")
        del vae

    # move back to cpu before deleting to ensure memory is freed see: https://github.com/huggingface/diffusers/issues/11376#issue-3008144624
    if not args.remote_text_encoder:
        text_encoding_pipeline = text_encoding_pipeline.to("cpu")
        del text_encoder, tokenizer
    free_memory()

    # Scheduler and math around the number of training steps.
    # Check the PR https://github.com/huggingface/diffusers/pull/8312 for detailed explanation.
num_warmup_steps_for_scheduler = args.lr_warmup_steps * accelerator.num_processes if args.max_train_steps is None: len_train_dataloader_after_sharding = math.ceil(len(train_dataloader) / accelerator.num_processes) num_update_steps_per_epoch = math.ceil(len_train_dataloader_after_sharding / args.gradient_accumulation_steps) num_training_steps_for_scheduler = ( args.num_train_epochs * accelerator.num_processes * num_update_steps_per_epoch ) else: num_training_steps_for_scheduler = args.max_train_steps * accelerator.num_processes lr_scheduler = get_scheduler( args.lr_scheduler, optimizer=optimizer, num_warmup_steps=num_warmup_steps_for_scheduler, num_training_steps=num_training_steps_for_scheduler, num_cycles=args.lr_num_cycles, power=args.lr_power, ) # Prepare everything with our `accelerator`. transformer, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( transformer, optimizer, train_dataloader, lr_scheduler ) # We need to recalculate our total training steps as the size of the training dataloader may have changed. num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if args.max_train_steps is None: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch if num_training_steps_for_scheduler != args.max_train_steps: logger.warning( f"The length of the 'train_dataloader' after 'accelerator.prepare' ({len(train_dataloader)}) does not match " f"the expected length ({len_train_dataloader_after_sharding}) when the learning rate scheduler was created. " f"This inconsistency may result in the learning rate scheduler not functioning properly." ) # Afterwards we recalculate our number of training epochs args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) # We need to initialize the trackers we use, and also store our configuration. # The trackers initializes automatically on the main process. 
if accelerator.is_main_process: tracker_name = "dreambooth-flux2-image2img-lora" accelerator.init_trackers(tracker_name, config=vars(args)) # Train! total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps logger.info("***** Running training *****") logger.info(f" Num examples = {len(train_dataset)}") logger.info(f" Num batches each epoch = {len(train_dataloader)}") logger.info(f" Num Epochs = {args.num_train_epochs}") logger.info(f" Instantaneous batch size per device = {args.train_batch_size}") logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}") logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") logger.info(f" Total optimization steps = {args.max_train_steps}") global_step = 0 first_epoch = 0 # Potentially load in the weights and states from a previous save if args.resume_from_checkpoint: if args.resume_from_checkpoint != "latest": path = os.path.basename(args.resume_from_checkpoint) else: # Get the mos recent checkpoint dirs = os.listdir(args.output_dir) dirs = [d for d in dirs if d.startswith("checkpoint")] dirs = sorted(dirs, key=lambda x: int(x.split("-")[1])) path = dirs[-1] if len(dirs) > 0 else None if path is None: accelerator.print( f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run." ) args.resume_from_checkpoint = None initial_global_step = 0 else: accelerator.print(f"Resuming from checkpoint {path}") accelerator.load_state(os.path.join(args.output_dir, path)) global_step = int(path.split("-")[1]) initial_global_step = global_step first_epoch = global_step // num_update_steps_per_epoch else: initial_global_step = 0 progress_bar = tqdm( range(0, args.max_train_steps), initial=initial_global_step, desc="Steps", # Only show the progress bar once on each machine. 
disable=not accelerator.is_local_main_process, ) def get_sigmas(timesteps, n_dim=4, dtype=torch.float32): sigmas = noise_scheduler_copy.sigmas.to(device=accelerator.device, dtype=dtype) schedule_timesteps = noise_scheduler_copy.timesteps.to(accelerator.device) timesteps = timesteps.to(accelerator.device) step_indices = [(schedule_timesteps == t).nonzero().item() for t in timesteps] sigma = sigmas[step_indices].flatten() while len(sigma.shape) < n_dim: sigma = sigma.unsqueeze(-1) return sigma for epoch in range(first_epoch, args.num_train_epochs): transformer.train() for step, batch in enumerate(train_dataloader): models_to_accumulate = [transformer] prompts = batch["prompts"] with accelerator.accumulate(models_to_accumulate): if train_dataset.custom_instance_prompts: prompt_embeds = prompt_embeds_cache[step] text_ids = text_ids_cache[step] else: num_repeat_elements = len(prompts) prompt_embeds = prompt_embeds.repeat(num_repeat_elements, 1, 1) text_ids = text_ids.repeat(num_repeat_elements, 1, 1) # Convert images to latent space if args.cache_latents: model_input = latents_cache[step].mode() cond_model_input = cond_latents_cache[step].mode() else: with offload_models(vae, device=accelerator.device, offload=args.offload): pixel_values = batch["pixel_values"].to(dtype=vae.dtype) cond_pixel_values = batch["cond_pixel_values"].to(dtype=vae.dtype) model_input = vae.encode(pixel_values).latent_dist.mode() cond_model_input = vae.encode(cond_pixel_values).latent_dist.mode() # model_input = Flux2Pipeline._encode_vae_image(pixel_values) model_input = Flux2Pipeline._patchify_latents(model_input) model_input = (model_input - latents_bn_mean) / latents_bn_std cond_model_input = Flux2Pipeline._patchify_latents(cond_model_input) cond_model_input = (cond_model_input - latents_bn_mean) / latents_bn_std model_input_ids = Flux2Pipeline._prepare_latent_ids(model_input).to(device=model_input.device) cond_model_input_list = [cond_model_input[i].unsqueeze(0) for i in 
range(cond_model_input.shape[0])] cond_model_input_ids = Flux2Pipeline._prepare_image_ids(cond_model_input_list).to( device=cond_model_input.device ) cond_model_input_ids = cond_model_input_ids.view( cond_model_input.shape[0], -1, model_input_ids.shape[-1] ) # Sample noise that we'll add to the latents noise = torch.randn_like(model_input) bsz = model_input.shape[0] # Sample a random timestep for each image # for weighting schemes where we sample timesteps non-uniformly u = compute_density_for_timestep_sampling( weighting_scheme=args.weighting_scheme, batch_size=bsz, logit_mean=args.logit_mean, logit_std=args.logit_std, mode_scale=args.mode_scale, ) indices = (u * noise_scheduler_copy.config.num_train_timesteps).long() timesteps = noise_scheduler_copy.timesteps[indices].to(device=model_input.device) # Add noise according to flow matching. # zt = (1 - texp) * x + texp * z1 sigmas = get_sigmas(timesteps, n_dim=model_input.ndim, dtype=model_input.dtype) noisy_model_input = (1.0 - sigmas) * model_input + sigmas * noise # [B, C, H, W] -> [B, H*W, C] packed_noisy_model_input = Flux2Pipeline._pack_latents(noisy_model_input) packed_cond_model_input = Flux2Pipeline._pack_latents(cond_model_input) orig_input_shape = packed_noisy_model_input.shape orig_input_ids_shape = model_input_ids.shape # concatenate the model inputs with the cond inputs packed_noisy_model_input = torch.cat([packed_noisy_model_input, packed_cond_model_input], dim=1) model_input_ids = torch.cat([model_input_ids, cond_model_input_ids], dim=1) # handle guidance guidance = torch.full([1], args.guidance_scale, device=accelerator.device) guidance = guidance.expand(model_input.shape[0]) # Predict the noise residual model_pred = transformer( hidden_states=packed_noisy_model_input, # (B, image_seq_len, C) timestep=timesteps / 1000, guidance=guidance, encoder_hidden_states=prompt_embeds, txt_ids=text_ids, # B, text_seq_len, 4 img_ids=model_input_ids, # B, image_seq_len, 4 return_dict=False, )[0] model_pred = 
model_pred[:, : orig_input_shape[1], :] model_input_ids = model_input_ids[:, : orig_input_ids_shape[1], :] model_pred = Flux2Pipeline._unpack_latents_with_ids(model_pred, model_input_ids) # these weighting schemes use a uniform timestep sampling # and instead post-weight the loss weighting = compute_loss_weighting_for_sd3(weighting_scheme=args.weighting_scheme, sigmas=sigmas) # flow matching loss target = noise - model_input # Compute regular loss. loss = torch.mean( (weighting.float() * (model_pred.float() - target.float()) ** 2).reshape(target.shape[0], -1), 1, ) loss = loss.mean() accelerator.backward(loss) if accelerator.sync_gradients: params_to_clip = transformer.parameters() accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm) optimizer.step() lr_scheduler.step() optimizer.zero_grad() # Checks if the accelerator has performed an optimization step behind the scenes if accelerator.sync_gradients: progress_bar.update(1) global_step += 1 if accelerator.is_main_process or is_fsdp: if global_step % args.checkpointing_steps == 0: # _before_ saving state, check if this save would set us over the `checkpoints_total_limit` if args.checkpoints_total_limit is not None: checkpoints = os.listdir(args.output_dir) checkpoints = [d for d in checkpoints if d.startswith("checkpoint")] checkpoints = sorted(checkpoints, key=lambda x: int(x.split("-")[1])) # before we save the new checkpoint, we need to have at _most_ `checkpoints_total_limit - 1` checkpoints if len(checkpoints) >= args.checkpoints_total_limit: num_to_remove = len(checkpoints) - args.checkpoints_total_limit + 1 removing_checkpoints = checkpoints[0:num_to_remove] logger.info( f"{len(checkpoints)} checkpoints already exist, removing {len(removing_checkpoints)} checkpoints" ) logger.info(f"removing checkpoints: {', '.join(removing_checkpoints)}") for removing_checkpoint in removing_checkpoints: removing_checkpoint = os.path.join(args.output_dir, removing_checkpoint) shutil.rmtree(removing_checkpoint) 
save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}") accelerator.save_state(save_path) logger.info(f"Saved state to {save_path}") logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]} progress_bar.set_postfix(**logs) accelerator.log(logs, step=global_step) if global_step >= args.max_train_steps: break if accelerator.is_main_process: if args.validation_prompt is not None and epoch % args.validation_epochs == 0: # create pipeline pipeline = Flux2Pipeline.from_pretrained( args.pretrained_model_name_or_path, text_encoder=None, tokenizer=None, transformer=unwrap_model(transformer), revision=args.revision, variant=args.variant, torch_dtype=weight_dtype, ) images = log_validation( pipeline=pipeline, args=args, accelerator=accelerator, pipeline_args=validation_kwargs, epoch=epoch, torch_dtype=weight_dtype, ) del pipeline free_memory() # Save the lora layers accelerator.wait_for_everyone() if is_fsdp: transformer = unwrap_model(transformer) state_dict = accelerator.get_state_dict(transformer) if accelerator.is_main_process: modules_to_save = {} if is_fsdp: if args.bnb_quantization_config_path is None: if args.upcast_before_saving: state_dict = { k: v.to(torch.float32) if isinstance(v, torch.Tensor) else v for k, v in state_dict.items() } else: state_dict = { k: v.to(weight_dtype) if isinstance(v, torch.Tensor) else v for k, v in state_dict.items() } transformer_lora_layers = get_peft_model_state_dict( transformer, state_dict=state_dict, ) transformer_lora_layers = { k: v.detach().cpu().contiguous() if isinstance(v, torch.Tensor) else v for k, v in transformer_lora_layers.items() } else: transformer = unwrap_model(transformer) if args.bnb_quantization_config_path is None: if args.upcast_before_saving: transformer.to(torch.float32) else: transformer = transformer.to(weight_dtype) transformer_lora_layers = get_peft_model_state_dict(transformer) modules_to_save["transformer"] = transformer Flux2Pipeline.save_lora_weights( 
save_directory=args.output_dir, transformer_lora_layers=transformer_lora_layers, **_collate_lora_metadata(modules_to_save), ) images = [] run_validation = (args.validation_prompt and args.num_validation_images > 0) or (args.final_validation_prompt) should_run_final_inference = not args.skip_final_inference and run_validation if should_run_final_inference: pipeline = Flux2Pipeline.from_pretrained( args.pretrained_model_name_or_path, revision=args.revision, variant=args.variant, torch_dtype=weight_dtype, ) # load attention processors pipeline.load_lora_weights(args.output_dir) # run inference images = [] if args.validation_prompt and args.num_validation_images > 0: images = log_validation( pipeline=pipeline, args=args, accelerator=accelerator, pipeline_args=validation_kwargs, epoch=epoch, is_final_validation=True, torch_dtype=weight_dtype, ) del pipeline free_memory() validation_prompt = args.validation_prompt if args.validation_prompt else args.final_validation_prompt save_model_card( (args.hub_model_id or Path(args.output_dir).name) if not args.push_to_hub else repo_id, images=images, base_model=args.pretrained_model_name_or_path, instance_prompt=args.instance_prompt, validation_prompt=validation_prompt, repo_folder=args.output_dir, fp8_training=args.do_fp8_training, ) if args.push_to_hub: upload_folder( repo_id=repo_id, folder_path=args.output_dir, commit_message="End of training", ignore_patterns=["step_*", "epoch_*"], ) accelerator.end_training() if __name__ == "__main__": args = parse_args() main(args)
{ "repo_id": "huggingface/diffusers", "file_path": "examples/dreambooth/train_dreambooth_lora_flux2_img2img.py", "license": "Apache License 2.0", "lines": 1698, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:scripts/convert_flux2_to_diffusers.py
import argparse from contextlib import nullcontext from typing import Any, Dict, Tuple import safetensors.torch import torch from accelerate import init_empty_weights from huggingface_hub import hf_hub_download from transformers import AutoProcessor, GenerationConfig, Mistral3ForConditionalGeneration from diffusers import AutoencoderKLFlux2, FlowMatchEulerDiscreteScheduler, Flux2Pipeline, Flux2Transformer2DModel from diffusers.utils.import_utils import is_accelerate_available """ # VAE python scripts/convert_flux2_to_diffusers.py \ --original_state_dict_repo_id "diffusers-internal-dev/new-model-image" \ --vae_filename "flux2-vae.sft" \ --output_path "/raid/yiyi/dummy-flux2-diffusers" \ --vae # DiT python scripts/convert_flux2_to_diffusers.py \ --original_state_dict_repo_id diffusers-internal-dev/new-model-image \ --dit_filename flux-dev-dummy.sft \ --dit \ --output_path . # Full pipe python scripts/convert_flux2_to_diffusers.py \ --original_state_dict_repo_id diffusers-internal-dev/new-model-image \ --dit_filename flux-dev-dummy.sft \ --vae_filename "flux2-vae.sft" \ --dit --vae --full_pipe \ --output_path . 
""" CTX = init_empty_weights if is_accelerate_available() else nullcontext parser = argparse.ArgumentParser() parser.add_argument("--original_state_dict_repo_id", default=None, type=str) parser.add_argument("--vae_filename", default="flux2-vae.sft", type=str) parser.add_argument("--dit_filename", default="flux2-dev.safetensors", type=str) parser.add_argument("--vae", action="store_true") parser.add_argument("--dit", action="store_true") parser.add_argument("--vae_dtype", type=str, default="fp32") parser.add_argument("--dit_dtype", type=str, default="bf16") parser.add_argument("--checkpoint_path", default=None, type=str) parser.add_argument("--full_pipe", action="store_true") parser.add_argument("--output_path", type=str) args = parser.parse_args() def load_original_checkpoint(args, filename): if args.original_state_dict_repo_id is not None: ckpt_path = hf_hub_download(repo_id=args.original_state_dict_repo_id, filename=filename) elif args.checkpoint_path is not None: ckpt_path = args.checkpoint_path else: raise ValueError(" please provide either `original_state_dict_repo_id` or a local `checkpoint_path`") original_state_dict = safetensors.torch.load_file(ckpt_path) return original_state_dict DIFFUSERS_VAE_TO_FLUX2_MAPPING = { "encoder.conv_in.weight": "encoder.conv_in.weight", "encoder.conv_in.bias": "encoder.conv_in.bias", "encoder.conv_out.weight": "encoder.conv_out.weight", "encoder.conv_out.bias": "encoder.conv_out.bias", "encoder.conv_norm_out.weight": "encoder.norm_out.weight", "encoder.conv_norm_out.bias": "encoder.norm_out.bias", "decoder.conv_in.weight": "decoder.conv_in.weight", "decoder.conv_in.bias": "decoder.conv_in.bias", "decoder.conv_out.weight": "decoder.conv_out.weight", "decoder.conv_out.bias": "decoder.conv_out.bias", "decoder.conv_norm_out.weight": "decoder.norm_out.weight", "decoder.conv_norm_out.bias": "decoder.norm_out.bias", "quant_conv.weight": "encoder.quant_conv.weight", "quant_conv.bias": "encoder.quant_conv.bias", 
"post_quant_conv.weight": "decoder.post_quant_conv.weight", "post_quant_conv.bias": "decoder.post_quant_conv.bias", "bn.running_mean": "bn.running_mean", "bn.running_var": "bn.running_var", } # Copied from diffusers.pipelines.stable_diffusion.convert_from_ckpt.conv_attn_to_linear def conv_attn_to_linear(checkpoint): keys = list(checkpoint.keys()) attn_keys = ["query.weight", "key.weight", "value.weight"] for key in keys: if ".".join(key.split(".")[-2:]) in attn_keys: if checkpoint[key].ndim > 2: checkpoint[key] = checkpoint[key][:, :, 0, 0] elif "proj_attn.weight" in key: if checkpoint[key].ndim > 2: checkpoint[key] = checkpoint[key][:, :, 0] def update_vae_resnet_ldm_to_diffusers(keys, new_checkpoint, checkpoint, mapping): for ldm_key in keys: diffusers_key = ldm_key.replace(mapping["old"], mapping["new"]).replace("nin_shortcut", "conv_shortcut") new_checkpoint[diffusers_key] = checkpoint.get(ldm_key) def update_vae_attentions_ldm_to_diffusers(keys, new_checkpoint, checkpoint, mapping): for ldm_key in keys: diffusers_key = ( ldm_key.replace(mapping["old"], mapping["new"]) .replace("norm.weight", "group_norm.weight") .replace("norm.bias", "group_norm.bias") .replace("q.weight", "to_q.weight") .replace("q.bias", "to_q.bias") .replace("k.weight", "to_k.weight") .replace("k.bias", "to_k.bias") .replace("v.weight", "to_v.weight") .replace("v.bias", "to_v.bias") .replace("proj_out.weight", "to_out.0.weight") .replace("proj_out.bias", "to_out.0.bias") ) new_checkpoint[diffusers_key] = checkpoint.get(ldm_key) # proj_attn.weight has to be converted from conv 1D to linear shape = new_checkpoint[diffusers_key].shape if len(shape) == 3: new_checkpoint[diffusers_key] = new_checkpoint[diffusers_key][:, :, 0] elif len(shape) == 4: new_checkpoint[diffusers_key] = new_checkpoint[diffusers_key][:, :, 0, 0] def convert_flux2_vae_checkpoint_to_diffusers(vae_state_dict, config): new_checkpoint = {} for diffusers_key, ldm_key in DIFFUSERS_VAE_TO_FLUX2_MAPPING.items(): if ldm_key not in 
vae_state_dict: continue new_checkpoint[diffusers_key] = vae_state_dict[ldm_key] # Retrieves the keys for the encoder down blocks only num_down_blocks = len(config["down_block_types"]) down_blocks = { layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks) } for i in range(num_down_blocks): resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key] update_vae_resnet_ldm_to_diffusers( resnets, new_checkpoint, vae_state_dict, mapping={"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}, ) if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict: new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.get( f"encoder.down.{i}.downsample.conv.weight" ) new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.get( f"encoder.down.{i}.downsample.conv.bias" ) mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key] num_mid_res_blocks = 2 for i in range(1, num_mid_res_blocks + 1): resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key] update_vae_resnet_ldm_to_diffusers( resnets, new_checkpoint, vae_state_dict, mapping={"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}, ) mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key] update_vae_attentions_ldm_to_diffusers( mid_attentions, new_checkpoint, vae_state_dict, mapping={"old": "mid.attn_1", "new": "mid_block.attentions.0"} ) # Retrieves the keys for the decoder up blocks only num_up_blocks = len(config["up_block_types"]) up_blocks = { layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks) } for i in range(num_up_blocks): block_id = num_up_blocks - 1 - i resnets = [ key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key ] update_vae_resnet_ldm_to_diffusers( resnets, new_checkpoint, 
vae_state_dict, mapping={"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}, ) if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict: new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[ f"decoder.up.{block_id}.upsample.conv.weight" ] new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[ f"decoder.up.{block_id}.upsample.conv.bias" ] mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key] num_mid_res_blocks = 2 for i in range(1, num_mid_res_blocks + 1): resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key] update_vae_resnet_ldm_to_diffusers( resnets, new_checkpoint, vae_state_dict, mapping={"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}, ) mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key] update_vae_attentions_ldm_to_diffusers( mid_attentions, new_checkpoint, vae_state_dict, mapping={"old": "mid.attn_1", "new": "mid_block.attentions.0"} ) conv_attn_to_linear(new_checkpoint) return new_checkpoint FLUX2_TRANSFORMER_KEYS_RENAME_DICT = { # Image and text input projections "img_in": "x_embedder", "txt_in": "context_embedder", # Timestep and guidance embeddings "time_in.in_layer": "time_guidance_embed.timestep_embedder.linear_1", "time_in.out_layer": "time_guidance_embed.timestep_embedder.linear_2", "guidance_in.in_layer": "time_guidance_embed.guidance_embedder.linear_1", "guidance_in.out_layer": "time_guidance_embed.guidance_embedder.linear_2", # Modulation parameters "double_stream_modulation_img.lin": "double_stream_modulation_img.linear", "double_stream_modulation_txt.lin": "double_stream_modulation_txt.linear", "single_stream_modulation.lin": "single_stream_modulation.linear", # Final output layer # "final_layer.adaLN_modulation.1": "norm_out.linear", # Handle separately since we need to swap mod params "final_layer.linear": "proj_out", } FLUX2_TRANSFORMER_ADA_LAYER_NORM_KEY_MAP = { 
"final_layer.adaLN_modulation.1": "norm_out.linear", } FLUX2_TRANSFORMER_DOUBLE_BLOCK_KEY_MAP = { # Handle fused QKV projections separately as we need to break into Q, K, V projections "img_attn.norm.query_norm": "attn.norm_q", "img_attn.norm.key_norm": "attn.norm_k", "img_attn.proj": "attn.to_out.0", "img_mlp.0": "ff.linear_in", "img_mlp.2": "ff.linear_out", "txt_attn.norm.query_norm": "attn.norm_added_q", "txt_attn.norm.key_norm": "attn.norm_added_k", "txt_attn.proj": "attn.to_add_out", "txt_mlp.0": "ff_context.linear_in", "txt_mlp.2": "ff_context.linear_out", } FLUX2_TRANSFORMER_SINGLE_BLOCK_KEY_MAP = { "linear1": "attn.to_qkv_mlp_proj", "linear2": "attn.to_out", "norm.query_norm": "attn.norm_q", "norm.key_norm": "attn.norm_k", } # in SD3 original implementation of AdaLayerNormContinuous, it split linear projection output into shift, scale; # while in diffusers it split into scale, shift. Here we swap the linear projection weights in order to be able to use # diffusers implementation def swap_scale_shift(weight): shift, scale = weight.chunk(2, dim=0) new_weight = torch.cat([scale, shift], dim=0) return new_weight def convert_ada_layer_norm_weights(key: str, state_dict: Dict[str, Any]) -> None: # Skip if not a weight if ".weight" not in key: return # If adaLN_modulation is in the key, swap scale and shift parameters # Original implementation is (shift, scale); diffusers implementation is (scale, shift) if "adaLN_modulation" in key: key_without_param_type, param_type = key.rsplit(".", maxsplit=1) # Assume all such keys are in the AdaLayerNorm key map new_key_without_param_type = FLUX2_TRANSFORMER_ADA_LAYER_NORM_KEY_MAP[key_without_param_type] new_key = ".".join([new_key_without_param_type, param_type]) swapped_weight = swap_scale_shift(state_dict.pop(key)) state_dict[new_key] = swapped_weight return def convert_flux2_double_stream_blocks(key: str, state_dict: Dict[str, Any]) -> None: # Skip if not a weight, bias, or scale if ".weight" not in key and ".bias" not in 
key and ".scale" not in key: return new_prefix = "transformer_blocks" if "double_blocks." in key: parts = key.split(".") block_idx = parts[1] modality_block_name = parts[2] # img_attn, img_mlp, txt_attn, txt_mlp within_block_name = ".".join(parts[2:-1]) param_type = parts[-1] if param_type == "scale": param_type = "weight" if "qkv" in within_block_name: fused_qkv_weight = state_dict.pop(key) to_q_weight, to_k_weight, to_v_weight = torch.chunk(fused_qkv_weight, 3, dim=0) if "img" in modality_block_name: # double_blocks.{N}.img_attn.qkv --> transformer_blocks.{N}.attn.{to_q|to_k|to_v} to_q_weight, to_k_weight, to_v_weight = torch.chunk(fused_qkv_weight, 3, dim=0) new_q_name = "attn.to_q" new_k_name = "attn.to_k" new_v_name = "attn.to_v" elif "txt" in modality_block_name: # double_blocks.{N}.txt_attn.qkv --> transformer_blocks.{N}.attn.{add_q_proj|add_k_proj|add_v_proj} to_q_weight, to_k_weight, to_v_weight = torch.chunk(fused_qkv_weight, 3, dim=0) new_q_name = "attn.add_q_proj" new_k_name = "attn.add_k_proj" new_v_name = "attn.add_v_proj" new_q_key = ".".join([new_prefix, block_idx, new_q_name, param_type]) new_k_key = ".".join([new_prefix, block_idx, new_k_name, param_type]) new_v_key = ".".join([new_prefix, block_idx, new_v_name, param_type]) state_dict[new_q_key] = to_q_weight state_dict[new_k_key] = to_k_weight state_dict[new_v_key] = to_v_weight else: new_within_block_name = FLUX2_TRANSFORMER_DOUBLE_BLOCK_KEY_MAP[within_block_name] new_key = ".".join([new_prefix, block_idx, new_within_block_name, param_type]) param = state_dict.pop(key) state_dict[new_key] = param return def convert_flux2_single_stream_blocks(key: str, state_dict: Dict[str, Any]) -> None: # Skip if not a weight, bias, or scale if ".weight" not in key and ".bias" not in key and ".scale" not in key: return # Mapping: # - single_blocks.{N}.linear1 --> single_transformer_blocks.{N}.attn.to_qkv_mlp_proj # - single_blocks.{N}.linear2 --> single_transformer_blocks.{N}.attn.to_out # - 
single_blocks.{N}.norm.query_norm.scale --> single_transformer_blocks.{N}.attn.norm_q.weight # - single_blocks.{N}.norm.key_norm.scale --> single_transformer_blocks.{N}.attn.norm_k.weight new_prefix = "single_transformer_blocks" if "single_blocks." in key: parts = key.split(".") block_idx = parts[1] within_block_name = ".".join(parts[2:-1]) param_type = parts[-1] if param_type == "scale": param_type = "weight" new_within_block_name = FLUX2_TRANSFORMER_SINGLE_BLOCK_KEY_MAP[within_block_name] new_key = ".".join([new_prefix, block_idx, new_within_block_name, param_type]) param = state_dict.pop(key) state_dict[new_key] = param return TRANSFORMER_SPECIAL_KEYS_REMAP = { "adaLN_modulation": convert_ada_layer_norm_weights, "double_blocks": convert_flux2_double_stream_blocks, "single_blocks": convert_flux2_single_stream_blocks, } def update_state_dict(state_dict: Dict[str, Any], old_key: str, new_key: str) -> None: state_dict[new_key] = state_dict.pop(old_key) def get_flux2_transformer_config(model_type: str) -> Tuple[Dict[str, Any], ...]: if model_type == "flux2-dev": config = { "model_id": "black-forest-labs/FLUX.2-dev", "diffusers_config": { "patch_size": 1, "in_channels": 128, "num_layers": 8, "num_single_layers": 48, "attention_head_dim": 128, "num_attention_heads": 48, "joint_attention_dim": 15360, "timestep_guidance_channels": 256, "mlp_ratio": 3.0, "axes_dims_rope": (32, 32, 32, 32), "rope_theta": 2000, "eps": 1e-6, }, } rename_dict = FLUX2_TRANSFORMER_KEYS_RENAME_DICT special_keys_remap = TRANSFORMER_SPECIAL_KEYS_REMAP elif model_type == "klein-4b": config = { "model_id": "diffusers-internal-dev/dummy0115", "diffusers_config": { "patch_size": 1, "in_channels": 128, "num_layers": 5, "num_single_layers": 20, "attention_head_dim": 128, "num_attention_heads": 24, "joint_attention_dim": 7680, "timestep_guidance_channels": 256, "mlp_ratio": 3.0, "axes_dims_rope": (32, 32, 32, 32), "rope_theta": 2000, "eps": 1e-6, "guidance_embeds": False, }, } rename_dict = 
FLUX2_TRANSFORMER_KEYS_RENAME_DICT special_keys_remap = TRANSFORMER_SPECIAL_KEYS_REMAP elif model_type == "klein-9b": config = { "model_id": "diffusers-internal-dev/dummy0115", "diffusers_config": { "patch_size": 1, "in_channels": 128, "num_layers": 8, "num_single_layers": 24, "attention_head_dim": 128, "num_attention_heads": 32, "joint_attention_dim": 12288, "timestep_guidance_channels": 256, "mlp_ratio": 3.0, "axes_dims_rope": (32, 32, 32, 32), "rope_theta": 2000, "eps": 1e-6, "guidance_embeds": False, }, } rename_dict = FLUX2_TRANSFORMER_KEYS_RENAME_DICT special_keys_remap = TRANSFORMER_SPECIAL_KEYS_REMAP else: raise ValueError(f"Unknown model_type: {model_type}. Choose from: flux2-dev, klein-4b, klein-9b") return config, rename_dict, special_keys_remap def convert_flux2_transformer_to_diffusers(original_state_dict: Dict[str, torch.Tensor], model_type: str): config, rename_dict, special_keys_remap = get_flux2_transformer_config(model_type) diffusers_config = config["diffusers_config"] with init_empty_weights(): transformer = Flux2Transformer2DModel.from_config(diffusers_config) # Handle official code --> diffusers key remapping via the remap dict for key in list(original_state_dict.keys()): new_key = key[:] for replace_key, rename_key in rename_dict.items(): new_key = new_key.replace(replace_key, rename_key) update_state_dict(original_state_dict, key, new_key) # Handle any special logic which can't be expressed by a simple 1:1 remapping with the handlers in # special_keys_remap for key in list(original_state_dict.keys()): for special_key, handler_fn_inplace in special_keys_remap.items(): if special_key not in key: continue handler_fn_inplace(key, original_state_dict) transformer.load_state_dict(original_state_dict, strict=True, assign=True) return transformer def main(args): if args.vae: original_vae_ckpt = load_original_checkpoint(args, filename=args.vae_filename) vae = AutoencoderKLFlux2() converted_vae_state_dict = 
convert_flux2_vae_checkpoint_to_diffusers(original_vae_ckpt, vae.config) vae.load_state_dict(converted_vae_state_dict, strict=True) if not args.full_pipe: vae_dtype = torch.bfloat16 if args.vae_dtype == "bf16" else torch.float32 vae.to(vae_dtype).save_pretrained(f"{args.output_path}/vae") if args.dit: original_dit_ckpt = load_original_checkpoint(args, filename=args.dit_filename) if "klein-4b" in args.dit_filename: model_type = "klein-4b" elif "klein-9b" in args.dit_filename: model_type = "klein-9b" else: model_type = "flux2-dev" transformer = convert_flux2_transformer_to_diffusers(original_dit_ckpt, model_type) if not args.full_pipe: dit_dtype = torch.bfloat16 if args.dit_dtype == "bf16" else torch.float32 transformer.to(dit_dtype).save_pretrained(f"{args.output_path}/transformer") if args.full_pipe: tokenizer_id = "mistralai/Mistral-Small-3.1-24B-Instruct-2503" text_encoder_id = "mistralai/Mistral-Small-3.2-24B-Instruct-2506" generate_config = GenerationConfig.from_pretrained(text_encoder_id) generate_config.do_sample = True text_encoder = Mistral3ForConditionalGeneration.from_pretrained( text_encoder_id, generation_config=generate_config, torch_dtype=torch.bfloat16 ) tokenizer = AutoProcessor.from_pretrained(tokenizer_id) scheduler = FlowMatchEulerDiscreteScheduler.from_pretrained( "black-forest-labs/FLUX.1-dev", subfolder="scheduler" ) if_distilled = "base" not in args.dit_filename pipe = Flux2Pipeline( vae=vae, transformer=transformer, text_encoder=text_encoder, tokenizer=tokenizer, scheduler=scheduler, if_distilled=if_distilled, ) pipe.save_pretrained(args.output_path) if __name__ == "__main__": main(args)
{ "repo_id": "huggingface/diffusers", "file_path": "scripts/convert_flux2_to_diffusers.py", "license": "Apache License 2.0", "lines": 449, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
huggingface/diffusers:src/diffusers/models/autoencoders/autoencoder_kl_flux2.py
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math import torch import torch.nn as nn from ...configuration_utils import ConfigMixin, register_to_config from ...loaders import PeftAdapterMixin from ...loaders.single_file_model import FromOriginalModelMixin from ...utils import deprecate from ...utils.accelerate_utils import apply_forward_hook from ..attention import AttentionMixin from ..attention_processor import ( ADDED_KV_ATTENTION_PROCESSORS, CROSS_ATTENTION_PROCESSORS, Attention, AttnAddedKVProcessor, AttnProcessor, FusedAttnProcessor2_0, ) from ..modeling_outputs import AutoencoderKLOutput from ..modeling_utils import ModelMixin from .vae import AutoencoderMixin, Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder class AutoencoderKLFlux2( ModelMixin, AutoencoderMixin, AttentionMixin, ConfigMixin, FromOriginalModelMixin, PeftAdapterMixin ): r""" A VAE model with KL loss for encoding images into latents and decoding latent representations into images. This model inherits from [`ModelMixin`]. Check the superclass documentation for it's generic methods implemented for all models (such as downloading or saving). Parameters: in_channels (int, *optional*, defaults to 3): Number of channels in the input image. out_channels (int, *optional*, defaults to 3): Number of channels in the output. down_block_types (`tuple[str]`, *optional*, defaults to `("DownEncoderBlock2D",)`): Tuple of downsample block types. 
up_block_types (`tuple[str]`, *optional*, defaults to `("UpDecoderBlock2D",)`): Tuple of upsample block types. block_out_channels (`tuple[int]`, *optional*, defaults to `(64,)`): Tuple of block output channels. act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use. latent_channels (`int`, *optional*, defaults to 4): Number of channels in the latent space. sample_size (`int`, *optional*, defaults to `32`): Sample input size. force_upcast (`bool`, *optional*, default to `True`): If enabled it will force the VAE to run in float32 for high image resolution pipelines, such as SD-XL. VAE can be fine-tuned / trained to a lower range without losing too much precision in which case `force_upcast` can be set to `False` - see: https://huggingface.co/madebyollin/sdxl-vae-fp16-fix mid_block_add_attention (`bool`, *optional*, default to `True`): If enabled, the mid_block of the Encoder and Decoder will have attention blocks. If set to false, the mid_block will only have resnet blocks """ _supports_gradient_checkpointing = True _no_split_modules = ["BasicTransformerBlock", "ResnetBlock2D"] @register_to_config def __init__( self, in_channels: int = 3, out_channels: int = 3, down_block_types: tuple[str, ...] = ( "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", ), up_block_types: tuple[str, ...] = ( "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", ), block_out_channels: tuple[int, ...] 
= ( 128, 256, 512, 512, ), layers_per_block: int = 2, act_fn: str = "silu", latent_channels: int = 32, norm_num_groups: int = 32, sample_size: int = 1024, # YiYi notes: not sure force_upcast: bool = True, use_quant_conv: bool = True, use_post_quant_conv: bool = True, mid_block_add_attention: bool = True, batch_norm_eps: float = 1e-4, batch_norm_momentum: float = 0.1, patch_size: tuple[int, int] = (2, 2), ): super().__init__() # pass init params to Encoder self.encoder = Encoder( in_channels=in_channels, out_channels=latent_channels, down_block_types=down_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, double_z=True, mid_block_add_attention=mid_block_add_attention, ) # pass init params to Decoder self.decoder = Decoder( in_channels=latent_channels, out_channels=out_channels, up_block_types=up_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, norm_num_groups=norm_num_groups, act_fn=act_fn, mid_block_add_attention=mid_block_add_attention, ) self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1) if use_quant_conv else None self.post_quant_conv = nn.Conv2d(latent_channels, latent_channels, 1) if use_post_quant_conv else None self.bn = nn.BatchNorm2d( math.prod(patch_size) * latent_channels, eps=batch_norm_eps, momentum=batch_norm_momentum, affine=False, track_running_stats=True, ) self.use_slicing = False self.use_tiling = False # only relevant if vae tiling is enabled self.tile_sample_min_size = self.config.sample_size sample_size = ( self.config.sample_size[0] if isinstance(self.config.sample_size, (list, tuple)) else self.config.sample_size ) self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels) - 1))) self.tile_overlap_factor = 0.25 # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_default_attn_processor def set_default_attn_processor(self): """ Disables custom 
attention processors and sets the default attention implementation. """ if all(proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values()): processor = AttnAddedKVProcessor() elif all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()): processor = AttnProcessor() else: raise ValueError( f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}" ) self.set_attn_processor(processor) def _encode(self, x: torch.Tensor) -> torch.Tensor: batch_size, num_channels, height, width = x.shape if self.use_tiling and (width > self.tile_sample_min_size or height > self.tile_sample_min_size): return self._tiled_encode(x) enc = self.encoder(x) if self.quant_conv is not None: enc = self.quant_conv(enc) return enc @apply_forward_hook def encode( self, x: torch.Tensor, return_dict: bool = True ) -> AutoencoderKLOutput | tuple[DiagonalGaussianDistribution]: """ Encode a batch of images into latents. Args: x (`torch.Tensor`): Input batch of images. return_dict (`bool`, *optional*, defaults to `True`): Whether to return a [`~models.autoencoder_kl.AutoencoderKLOutput`] instead of a plain tuple. Returns: The latent representations of the encoded images. If `return_dict` is True, a [`~models.autoencoder_kl.AutoencoderKLOutput`] is returned, otherwise a plain `tuple` is returned. 
""" if self.use_slicing and x.shape[0] > 1: encoded_slices = [self._encode(x_slice) for x_slice in x.split(1)] h = torch.cat(encoded_slices) else: h = self._encode(x) posterior = DiagonalGaussianDistribution(h) if not return_dict: return (posterior,) return AutoencoderKLOutput(latent_dist=posterior) def _decode(self, z: torch.Tensor, return_dict: bool = True) -> DecoderOutput | torch.Tensor: if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size): return self.tiled_decode(z, return_dict=return_dict) if self.post_quant_conv is not None: z = self.post_quant_conv(z) dec = self.decoder(z) if not return_dict: return (dec,) return DecoderOutput(sample=dec) @apply_forward_hook def decode( self, z: torch.FloatTensor, return_dict: bool = True, generator=None ) -> DecoderOutput | torch.FloatTensor: """ Decode a batch of images. Args: z (`torch.Tensor`): Input batch of latent vectors. return_dict (`bool`, *optional*, defaults to `True`): Whether to return a [`~models.vae.DecoderOutput`] instead of a plain tuple. Returns: [`~models.vae.DecoderOutput`] or `tuple`: If return_dict is True, a [`~models.vae.DecoderOutput`] is returned, otherwise a plain `tuple` is returned. 
""" if self.use_slicing and z.shape[0] > 1: decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)] decoded = torch.cat(decoded_slices) else: decoded = self._decode(z).sample if not return_dict: return (decoded,) return DecoderOutput(sample=decoded) def blend_v(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor: blend_extent = min(a.shape[2], b.shape[2], blend_extent) for y in range(blend_extent): b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent) return b def blend_h(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor: blend_extent = min(a.shape[3], b.shape[3], blend_extent) for x in range(blend_extent): b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent) return b def _tiled_encode(self, x: torch.Tensor) -> torch.Tensor: r"""Encode a batch of images using a tiled encoder. When this option is enabled, the VAE will split the input tensor into tiles to compute encoding in several steps. This is useful to keep memory use constant regardless of image size. The end result of tiled encoding is different from non-tiled encoding because each tile uses a different encoder. To avoid tiling artifacts, the tiles overlap and are blended together to form a smooth output. You may still see tile-sized changes in the output, but they should be much less noticeable. Args: x (`torch.Tensor`): Input batch of images. Returns: `torch.Tensor`: The latent representation of the encoded videos. """ overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor)) blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor) row_limit = self.tile_latent_min_size - blend_extent # Split the image into 512x512 tiles and encode them separately. 
rows = [] for i in range(0, x.shape[2], overlap_size): row = [] for j in range(0, x.shape[3], overlap_size): tile = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size] tile = self.encoder(tile) if self.config.use_quant_conv: tile = self.quant_conv(tile) row.append(tile) rows.append(row) result_rows = [] for i, row in enumerate(rows): result_row = [] for j, tile in enumerate(row): # blend the above tile and the left tile # to the current tile and add the current tile to the result row if i > 0: tile = self.blend_v(rows[i - 1][j], tile, blend_extent) if j > 0: tile = self.blend_h(row[j - 1], tile, blend_extent) result_row.append(tile[:, :, :row_limit, :row_limit]) result_rows.append(torch.cat(result_row, dim=3)) enc = torch.cat(result_rows, dim=2) return enc def tiled_encode(self, x: torch.Tensor, return_dict: bool = True) -> AutoencoderKLOutput: r"""Encode a batch of images using a tiled encoder. When this option is enabled, the VAE will split the input tensor into tiles to compute encoding in several steps. This is useful to keep memory use constant regardless of image size. The end result of tiled encoding is different from non-tiled encoding because each tile uses a different encoder. To avoid tiling artifacts, the tiles overlap and are blended together to form a smooth output. You may still see tile-sized changes in the output, but they should be much less noticeable. Args: x (`torch.Tensor`): Input batch of images. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~models.autoencoder_kl.AutoencoderKLOutput`] instead of a plain tuple. Returns: [`~models.autoencoder_kl.AutoencoderKLOutput`] or `tuple`: If return_dict is True, a [`~models.autoencoder_kl.AutoencoderKLOutput`] is returned, otherwise a plain `tuple` is returned. """ deprecation_message = ( "The tiled_encode implementation supporting the `return_dict` parameter is deprecated. 
In the future, the " "implementation of this method will be replaced with that of `_tiled_encode` and you will no longer be able " "to pass `return_dict`. You will also have to create a `DiagonalGaussianDistribution()` from the returned value." ) deprecate("tiled_encode", "1.0.0", deprecation_message, standard_warn=False) overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor)) blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor) row_limit = self.tile_latent_min_size - blend_extent # Split the image into 512x512 tiles and encode them separately. rows = [] for i in range(0, x.shape[2], overlap_size): row = [] for j in range(0, x.shape[3], overlap_size): tile = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size] tile = self.encoder(tile) if self.config.use_quant_conv: tile = self.quant_conv(tile) row.append(tile) rows.append(row) result_rows = [] for i, row in enumerate(rows): result_row = [] for j, tile in enumerate(row): # blend the above tile and the left tile # to the current tile and add the current tile to the result row if i > 0: tile = self.blend_v(rows[i - 1][j], tile, blend_extent) if j > 0: tile = self.blend_h(row[j - 1], tile, blend_extent) result_row.append(tile[:, :, :row_limit, :row_limit]) result_rows.append(torch.cat(result_row, dim=3)) moments = torch.cat(result_rows, dim=2) posterior = DiagonalGaussianDistribution(moments) if not return_dict: return (posterior,) return AutoencoderKLOutput(latent_dist=posterior) def tiled_decode(self, z: torch.Tensor, return_dict: bool = True) -> DecoderOutput | torch.Tensor: r""" Decode a batch of images using a tiled decoder. Args: z (`torch.Tensor`): Input batch of latent vectors. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~models.vae.DecoderOutput`] instead of a plain tuple. 
Returns: [`~models.vae.DecoderOutput`] or `tuple`: If return_dict is True, a [`~models.vae.DecoderOutput`] is returned, otherwise a plain `tuple` is returned. """ overlap_size = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor)) blend_extent = int(self.tile_sample_min_size * self.tile_overlap_factor) row_limit = self.tile_sample_min_size - blend_extent # Split z into overlapping 64x64 tiles and decode them separately. # The tiles have an overlap to avoid seams between tiles. rows = [] for i in range(0, z.shape[2], overlap_size): row = [] for j in range(0, z.shape[3], overlap_size): tile = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size] if self.config.use_post_quant_conv: tile = self.post_quant_conv(tile) decoded = self.decoder(tile) row.append(decoded) rows.append(row) result_rows = [] for i, row in enumerate(rows): result_row = [] for j, tile in enumerate(row): # blend the above tile and the left tile # to the current tile and add the current tile to the result row if i > 0: tile = self.blend_v(rows[i - 1][j], tile, blend_extent) if j > 0: tile = self.blend_h(row[j - 1], tile, blend_extent) result_row.append(tile[:, :, :row_limit, :row_limit]) result_rows.append(torch.cat(result_row, dim=3)) dec = torch.cat(result_rows, dim=2) if not return_dict: return (dec,) return DecoderOutput(sample=dec) def forward( self, sample: torch.Tensor, sample_posterior: bool = False, return_dict: bool = True, generator: torch.Generator | None = None, ) -> DecoderOutput | torch.Tensor: r""" Args: sample (`torch.Tensor`): Input sample. sample_posterior (`bool`, *optional*, defaults to `False`): Whether to sample from the posterior. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`DecoderOutput`] instead of a plain tuple. 
""" x = sample posterior = self.encode(x).latent_dist if sample_posterior: z = posterior.sample(generator=generator) else: z = posterior.mode() dec = self.decode(z).sample if not return_dict: return (dec,) return DecoderOutput(sample=dec) # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.fuse_qkv_projections def fuse_qkv_projections(self): """ Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query, key, value) are fused. For cross-attention modules, key and value projection matrices are fused. > [!WARNING] > This API is 🧪 experimental. """ self.original_attn_processors = None for _, attn_processor in self.attn_processors.items(): if "Added" in str(attn_processor.__class__.__name__): raise ValueError("`fuse_qkv_projections()` is not supported for models having added KV projections.") self.original_attn_processors = self.attn_processors for module in self.modules(): if isinstance(module, Attention): module.fuse_projections(fuse=True) self.set_attn_processor(FusedAttnProcessor2_0()) # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.unfuse_qkv_projections def unfuse_qkv_projections(self): """Disables the fused QKV projection if enabled. > [!WARNING] > This API is 🧪 experimental. """ if self.original_attn_processors is not None: self.set_attn_processor(self.original_attn_processors)
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/models/autoencoders/autoencoder_kl_flux2.py", "license": "Apache License 2.0", "lines": 414, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:src/diffusers/models/transformers/transformer_flux2.py
# Copyright 2025 Black Forest Labs, The HuggingFace Team and The InstantX Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect from typing import Any import torch import torch.nn as nn import torch.nn.functional as F from ...configuration_utils import ConfigMixin, register_to_config from ...loaders import FluxTransformer2DLoadersMixin, FromOriginalModelMixin, PeftAdapterMixin from ...utils import apply_lora_scale, logging from .._modeling_parallel import ContextParallelInput, ContextParallelOutput from ..attention import AttentionMixin, AttentionModuleMixin from ..attention_dispatch import dispatch_attention_fn from ..cache_utils import CacheMixin from ..embeddings import ( TimestepEmbedding, Timesteps, apply_rotary_emb, get_1d_rotary_pos_embed, ) from ..modeling_outputs import Transformer2DModelOutput from ..modeling_utils import ModelMixin from ..normalization import AdaLayerNormContinuous logger = logging.get_logger(__name__) # pylint: disable=invalid-name def _get_projections(attn: "Flux2Attention", hidden_states, encoder_hidden_states=None): query = attn.to_q(hidden_states) key = attn.to_k(hidden_states) value = attn.to_v(hidden_states) encoder_query = encoder_key = encoder_value = None if encoder_hidden_states is not None and attn.added_kv_proj_dim is not None: encoder_query = attn.add_q_proj(encoder_hidden_states) encoder_key = attn.add_k_proj(encoder_hidden_states) encoder_value = attn.add_v_proj(encoder_hidden_states) return query, 
key, value, encoder_query, encoder_key, encoder_value def _get_fused_projections(attn: "Flux2Attention", hidden_states, encoder_hidden_states=None): query, key, value = attn.to_qkv(hidden_states).chunk(3, dim=-1) encoder_query = encoder_key = encoder_value = (None,) if encoder_hidden_states is not None and hasattr(attn, "to_added_qkv"): encoder_query, encoder_key, encoder_value = attn.to_added_qkv(encoder_hidden_states).chunk(3, dim=-1) return query, key, value, encoder_query, encoder_key, encoder_value def _get_qkv_projections(attn: "Flux2Attention", hidden_states, encoder_hidden_states=None): if attn.fused_projections: return _get_fused_projections(attn, hidden_states, encoder_hidden_states) return _get_projections(attn, hidden_states, encoder_hidden_states) class Flux2SwiGLU(nn.Module): """ Flux 2 uses a SwiGLU-style activation in the transformer feedforward sub-blocks, but with the linear projection layer fused into the first linear layer of the FF sub-block. Thus, this module has no trainable parameters. """ def __init__(self): super().__init__() self.gate_fn = nn.SiLU() def forward(self, x: torch.Tensor) -> torch.Tensor: x1, x2 = x.chunk(2, dim=-1) x = self.gate_fn(x1) * x2 return x class Flux2FeedForward(nn.Module): def __init__( self, dim: int, dim_out: int | None = None, mult: float = 3.0, inner_dim: int | None = None, bias: bool = False, ): super().__init__() if inner_dim is None: inner_dim = int(dim * mult) dim_out = dim_out or dim # Flux2SwiGLU will reduce the dimension by half self.linear_in = nn.Linear(dim, inner_dim * 2, bias=bias) self.act_fn = Flux2SwiGLU() self.linear_out = nn.Linear(inner_dim, dim_out, bias=bias) def forward(self, x: torch.Tensor) -> torch.Tensor: x = self.linear_in(x) x = self.act_fn(x) x = self.linear_out(x) return x class Flux2AttnProcessor: _attention_backend = None _parallel_config = None def __init__(self): if not hasattr(F, "scaled_dot_product_attention"): raise ImportError(f"{self.__class__.__name__} requires PyTorch 2.0. 
Please upgrade your pytorch version.") def __call__( self, attn: "Flux2Attention", hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor = None, attention_mask: torch.Tensor | None = None, image_rotary_emb: torch.Tensor | None = None, ) -> torch.Tensor: query, key, value, encoder_query, encoder_key, encoder_value = _get_qkv_projections( attn, hidden_states, encoder_hidden_states ) query = query.unflatten(-1, (attn.heads, -1)) key = key.unflatten(-1, (attn.heads, -1)) value = value.unflatten(-1, (attn.heads, -1)) query = attn.norm_q(query) key = attn.norm_k(key) if attn.added_kv_proj_dim is not None: encoder_query = encoder_query.unflatten(-1, (attn.heads, -1)) encoder_key = encoder_key.unflatten(-1, (attn.heads, -1)) encoder_value = encoder_value.unflatten(-1, (attn.heads, -1)) encoder_query = attn.norm_added_q(encoder_query) encoder_key = attn.norm_added_k(encoder_key) query = torch.cat([encoder_query, query], dim=1) key = torch.cat([encoder_key, key], dim=1) value = torch.cat([encoder_value, value], dim=1) if image_rotary_emb is not None: query = apply_rotary_emb(query, image_rotary_emb, sequence_dim=1) key = apply_rotary_emb(key, image_rotary_emb, sequence_dim=1) hidden_states = dispatch_attention_fn( query, key, value, attn_mask=attention_mask, backend=self._attention_backend, parallel_config=self._parallel_config, ) hidden_states = hidden_states.flatten(2, 3) hidden_states = hidden_states.to(query.dtype) if encoder_hidden_states is not None: encoder_hidden_states, hidden_states = hidden_states.split_with_sizes( [encoder_hidden_states.shape[1], hidden_states.shape[1] - encoder_hidden_states.shape[1]], dim=1 ) encoder_hidden_states = attn.to_add_out(encoder_hidden_states) hidden_states = attn.to_out[0](hidden_states) hidden_states = attn.to_out[1](hidden_states) if encoder_hidden_states is not None: return hidden_states, encoder_hidden_states else: return hidden_states class Flux2Attention(torch.nn.Module, AttentionModuleMixin): _default_processor_cls 
= Flux2AttnProcessor _available_processors = [Flux2AttnProcessor] def __init__( self, query_dim: int, heads: int = 8, dim_head: int = 64, dropout: float = 0.0, bias: bool = False, added_kv_proj_dim: int | None = None, added_proj_bias: bool | None = True, out_bias: bool = True, eps: float = 1e-5, out_dim: int = None, elementwise_affine: bool = True, processor=None, ): super().__init__() self.head_dim = dim_head self.inner_dim = out_dim if out_dim is not None else dim_head * heads self.query_dim = query_dim self.out_dim = out_dim if out_dim is not None else query_dim self.heads = out_dim // dim_head if out_dim is not None else heads self.use_bias = bias self.dropout = dropout self.added_kv_proj_dim = added_kv_proj_dim self.added_proj_bias = added_proj_bias self.to_q = torch.nn.Linear(query_dim, self.inner_dim, bias=bias) self.to_k = torch.nn.Linear(query_dim, self.inner_dim, bias=bias) self.to_v = torch.nn.Linear(query_dim, self.inner_dim, bias=bias) # QK Norm self.norm_q = torch.nn.RMSNorm(dim_head, eps=eps, elementwise_affine=elementwise_affine) self.norm_k = torch.nn.RMSNorm(dim_head, eps=eps, elementwise_affine=elementwise_affine) self.to_out = torch.nn.ModuleList([]) self.to_out.append(torch.nn.Linear(self.inner_dim, self.out_dim, bias=out_bias)) self.to_out.append(torch.nn.Dropout(dropout)) if added_kv_proj_dim is not None: self.norm_added_q = torch.nn.RMSNorm(dim_head, eps=eps) self.norm_added_k = torch.nn.RMSNorm(dim_head, eps=eps) self.add_q_proj = torch.nn.Linear(added_kv_proj_dim, self.inner_dim, bias=added_proj_bias) self.add_k_proj = torch.nn.Linear(added_kv_proj_dim, self.inner_dim, bias=added_proj_bias) self.add_v_proj = torch.nn.Linear(added_kv_proj_dim, self.inner_dim, bias=added_proj_bias) self.to_add_out = torch.nn.Linear(self.inner_dim, query_dim, bias=out_bias) if processor is None: processor = self._default_processor_cls() self.set_processor(processor) def forward( self, hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor | None = 
None, attention_mask: torch.Tensor | None = None, image_rotary_emb: torch.Tensor | None = None, **kwargs, ) -> torch.Tensor: attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) unused_kwargs = [k for k, _ in kwargs.items() if k not in attn_parameters] if len(unused_kwargs) > 0: logger.warning( f"joint_attention_kwargs {unused_kwargs} are not expected by {self.processor.__class__.__name__} and will be ignored." ) kwargs = {k: w for k, w in kwargs.items() if k in attn_parameters} return self.processor(self, hidden_states, encoder_hidden_states, attention_mask, image_rotary_emb, **kwargs) class Flux2ParallelSelfAttnProcessor: _attention_backend = None _parallel_config = None def __init__(self): if not hasattr(F, "scaled_dot_product_attention"): raise ImportError(f"{self.__class__.__name__} requires PyTorch 2.0. Please upgrade your pytorch version.") def __call__( self, attn: "Flux2ParallelSelfAttention", hidden_states: torch.Tensor, attention_mask: torch.Tensor | None = None, image_rotary_emb: torch.Tensor | None = None, ) -> torch.Tensor: # Parallel in (QKV + MLP in) projection hidden_states = attn.to_qkv_mlp_proj(hidden_states) qkv, mlp_hidden_states = torch.split( hidden_states, [3 * attn.inner_dim, attn.mlp_hidden_dim * attn.mlp_mult_factor], dim=-1 ) # Handle the attention logic query, key, value = qkv.chunk(3, dim=-1) query = query.unflatten(-1, (attn.heads, -1)) key = key.unflatten(-1, (attn.heads, -1)) value = value.unflatten(-1, (attn.heads, -1)) query = attn.norm_q(query) key = attn.norm_k(key) if image_rotary_emb is not None: query = apply_rotary_emb(query, image_rotary_emb, sequence_dim=1) key = apply_rotary_emb(key, image_rotary_emb, sequence_dim=1) hidden_states = dispatch_attention_fn( query, key, value, attn_mask=attention_mask, backend=self._attention_backend, parallel_config=self._parallel_config, ) hidden_states = hidden_states.flatten(2, 3) hidden_states = hidden_states.to(query.dtype) # Handle the feedforward (FF) 
logic mlp_hidden_states = attn.mlp_act_fn(mlp_hidden_states) # Concatenate and parallel output projection hidden_states = torch.cat([hidden_states, mlp_hidden_states], dim=-1) hidden_states = attn.to_out(hidden_states) return hidden_states class Flux2ParallelSelfAttention(torch.nn.Module, AttentionModuleMixin): """ Flux 2 parallel self-attention for the Flux 2 single-stream transformer blocks. This implements a parallel transformer block, where the attention QKV projections are fused to the feedforward (FF) input projections, and the attention output projections are fused to the FF output projections. See the [ViT-22B paper](https://arxiv.org/abs/2302.05442) for a visual depiction of this type of transformer block. """ _default_processor_cls = Flux2ParallelSelfAttnProcessor _available_processors = [Flux2ParallelSelfAttnProcessor] # Does not support QKV fusion as the QKV projections are always fused _supports_qkv_fusion = False def __init__( self, query_dim: int, heads: int = 8, dim_head: int = 64, dropout: float = 0.0, bias: bool = False, out_bias: bool = True, eps: float = 1e-5, out_dim: int = None, elementwise_affine: bool = True, mlp_ratio: float = 4.0, mlp_mult_factor: int = 2, processor=None, ): super().__init__() self.head_dim = dim_head self.inner_dim = out_dim if out_dim is not None else dim_head * heads self.query_dim = query_dim self.out_dim = out_dim if out_dim is not None else query_dim self.heads = out_dim // dim_head if out_dim is not None else heads self.use_bias = bias self.dropout = dropout self.mlp_ratio = mlp_ratio self.mlp_hidden_dim = int(query_dim * self.mlp_ratio) self.mlp_mult_factor = mlp_mult_factor # Fused QKV projections + MLP input projection self.to_qkv_mlp_proj = torch.nn.Linear( self.query_dim, self.inner_dim * 3 + self.mlp_hidden_dim * self.mlp_mult_factor, bias=bias ) self.mlp_act_fn = Flux2SwiGLU() # QK Norm self.norm_q = torch.nn.RMSNorm(dim_head, eps=eps, elementwise_affine=elementwise_affine) self.norm_k = 
torch.nn.RMSNorm(dim_head, eps=eps, elementwise_affine=elementwise_affine) # Fused attention output projection + MLP output projection self.to_out = torch.nn.Linear(self.inner_dim + self.mlp_hidden_dim, self.out_dim, bias=out_bias) if processor is None: processor = self._default_processor_cls() self.set_processor(processor) def forward( self, hidden_states: torch.Tensor, attention_mask: torch.Tensor | None = None, image_rotary_emb: torch.Tensor | None = None, **kwargs, ) -> torch.Tensor: attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) unused_kwargs = [k for k, _ in kwargs.items() if k not in attn_parameters] if len(unused_kwargs) > 0: logger.warning( f"joint_attention_kwargs {unused_kwargs} are not expected by {self.processor.__class__.__name__} and will be ignored." ) kwargs = {k: w for k, w in kwargs.items() if k in attn_parameters} return self.processor(self, hidden_states, attention_mask, image_rotary_emb, **kwargs) class Flux2SingleTransformerBlock(nn.Module): def __init__( self, dim: int, num_attention_heads: int, attention_head_dim: int, mlp_ratio: float = 3.0, eps: float = 1e-6, bias: bool = False, ): super().__init__() self.norm = nn.LayerNorm(dim, elementwise_affine=False, eps=eps) # Note that the MLP in/out linear layers are fused with the attention QKV/out projections, respectively; this # is often called a "parallel" transformer block. See the [ViT-22B paper](https://arxiv.org/abs/2302.05442) # for a visual depiction of this type of transformer block. 
self.attn = Flux2ParallelSelfAttention( query_dim=dim, dim_head=attention_head_dim, heads=num_attention_heads, out_dim=dim, bias=bias, out_bias=bias, eps=eps, mlp_ratio=mlp_ratio, mlp_mult_factor=2, processor=Flux2ParallelSelfAttnProcessor(), ) def forward( self, hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor | None, temb_mod: torch.Tensor, image_rotary_emb: tuple[torch.Tensor, torch.Tensor] | None = None, joint_attention_kwargs: dict[str, Any] | None = None, split_hidden_states: bool = False, text_seq_len: int | None = None, ) -> tuple[torch.Tensor, torch.Tensor]: # If encoder_hidden_states is None, hidden_states is assumed to have encoder_hidden_states already # concatenated if encoder_hidden_states is not None: text_seq_len = encoder_hidden_states.shape[1] hidden_states = torch.cat([encoder_hidden_states, hidden_states], dim=1) mod_shift, mod_scale, mod_gate = Flux2Modulation.split(temb_mod, 1)[0] norm_hidden_states = self.norm(hidden_states) norm_hidden_states = (1 + mod_scale) * norm_hidden_states + mod_shift joint_attention_kwargs = joint_attention_kwargs or {} attn_output = self.attn( hidden_states=norm_hidden_states, image_rotary_emb=image_rotary_emb, **joint_attention_kwargs, ) hidden_states = hidden_states + mod_gate * attn_output if hidden_states.dtype == torch.float16: hidden_states = hidden_states.clip(-65504, 65504) if split_hidden_states: encoder_hidden_states, hidden_states = hidden_states[:, :text_seq_len], hidden_states[:, text_seq_len:] return encoder_hidden_states, hidden_states else: return hidden_states class Flux2TransformerBlock(nn.Module): def __init__( self, dim: int, num_attention_heads: int, attention_head_dim: int, mlp_ratio: float = 3.0, eps: float = 1e-6, bias: bool = False, ): super().__init__() self.mlp_hidden_dim = int(dim * mlp_ratio) self.norm1 = nn.LayerNorm(dim, elementwise_affine=False, eps=eps) self.norm1_context = nn.LayerNorm(dim, elementwise_affine=False, eps=eps) self.attn = Flux2Attention( 
query_dim=dim, added_kv_proj_dim=dim, dim_head=attention_head_dim, heads=num_attention_heads, out_dim=dim, bias=bias, added_proj_bias=bias, out_bias=bias, eps=eps, processor=Flux2AttnProcessor(), ) self.norm2 = nn.LayerNorm(dim, elementwise_affine=False, eps=eps) self.ff = Flux2FeedForward(dim=dim, dim_out=dim, mult=mlp_ratio, bias=bias) self.norm2_context = nn.LayerNorm(dim, elementwise_affine=False, eps=eps) self.ff_context = Flux2FeedForward(dim=dim, dim_out=dim, mult=mlp_ratio, bias=bias) def forward( self, hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor, temb_mod_img: torch.Tensor, temb_mod_txt: torch.Tensor, image_rotary_emb: tuple[torch.Tensor, torch.Tensor] | None = None, joint_attention_kwargs: dict[str, Any] | None = None, ) -> tuple[torch.Tensor, torch.Tensor]: joint_attention_kwargs = joint_attention_kwargs or {} # Modulation parameters shape: [1, 1, self.dim] (shift_msa, scale_msa, gate_msa), (shift_mlp, scale_mlp, gate_mlp) = Flux2Modulation.split(temb_mod_img, 2) (c_shift_msa, c_scale_msa, c_gate_msa), (c_shift_mlp, c_scale_mlp, c_gate_mlp) = Flux2Modulation.split( temb_mod_txt, 2 ) # Img stream norm_hidden_states = self.norm1(hidden_states) norm_hidden_states = (1 + scale_msa) * norm_hidden_states + shift_msa # Conditioning txt stream norm_encoder_hidden_states = self.norm1_context(encoder_hidden_states) norm_encoder_hidden_states = (1 + c_scale_msa) * norm_encoder_hidden_states + c_shift_msa # Attention on concatenated img + txt stream attention_outputs = self.attn( hidden_states=norm_hidden_states, encoder_hidden_states=norm_encoder_hidden_states, image_rotary_emb=image_rotary_emb, **joint_attention_kwargs, ) attn_output, context_attn_output = attention_outputs # Process attention outputs for the image stream (`hidden_states`). 
attn_output = gate_msa * attn_output hidden_states = hidden_states + attn_output norm_hidden_states = self.norm2(hidden_states) norm_hidden_states = norm_hidden_states * (1 + scale_mlp) + shift_mlp ff_output = self.ff(norm_hidden_states) hidden_states = hidden_states + gate_mlp * ff_output # Process attention outputs for the text stream (`encoder_hidden_states`). context_attn_output = c_gate_msa * context_attn_output encoder_hidden_states = encoder_hidden_states + context_attn_output norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) norm_encoder_hidden_states = norm_encoder_hidden_states * (1 + c_scale_mlp) + c_shift_mlp context_ff_output = self.ff_context(norm_encoder_hidden_states) encoder_hidden_states = encoder_hidden_states + c_gate_mlp * context_ff_output if encoder_hidden_states.dtype == torch.float16: encoder_hidden_states = encoder_hidden_states.clip(-65504, 65504) return encoder_hidden_states, hidden_states class Flux2PosEmbed(nn.Module): # modified from https://github.com/black-forest-labs/flux/blob/c00d7c60b085fce8058b9df845e036090873f2ce/src/flux/modules/layers.py#L11 def __init__(self, theta: int, axes_dim: list[int]): super().__init__() self.theta = theta self.axes_dim = axes_dim def forward(self, ids: torch.Tensor) -> torch.Tensor: # Expected ids shape: [S, len(self.axes_dim)] cos_out = [] sin_out = [] pos = ids.float() is_mps = ids.device.type == "mps" is_npu = ids.device.type == "npu" freqs_dtype = torch.float32 if (is_mps or is_npu) else torch.float64 # Unlike Flux 1, loop over len(self.axes_dim) rather than ids.shape[-1] for i in range(len(self.axes_dim)): cos, sin = get_1d_rotary_pos_embed( self.axes_dim[i], pos[..., i], theta=self.theta, repeat_interleave_real=True, use_real=True, freqs_dtype=freqs_dtype, ) cos_out.append(cos) sin_out.append(sin) freqs_cos = torch.cat(cos_out, dim=-1).to(ids.device) freqs_sin = torch.cat(sin_out, dim=-1).to(ids.device) return freqs_cos, freqs_sin class 
Flux2TimestepGuidanceEmbeddings(nn.Module): def __init__( self, in_channels: int = 256, embedding_dim: int = 6144, bias: bool = False, guidance_embeds: bool = True, ): super().__init__() self.time_proj = Timesteps(num_channels=in_channels, flip_sin_to_cos=True, downscale_freq_shift=0) self.timestep_embedder = TimestepEmbedding( in_channels=in_channels, time_embed_dim=embedding_dim, sample_proj_bias=bias ) if guidance_embeds: self.guidance_embedder = TimestepEmbedding( in_channels=in_channels, time_embed_dim=embedding_dim, sample_proj_bias=bias ) else: self.guidance_embedder = None def forward(self, timestep: torch.Tensor, guidance: torch.Tensor) -> torch.Tensor: timesteps_proj = self.time_proj(timestep) timesteps_emb = self.timestep_embedder(timesteps_proj.to(timestep.dtype)) # (N, D) if guidance is not None and self.guidance_embedder is not None: guidance_proj = self.time_proj(guidance) guidance_emb = self.guidance_embedder(guidance_proj.to(guidance.dtype)) # (N, D) time_guidance_emb = timesteps_emb + guidance_emb return time_guidance_emb else: return timesteps_emb class Flux2Modulation(nn.Module): def __init__(self, dim: int, mod_param_sets: int = 2, bias: bool = False): super().__init__() self.mod_param_sets = mod_param_sets self.linear = nn.Linear(dim, dim * 3 * self.mod_param_sets, bias=bias) self.act_fn = nn.SiLU() def forward(self, temb: torch.Tensor) -> torch.Tensor: mod = self.act_fn(temb) mod = self.linear(mod) return mod @staticmethod # split inside the transformer blocks, to avoid passing tuples into checkpoints https://github.com/huggingface/diffusers/issues/12776 def split(mod: torch.Tensor, mod_param_sets: int) -> tuple[tuple[torch.Tensor, torch.Tensor, torch.Tensor], ...]: if mod.ndim == 2: mod = mod.unsqueeze(1) mod_params = torch.chunk(mod, 3 * mod_param_sets, dim=-1) # Return tuple of 3-tuples of modulation params shift/scale/gate return tuple(mod_params[3 * i : 3 * (i + 1)] for i in range(mod_param_sets)) class Flux2Transformer2DModel( 
ModelMixin, ConfigMixin, PeftAdapterMixin, FromOriginalModelMixin, FluxTransformer2DLoadersMixin, CacheMixin, AttentionMixin, ): """ The Transformer model introduced in Flux 2. Reference: https://blackforestlabs.ai/announcing-black-forest-labs/ Args: patch_size (`int`, defaults to `1`): Patch size to turn the input data into small patches. in_channels (`int`, defaults to `128`): The number of channels in the input. out_channels (`int`, *optional*, defaults to `None`): The number of channels in the output. If not specified, it defaults to `in_channels`. num_layers (`int`, defaults to `8`): The number of layers of dual stream DiT blocks to use. num_single_layers (`int`, defaults to `48`): The number of layers of single stream DiT blocks to use. attention_head_dim (`int`, defaults to `128`): The number of dimensions to use for each attention head. num_attention_heads (`int`, defaults to `48`): The number of attention heads to use. joint_attention_dim (`int`, defaults to `15360`): The number of dimensions to use for the joint attention (embedding/channel dimension of `encoder_hidden_states`). pooled_projection_dim (`int`, defaults to `768`): The number of dimensions to use for the pooled projection. guidance_embeds (`bool`, defaults to `True`): Whether to use guidance embeddings for guidance-distilled variant of the model. axes_dims_rope (`tuple[int]`, defaults to `(32, 32, 32, 32)`): The dimensions to use for the rotary positional embeddings. 
""" _supports_gradient_checkpointing = True _no_split_modules = ["Flux2TransformerBlock", "Flux2SingleTransformerBlock"] _skip_layerwise_casting_patterns = ["pos_embed", "norm"] _repeated_blocks = ["Flux2TransformerBlock", "Flux2SingleTransformerBlock"] _cp_plan = { "": { "hidden_states": ContextParallelInput(split_dim=1, expected_dims=3, split_output=False), "encoder_hidden_states": ContextParallelInput(split_dim=1, expected_dims=3, split_output=False), "img_ids": ContextParallelInput(split_dim=1, expected_dims=3, split_output=False), "txt_ids": ContextParallelInput(split_dim=1, expected_dims=3, split_output=False), }, "proj_out": ContextParallelOutput(gather_dim=1, expected_dims=3), } @register_to_config def __init__( self, patch_size: int = 1, in_channels: int = 128, out_channels: int | None = None, num_layers: int = 8, num_single_layers: int = 48, attention_head_dim: int = 128, num_attention_heads: int = 48, joint_attention_dim: int = 15360, timestep_guidance_channels: int = 256, mlp_ratio: float = 3.0, axes_dims_rope: tuple[int, ...] = (32, 32, 32, 32), rope_theta: int = 2000, eps: float = 1e-6, guidance_embeds: bool = True, ): super().__init__() self.out_channels = out_channels or in_channels self.inner_dim = num_attention_heads * attention_head_dim # 1. Sinusoidal positional embedding for RoPE on image and text tokens self.pos_embed = Flux2PosEmbed(theta=rope_theta, axes_dim=axes_dims_rope) # 2. Combined timestep + guidance embedding self.time_guidance_embed = Flux2TimestepGuidanceEmbeddings( in_channels=timestep_guidance_channels, embedding_dim=self.inner_dim, bias=False, guidance_embeds=guidance_embeds, ) # 3. Modulation (double stream and single stream blocks share modulation parameters, resp.) 
# Two sets of shift/scale/gate modulation parameters for the double stream attn and FF sub-blocks self.double_stream_modulation_img = Flux2Modulation(self.inner_dim, mod_param_sets=2, bias=False) self.double_stream_modulation_txt = Flux2Modulation(self.inner_dim, mod_param_sets=2, bias=False) # Only one set of modulation parameters as the attn and FF sub-blocks are run in parallel for single stream self.single_stream_modulation = Flux2Modulation(self.inner_dim, mod_param_sets=1, bias=False) # 4. Input projections self.x_embedder = nn.Linear(in_channels, self.inner_dim, bias=False) self.context_embedder = nn.Linear(joint_attention_dim, self.inner_dim, bias=False) # 5. Double Stream Transformer Blocks self.transformer_blocks = nn.ModuleList( [ Flux2TransformerBlock( dim=self.inner_dim, num_attention_heads=num_attention_heads, attention_head_dim=attention_head_dim, mlp_ratio=mlp_ratio, eps=eps, bias=False, ) for _ in range(num_layers) ] ) # 6. Single Stream Transformer Blocks self.single_transformer_blocks = nn.ModuleList( [ Flux2SingleTransformerBlock( dim=self.inner_dim, num_attention_heads=num_attention_heads, attention_head_dim=attention_head_dim, mlp_ratio=mlp_ratio, eps=eps, bias=False, ) for _ in range(num_single_layers) ] ) # 7. Output layers self.norm_out = AdaLayerNormContinuous( self.inner_dim, self.inner_dim, elementwise_affine=False, eps=eps, bias=False ) self.proj_out = nn.Linear(self.inner_dim, patch_size * patch_size * self.out_channels, bias=False) self.gradient_checkpointing = False @apply_lora_scale("joint_attention_kwargs") def forward( self, hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor = None, timestep: torch.LongTensor = None, img_ids: torch.Tensor = None, txt_ids: torch.Tensor = None, guidance: torch.Tensor = None, joint_attention_kwargs: dict[str, Any] | None = None, return_dict: bool = True, ) -> torch.Tensor | Transformer2DModelOutput: """ The [`FluxTransformer2DModel`] forward method. 
Args: hidden_states (`torch.Tensor` of shape `(batch_size, image_sequence_length, in_channels)`): Input `hidden_states`. encoder_hidden_states (`torch.Tensor` of shape `(batch_size, text_sequence_length, joint_attention_dim)`): Conditional embeddings (embeddings computed from the input conditions such as prompts) to use. timestep ( `torch.LongTensor`): Used to indicate denoising step. block_controlnet_hidden_states: (`list` of `torch.Tensor`): A list of tensors that if specified are added to the residuals of transformer blocks. joint_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~models.transformer_2d.Transformer2DModelOutput`] instead of a plain tuple. Returns: If `return_dict` is True, an [`~models.transformer_2d.Transformer2DModelOutput`] is returned, otherwise a `tuple` where the first element is the sample tensor. """ # 0. Handle input arguments num_txt_tokens = encoder_hidden_states.shape[1] # 1. Calculate timestep embedding and modulation parameters timestep = timestep.to(hidden_states.dtype) * 1000 if guidance is not None: guidance = guidance.to(hidden_states.dtype) * 1000 temb = self.time_guidance_embed(timestep, guidance) double_stream_mod_img = self.double_stream_modulation_img(temb) double_stream_mod_txt = self.double_stream_modulation_txt(temb) single_stream_mod = self.single_stream_modulation(temb) # 2. Input projection for image (hidden_states) and conditioning text (encoder_hidden_states) hidden_states = self.x_embedder(hidden_states) encoder_hidden_states = self.context_embedder(encoder_hidden_states) # 3. 
Calculate RoPE embeddings from image and text tokens # NOTE: the below logic means that we can't support batched inference with images of different resolutions or # text prompts of differents lengths. Is this a use case we want to support? if img_ids.ndim == 3: img_ids = img_ids[0] if txt_ids.ndim == 3: txt_ids = txt_ids[0] image_rotary_emb = self.pos_embed(img_ids) text_rotary_emb = self.pos_embed(txt_ids) concat_rotary_emb = ( torch.cat([text_rotary_emb[0], image_rotary_emb[0]], dim=0), torch.cat([text_rotary_emb[1], image_rotary_emb[1]], dim=0), ) # 4. Double Stream Transformer Blocks for index_block, block in enumerate(self.transformer_blocks): if torch.is_grad_enabled() and self.gradient_checkpointing: encoder_hidden_states, hidden_states = self._gradient_checkpointing_func( block, hidden_states, encoder_hidden_states, double_stream_mod_img, double_stream_mod_txt, concat_rotary_emb, joint_attention_kwargs, ) else: encoder_hidden_states, hidden_states = block( hidden_states=hidden_states, encoder_hidden_states=encoder_hidden_states, temb_mod_img=double_stream_mod_img, temb_mod_txt=double_stream_mod_txt, image_rotary_emb=concat_rotary_emb, joint_attention_kwargs=joint_attention_kwargs, ) # Concatenate text and image streams for single-block inference hidden_states = torch.cat([encoder_hidden_states, hidden_states], dim=1) # 5. Single Stream Transformer Blocks for index_block, block in enumerate(self.single_transformer_blocks): if torch.is_grad_enabled() and self.gradient_checkpointing: hidden_states = self._gradient_checkpointing_func( block, hidden_states, None, single_stream_mod, concat_rotary_emb, joint_attention_kwargs, ) else: hidden_states = block( hidden_states=hidden_states, encoder_hidden_states=None, temb_mod=single_stream_mod, image_rotary_emb=concat_rotary_emb, joint_attention_kwargs=joint_attention_kwargs, ) # Remove text tokens from concatenated stream hidden_states = hidden_states[:, num_txt_tokens:, ...] # 6. 
Output layers hidden_states = self.norm_out(hidden_states, temb) output = self.proj_out(hidden_states) if not return_dict: return (output,) return Transformer2DModelOutput(sample=output)
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/models/transformers/transformer_flux2.py", "license": "Apache License 2.0", "lines": 760, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:src/diffusers/pipelines/flux2/image_processor.py
# Copyright 2025 The Black Forest Labs Team and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math import PIL.Image from ...configuration_utils import register_to_config from ...image_processor import VaeImageProcessor class Flux2ImageProcessor(VaeImageProcessor): r""" Image processor to preprocess the reference (character) image for the Flux2 model. Args: do_resize (`bool`, *optional*, defaults to `True`): Whether to downscale the image's (height, width) dimensions to multiples of `vae_scale_factor`. Can accept `height` and `width` arguments from [`image_processor.VaeImageProcessor.preprocess`] method. vae_scale_factor (`int`, *optional*, defaults to `16`): VAE (spatial) scale factor. If `do_resize` is `True`, the image is automatically resized to multiples of this factor. vae_latent_channels (`int`, *optional*, defaults to `32`): VAE latent channels. do_normalize (`bool`, *optional*, defaults to `True`): Whether to normalize the image to [-1,1]. do_convert_rgb (`bool`, *optional*, defaults to be `True`): Whether to convert the images to RGB format. 
""" @register_to_config def __init__( self, do_resize: bool = True, vae_scale_factor: int = 16, vae_latent_channels: int = 32, do_normalize: bool = True, do_convert_rgb: bool = True, ): super().__init__( do_resize=do_resize, vae_scale_factor=vae_scale_factor, vae_latent_channels=vae_latent_channels, do_normalize=do_normalize, do_convert_rgb=do_convert_rgb, ) @staticmethod def check_image_input( image: PIL.Image.Image, max_aspect_ratio: int = 8, min_side_length: int = 64, max_area: int = 1024 * 1024 ) -> PIL.Image.Image: """ Check if image meets minimum size and aspect ratio requirements. Args: image: PIL Image to validate max_aspect_ratio: Maximum allowed aspect ratio (width/height or height/width) min_side_length: Minimum pixels required for width and height max_area: Maximum allowed area in pixels² Returns: The input image if valid Raises: ValueError: If image is too small or aspect ratio is too extreme """ if not isinstance(image, PIL.Image.Image): raise ValueError(f"Image must be a PIL.Image.Image, got {type(image)}") width, height = image.size # Check minimum dimensions if width < min_side_length or height < min_side_length: raise ValueError( f"Image too small: {width}×{height}. Both dimensions must be at least {min_side_length}px" ) # Check aspect ratio aspect_ratio = max(width / height, height / width) if aspect_ratio > max_aspect_ratio: raise ValueError( f"Aspect ratio too extreme: {width}×{height} (ratio: {aspect_ratio:.1f}:1). 
" f"Maximum allowed ratio is {max_aspect_ratio}:1" ) return image @staticmethod def _resize_to_target_area(image: PIL.Image.Image, target_area: int = 1024 * 1024) -> PIL.Image.Image: image_width, image_height = image.size scale = math.sqrt(target_area / (image_width * image_height)) width = int(image_width * scale) height = int(image_height * scale) return image.resize((width, height), PIL.Image.Resampling.LANCZOS) @staticmethod def _resize_if_exceeds_area(image, target_area=1024 * 1024) -> PIL.Image.Image: image_width, image_height = image.size pixel_count = image_width * image_height if pixel_count <= target_area: return image return Flux2ImageProcessor._resize_to_target_area(image, target_area) def _resize_and_crop( self, image: PIL.Image.Image, width: int, height: int, ) -> PIL.Image.Image: r""" center crop the image to the specified width and height. Args: image (`PIL.Image.Image`): The image to resize and crop. width (`int`): The width to resize the image to. height (`int`): The height to resize the image to. Returns: `PIL.Image.Image`: The resized and cropped image. """ image_width, image_height = image.size left = (image_width - width) // 2 top = (image_height - height) // 2 right = left + width bottom = top + height return image.crop((left, top, right, bottom)) # Taken from # https://github.com/black-forest-labs/flux2/blob/5a5d316b1b42f6b59a8c9194b77c8256be848432/src/flux2/sampling.py#L310C1-L339C19 @staticmethod def concatenate_images(images: list[PIL.Image.Image]) -> PIL.Image.Image: """ Concatenate a list of PIL images horizontally with center alignment and white background. 
""" # If only one image, return a copy of it if len(images) == 1: return images[0].copy() # Convert all images to RGB if not already images = [img.convert("RGB") if img.mode != "RGB" else img for img in images] # Calculate dimensions for horizontal concatenation total_width = sum(img.width for img in images) max_height = max(img.height for img in images) # Create new image with white background background_color = (255, 255, 255) new_img = PIL.Image.new("RGB", (total_width, max_height), background_color) # Paste images with center alignment x_offset = 0 for img in images: y_offset = (max_height - img.height) // 2 new_img.paste(img, (x_offset, y_offset)) x_offset += img.width return new_img
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/pipelines/flux2/image_processor.py", "license": "Apache License 2.0", "lines": 146, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:src/diffusers/pipelines/flux2/pipeline_flux2.py
# Copyright 2025 Black Forest Labs and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect from typing import Any, Callable import numpy as np import PIL import torch from transformers import AutoProcessor, Mistral3ForConditionalGeneration from ...loaders import Flux2LoraLoaderMixin from ...models import AutoencoderKLFlux2, Flux2Transformer2DModel from ...schedulers import FlowMatchEulerDiscreteScheduler from ...utils import is_torch_xla_available, logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline from .image_processor import Flux2ImageProcessor from .pipeline_output import Flux2PipelineOutput from .system_messages import SYSTEM_MESSAGE, SYSTEM_MESSAGE_UPSAMPLING_I2I, SYSTEM_MESSAGE_UPSAMPLING_T2I if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py >>> import torch >>> from diffusers import Flux2Pipeline >>> pipe = Flux2Pipeline.from_pretrained("black-forest-labs/FLUX.2-dev", torch_dtype=torch.bfloat16) >>> pipe.to("cuda") >>> prompt = "A cat holding a sign that says hello world" >>> # Depending on the variant being used, the pipeline call will slightly vary. >>> # Refer to the pipeline documentation for more details. 
>>> image = pipe(prompt, num_inference_steps=50, guidance_scale=2.5).images[0] >>> image.save("flux.png") ``` """ UPSAMPLING_MAX_IMAGE_SIZE = 768**2 # Adapted from # https://github.com/black-forest-labs/flux2/blob/5a5d316b1b42f6b59a8c9194b77c8256be848432/src/flux2/text_encoder.py#L68 def format_input( prompts: list[str], system_message: str = SYSTEM_MESSAGE, images: list[PIL.Image.Image, list[list[PIL.Image.Image]]] | None = None, ): """ Format a batch of text prompts into the conversation format expected by apply_chat_template. Optionally, add images to the input. Args: prompts: List of text prompts system_message: System message to use (default: CREATIVE_SYSTEM_MESSAGE) images (optional): List of images to add to the input. Returns: List of conversations, where each conversation is a list of message dicts """ # Remove [IMG] tokens from prompts to avoid Pixtral validation issues # when truncation is enabled. The processor counts [IMG] tokens and fails # if the count changes after truncation. cleaned_txt = [prompt.replace("[IMG]", "") for prompt in prompts] if images is None or len(images) == 0: return [ [ { "role": "system", "content": [{"type": "text", "text": system_message}], }, {"role": "user", "content": [{"type": "text", "text": prompt}]}, ] for prompt in cleaned_txt ] else: assert len(images) == len(prompts), "Number of images must match number of prompts" messages = [ [ { "role": "system", "content": [{"type": "text", "text": system_message}], }, ] for _ in cleaned_txt ] for i, (el, images) in enumerate(zip(messages, images)): # optionally add the images per batch element. if images is not None: el.append( { "role": "user", "content": [{"type": "image", "image": image_obj} for image_obj in images], } ) # add the text. 
el.append( { "role": "user", "content": [{"type": "text", "text": cleaned_txt[i]}], } ) return messages # Adapted from # https://github.com/black-forest-labs/flux2/blob/5a5d316b1b42f6b59a8c9194b77c8256be848432/src/flux2/text_encoder.py#L49C5-L66C19 def _validate_and_process_images( images: list[list[PIL.Image.Image]] | list[PIL.Image.Image], image_processor: Flux2ImageProcessor, upsampling_max_image_size: int, ) -> list[list[PIL.Image.Image]]: # Simple validation: ensure it's a list of PIL images or list of lists of PIL images if not images: return [] # Check if it's a list of lists or a list of images if isinstance(images[0], PIL.Image.Image): # It's a list of images, convert to list of lists images = [[im] for im in images] # potentially concatenate multiple images to reduce the size images = [[image_processor.concatenate_images(img_i)] if len(img_i) > 1 else img_i for img_i in images] # cap the pixels images = [ [image_processor._resize_if_exceeds_area(img_i, upsampling_max_image_size) for img_i in img_i] for img_i in images ] return images # Taken from # https://github.com/black-forest-labs/flux2/blob/5a5d316b1b42f6b59a8c9194b77c8256be848432/src/flux2/sampling.py#L251 def compute_empirical_mu(image_seq_len: int, num_steps: int) -> float: a1, b1 = 8.73809524e-05, 1.89833333 a2, b2 = 0.00016927, 0.45666666 if image_seq_len > 4300: mu = a2 * image_seq_len + b2 return float(mu) m_200 = a2 * image_seq_len + b2 m_10 = a1 * image_seq_len + b1 a = (m_200 - m_10) / 190.0 b = m_200 - 200.0 * a mu = a * num_steps + b return float(mu) # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps def retrieve_timesteps( scheduler, num_inference_steps: int | None = None, device: str | torch.device | None = None, timesteps: list[int] | None = None, sigmas: list[float] | None = None, **kwargs, ): r""" Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles custom timesteps. 
Any kwargs will be supplied to `scheduler.set_timesteps`. Args: scheduler (`SchedulerMixin`): The scheduler to get timesteps from. num_inference_steps (`int`): The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` must be `None`. device (`str` or `torch.device`, *optional*): The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. timesteps (`list[int]`, *optional*): Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, `num_inference_steps` and `sigmas` must be `None`. sigmas (`list[float]`, *optional*): Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, `num_inference_steps` and `timesteps` must be `None`. Returns: `tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the second element is the number of inference steps. """ if timesteps is not None and sigmas is not None: raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") if timesteps is not None: accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" timestep schedules. Please check whether you are using the correct scheduler." ) scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif sigmas is not None: accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accept_sigmas: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" sigmas schedules. Please check whether you are using the correct scheduler." 
) scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return timesteps, num_inference_steps # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents def retrieve_latents( encoder_output: torch.Tensor, generator: torch.Generator | None = None, sample_mode: str = "sample" ): if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": return encoder_output.latent_dist.sample(generator) elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": return encoder_output.latent_dist.mode() elif hasattr(encoder_output, "latents"): return encoder_output.latents else: raise AttributeError("Could not access latents of provided encoder_output") class Flux2Pipeline(DiffusionPipeline, Flux2LoraLoaderMixin): r""" The Flux2 pipeline for text-to-image generation. Reference: [https://bfl.ai/blog/flux-2](https://bfl.ai/blog/flux-2) Args: transformer ([`Flux2Transformer2DModel`]): Conditional Transformer (MMDiT) architecture to denoise the encoded image latents. scheduler ([`FlowMatchEulerDiscreteScheduler`]): A scheduler to be used in combination with `transformer` to denoise the encoded image latents. vae ([`AutoencoderKLFlux2`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. text_encoder ([`Mistral3ForConditionalGeneration`]): [Mistral3ForConditionalGeneration](https://huggingface.co/docs/transformers/en/model_doc/mistral3#transformers.Mistral3ForConditionalGeneration) tokenizer (`AutoProcessor`): Tokenizer of class [PixtralProcessor](https://huggingface.co/docs/transformers/en/model_doc/pixtral#transformers.PixtralProcessor). 
""" model_cpu_offload_seq = "text_encoder->transformer->vae" _callback_tensor_inputs = ["latents", "prompt_embeds"] def __init__( self, scheduler: FlowMatchEulerDiscreteScheduler, vae: AutoencoderKLFlux2, text_encoder: Mistral3ForConditionalGeneration, tokenizer: AutoProcessor, transformer: Flux2Transformer2DModel, ): super().__init__() self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, scheduler=scheduler, transformer=transformer, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8 # Flux latents are turned into 2x2 patches and packed. This means the latent width and height has to be divisible # by the patch size. So the vae scale factor is multiplied by the patch size to account for this self.image_processor = Flux2ImageProcessor(vae_scale_factor=self.vae_scale_factor * 2) self.tokenizer_max_length = 512 self.default_sample_size = 128 self.system_message = SYSTEM_MESSAGE self.system_message_upsampling_t2i = SYSTEM_MESSAGE_UPSAMPLING_T2I self.system_message_upsampling_i2i = SYSTEM_MESSAGE_UPSAMPLING_I2I self.upsampling_max_image_size = UPSAMPLING_MAX_IMAGE_SIZE @staticmethod def _get_mistral_3_small_prompt_embeds( text_encoder: Mistral3ForConditionalGeneration, tokenizer: AutoProcessor, prompt: str | list[str], dtype: torch.dtype | None = None, device: torch.device | None = None, max_sequence_length: int = 512, system_message: str = SYSTEM_MESSAGE, hidden_states_layers: list[int] = (10, 20, 30), ): dtype = text_encoder.dtype if dtype is None else dtype device = text_encoder.device if device is None else device prompt = [prompt] if isinstance(prompt, str) else prompt # Format input messages messages_batch = format_input(prompts=prompt, system_message=system_message) # Process all messages at once inputs = tokenizer.apply_chat_template( messages_batch, add_generation_prompt=False, tokenize=True, return_dict=True, return_tensors="pt", padding="max_length", truncation=True, 
max_length=max_sequence_length, ) # Move to device input_ids = inputs["input_ids"].to(device) attention_mask = inputs["attention_mask"].to(device) # Forward pass through the model output = text_encoder( input_ids=input_ids, attention_mask=attention_mask, output_hidden_states=True, use_cache=False, ) # Only use outputs from intermediate layers and stack them out = torch.stack([output.hidden_states[k] for k in hidden_states_layers], dim=1) out = out.to(dtype=dtype, device=device) batch_size, num_channels, seq_len, hidden_dim = out.shape prompt_embeds = out.permute(0, 2, 1, 3).reshape(batch_size, seq_len, num_channels * hidden_dim) return prompt_embeds @staticmethod def _prepare_text_ids( x: torch.Tensor, # (B, L, D) or (L, D) t_coord: torch.Tensor | None = None, ): B, L, _ = x.shape out_ids = [] for i in range(B): t = torch.arange(1) if t_coord is None else t_coord[i] h = torch.arange(1) w = torch.arange(1) l = torch.arange(L) coords = torch.cartesian_prod(t, h, w, l) out_ids.append(coords) return torch.stack(out_ids) @staticmethod def _prepare_latent_ids( latents: torch.Tensor, # (B, C, H, W) ): r""" Generates 4D position coordinates (T, H, W, L) for latent tensors. Args: latents (torch.Tensor): Latent tensor of shape (B, C, H, W) Returns: torch.Tensor: Position IDs tensor of shape (B, H*W, 4) All batches share the same coordinate structure: T=0, H=[0..H-1], W=[0..W-1], L=0 """ batch_size, _, height, width = latents.shape t = torch.arange(1) # [0] - time dimension h = torch.arange(height) w = torch.arange(width) l = torch.arange(1) # [0] - layer dimension # Create position IDs: (H*W, 4) latent_ids = torch.cartesian_prod(t, h, w, l) # Expand to batch: (B, H*W, 4) latent_ids = latent_ids.unsqueeze(0).expand(batch_size, -1, -1) return latent_ids @staticmethod def _prepare_image_ids( image_latents: list[torch.Tensor], # [(1, C, H, W), (1, C, H, W), ...] scale: int = 10, ): r""" Generates 4D time-space coordinates (T, H, W, L) for a sequence of image latents. 
This function creates a unique coordinate for every pixel/patch across all input latent with different dimensions. Args: image_latents (list[torch.Tensor]): A list of image latent feature tensors, typically of shape (C, H, W). scale (int, optional): A factor used to define the time separation (T-coordinate) between latents. T-coordinate for the i-th latent is: 'scale + scale * i'. Defaults to 10. Returns: torch.Tensor: The combined coordinate tensor. Shape: (1, N_total, 4) Where N_total is the sum of (H * W) for all input latents. Coordinate Components (Dimension 4): - T (Time): The unique index indicating which latent image the coordinate belongs to. - H (Height): The row index within that latent image. - W (Width): The column index within that latent image. - L (Seq. Length): A sequence length dimension, which is always fixed at 0 (size 1) """ if not isinstance(image_latents, list): raise ValueError(f"Expected `image_latents` to be a list, got {type(image_latents)}.") # create time offset for each reference image t_coords = [scale + scale * t for t in torch.arange(0, len(image_latents))] t_coords = [t.view(-1) for t in t_coords] image_latent_ids = [] for x, t in zip(image_latents, t_coords): x = x.squeeze(0) _, height, width = x.shape x_ids = torch.cartesian_prod(t, torch.arange(height), torch.arange(width), torch.arange(1)) image_latent_ids.append(x_ids) image_latent_ids = torch.cat(image_latent_ids, dim=0) image_latent_ids = image_latent_ids.unsqueeze(0) return image_latent_ids @staticmethod def _patchify_latents(latents): batch_size, num_channels_latents, height, width = latents.shape latents = latents.view(batch_size, num_channels_latents, height // 2, 2, width // 2, 2) latents = latents.permute(0, 1, 3, 5, 2, 4) latents = latents.reshape(batch_size, num_channels_latents * 4, height // 2, width // 2) return latents @staticmethod def _unpatchify_latents(latents): batch_size, num_channels_latents, height, width = latents.shape latents = 
latents.reshape(batch_size, num_channels_latents // (2 * 2), 2, 2, height, width) latents = latents.permute(0, 1, 4, 2, 5, 3) latents = latents.reshape(batch_size, num_channels_latents // (2 * 2), height * 2, width * 2) return latents @staticmethod def _pack_latents(latents): """ pack latents: (batch_size, num_channels, height, width) -> (batch_size, height * width, num_channels) """ batch_size, num_channels, height, width = latents.shape latents = latents.reshape(batch_size, num_channels, height * width).permute(0, 2, 1) return latents @staticmethod def _unpack_latents_with_ids(x: torch.Tensor, x_ids: torch.Tensor) -> list[torch.Tensor]: """ using position ids to scatter tokens into place """ x_list = [] for data, pos in zip(x, x_ids): _, ch = data.shape # noqa: F841 h_ids = pos[:, 1].to(torch.int64) w_ids = pos[:, 2].to(torch.int64) h = torch.max(h_ids) + 1 w = torch.max(w_ids) + 1 flat_ids = h_ids * w + w_ids out = torch.zeros((h * w, ch), device=data.device, dtype=data.dtype) out.scatter_(0, flat_ids.unsqueeze(1).expand(-1, ch), data) # reshape from (H * W, C) to (H, W, C) and permute to (C, H, W) out = out.view(h, w, ch).permute(2, 0, 1) x_list.append(out) return torch.stack(x_list, dim=0) def upsample_prompt( self, prompt: str | list[str], images: list[PIL.Image.Image, list[list[PIL.Image.Image]]] = None, temperature: float = 0.15, device: torch.device = None, ) -> list[str]: prompt = [prompt] if isinstance(prompt, str) else prompt device = self.text_encoder.device if device is None else device # Set system message based on whether images are provided if images is None or len(images) == 0 or images[0] is None: system_message = SYSTEM_MESSAGE_UPSAMPLING_T2I else: system_message = SYSTEM_MESSAGE_UPSAMPLING_I2I # Validate and process the input images if images: images = _validate_and_process_images(images, self.image_processor, self.upsampling_max_image_size) # Format input messages messages_batch = format_input(prompts=prompt, system_message=system_message, 
images=images) # Process all messages at once # with image processing a too short max length can throw an error in here. inputs = self.tokenizer.apply_chat_template( messages_batch, add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors="pt", padding="max_length", truncation=True, max_length=2048, ) # Move to device inputs["input_ids"] = inputs["input_ids"].to(device) inputs["attention_mask"] = inputs["attention_mask"].to(device) if "pixel_values" in inputs: inputs["pixel_values"] = inputs["pixel_values"].to(device, self.text_encoder.dtype) # Generate text using the model's generate method generated_ids = self.text_encoder.generate( **inputs, max_new_tokens=512, do_sample=True, temperature=temperature, use_cache=True, ) # Decode only the newly generated tokens (skip input tokens) # Extract only the generated portion input_length = inputs["input_ids"].shape[1] generated_tokens = generated_ids[:, input_length:] upsampled_prompt = self.tokenizer.tokenizer.batch_decode( generated_tokens, skip_special_tokens=True, clean_up_tokenization_spaces=True ) return upsampled_prompt def encode_prompt( self, prompt: str | list[str], device: torch.device | None = None, num_images_per_prompt: int = 1, prompt_embeds: torch.Tensor | None = None, max_sequence_length: int = 512, text_encoder_out_layers: tuple[int] = (10, 20, 30), ): device = device or self._execution_device if prompt is None: prompt = "" prompt = [prompt] if isinstance(prompt, str) else prompt if prompt_embeds is None: prompt_embeds = self._get_mistral_3_small_prompt_embeds( text_encoder=self.text_encoder, tokenizer=self.tokenizer, prompt=prompt, device=device, max_sequence_length=max_sequence_length, system_message=self.system_message, hidden_states_layers=text_encoder_out_layers, ) batch_size, seq_len, _ = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) text_ids = 
self._prepare_text_ids(prompt_embeds) text_ids = text_ids.to(device) return prompt_embeds, text_ids def _encode_vae_image(self, image: torch.Tensor, generator: torch.Generator): if image.ndim != 4: raise ValueError(f"Expected image dims 4, got {image.ndim}.") image_latents = retrieve_latents(self.vae.encode(image), generator=generator, sample_mode="argmax") image_latents = self._patchify_latents(image_latents) latents_bn_mean = self.vae.bn.running_mean.view(1, -1, 1, 1).to(image_latents.device, image_latents.dtype) latents_bn_std = torch.sqrt(self.vae.bn.running_var.view(1, -1, 1, 1) + self.vae.config.batch_norm_eps) image_latents = (image_latents - latents_bn_mean) / latents_bn_std return image_latents def prepare_latents( self, batch_size, num_latents_channels, height, width, dtype, device, generator: torch.Generator, latents: torch.Tensor | None = None, ): # VAE applies 8x compression on images but we must also account for packing which requires # latent height and width to be divisible by 2. height = 2 * (int(height) // (self.vae_scale_factor * 2)) width = 2 * (int(width) // (self.vae_scale_factor * 2)) shape = (batch_size, num_latents_channels * 4, height // 2, width // 2) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
) if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device=device, dtype=dtype) latent_ids = self._prepare_latent_ids(latents) latent_ids = latent_ids.to(device) latents = self._pack_latents(latents) # [B, C, H, W] -> [B, H*W, C] return latents, latent_ids def prepare_image_latents( self, images: list[torch.Tensor], batch_size, generator: torch.Generator, device, dtype, ): image_latents = [] for image in images: image = image.to(device=device, dtype=dtype) imagge_latent = self._encode_vae_image(image=image, generator=generator) image_latents.append(imagge_latent) # (1, 128, 32, 32) image_latent_ids = self._prepare_image_ids(image_latents) # Pack each latent and concatenate packed_latents = [] for latent in image_latents: # latent: (1, 128, 32, 32) packed = self._pack_latents(latent) # (1, 1024, 128) packed = packed.squeeze(0) # (1024, 128) - remove batch dim packed_latents.append(packed) # Concatenate all reference tokens along sequence dimension image_latents = torch.cat(packed_latents, dim=0) # (N*1024, 128) image_latents = image_latents.unsqueeze(0) # (1, N*1024, 128) image_latents = image_latents.repeat(batch_size, 1, 1) image_latent_ids = image_latent_ids.repeat(batch_size, 1, 1) image_latent_ids = image_latent_ids.to(device) return image_latents, image_latent_ids def check_inputs( self, prompt, height, width, prompt_embeds=None, callback_on_step_end_tensor_inputs=None, ): if ( height is not None and height % (self.vae_scale_factor * 2) != 0 or width is not None and width % (self.vae_scale_factor * 2) != 0 ): logger.warning( f"`height` and `width` have to be divisible by {self.vae_scale_factor * 2} but are {height} and {width}. 
Dimensions will be resized accordingly" ) if callback_on_step_end_tensor_inputs is not None and not all( k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs ): raise ValueError( f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" ) if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." ) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") @property def guidance_scale(self): return self._guidance_scale @property def attention_kwargs(self): return self._attention_kwargs @property def num_timesteps(self): return self._num_timesteps @property def current_timestep(self): return self._current_timestep @property def interrupt(self): return self._interrupt @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, image: list[PIL.Image.Image, PIL.Image.Image] | None = None, prompt: str | list[str] = None, height: int | None = None, width: int | None = None, num_inference_steps: int = 50, sigmas: list[float] | None = None, guidance_scale: float | None = 4.0, num_images_per_prompt: int = 1, generator: torch.Generator | list[torch.Generator] | None = None, latents: torch.Tensor | None = None, prompt_embeds: torch.Tensor | None = None, output_type: str | None = "pil", return_dict: bool = True, attention_kwargs: dict[str, Any] | None = None, callback_on_step_end: Callable[[int, int], None] | None = None, callback_on_step_end_tensor_inputs: list[str] = 
["latents"], max_sequence_length: int = 512, text_encoder_out_layers: tuple[int] = (10, 20, 30), caption_upsample_temperature: float = None, ): r""" Function invoked when calling the pipeline for generation. Args: image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `list[torch.Tensor]`, `list[PIL.Image.Image]`, or `list[np.ndarray]`): `Image`, numpy array or tensor representing an image batch to be used as the starting point. For both numpy array and pytorch tensor, the expected value range is between `[0, 1]` If it's a tensor or a list or tensors, the expected shape should be `(B, C, H, W)` or `(C, H, W)`. If it is a numpy array or a list of arrays, the expected shape should be `(B, H, W, C)` or `(H, W, C)` It can also accept image latents as `image`, but if passing latents directly it is not encoded again. prompt (`str` or `list[str]`, *optional*): The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. instead. guidance_scale (`float`, *optional*, defaults to 1.0): Embedded guiddance scale is enabled by setting `guidance_scale` > 1. Higher `guidance_scale` encourages a model to generate images more aligned with `prompt` at the expense of lower image quality. Guidance-distilled models approximates true classifer-free guidance for `guidance_scale` > 1. Refer to the [paper](https://huggingface.co/papers/2210.03142) to learn more. height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): The height in pixels of the generated image. This is set to 1024 by default for the best results. width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): The width in pixels of the generated image. This is set to 1024 by default for the best results. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. 
sigmas (`list[float]`, *optional*): Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed will be used. num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. generator (`torch.Generator` or `list[torch.Generator]`, *optional*): One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.qwenimage.QwenImagePipelineOutput`] instead of a plain tuple. attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). callback_on_step_end (`Callable`, *optional*): A function that calls at the end of each denoising steps during the inference. 
The function is called with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by `callback_on_step_end_tensor_inputs`. callback_on_step_end_tensor_inputs (`List`, *optional*): The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the `._callback_tensor_inputs` attribute of your pipeline class. max_sequence_length (`int` defaults to 512): Maximum sequence length to use with the `prompt`. text_encoder_out_layers (`tuple[int]`): Layer indices to use in the `text_encoder` to derive the final prompt embeddings. caption_upsample_temperature (`float`): When specified, we will try to perform caption upsampling for potentially improved outputs. We recommend setting it to 0.15 if caption upsampling is to be performed. Examples: Returns: [`~pipelines.flux2.Flux2PipelineOutput`] or `tuple`: [`~pipelines.flux2.Flux2PipelineOutput`] if `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is a list with the generated images. """ # 1. Check inputs. Raise error if not correct self.check_inputs( prompt=prompt, height=height, width=width, prompt_embeds=prompt_embeds, callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, ) self._guidance_scale = guidance_scale self._attention_kwargs = attention_kwargs self._current_timestep = None self._interrupt = False # 2. Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device # 3. 
prepare text embeddings if caption_upsample_temperature: prompt = self.upsample_prompt( prompt, images=image, temperature=caption_upsample_temperature, device=device ) prompt_embeds, text_ids = self.encode_prompt( prompt=prompt, prompt_embeds=prompt_embeds, device=device, num_images_per_prompt=num_images_per_prompt, max_sequence_length=max_sequence_length, text_encoder_out_layers=text_encoder_out_layers, ) # 4. process images if image is not None and not isinstance(image, list): image = [image] condition_images = None if image is not None: for img in image: self.image_processor.check_image_input(img) condition_images = [] for img in image: image_width, image_height = img.size if image_width * image_height > 1024 * 1024: img = self.image_processor._resize_to_target_area(img, 1024 * 1024) image_width, image_height = img.size multiple_of = self.vae_scale_factor * 2 image_width = (image_width // multiple_of) * multiple_of image_height = (image_height // multiple_of) * multiple_of img = self.image_processor.preprocess(img, height=image_height, width=image_width, resize_mode="crop") condition_images.append(img) height = height or image_height width = width or image_width height = height or self.default_sample_size * self.vae_scale_factor width = width or self.default_sample_size * self.vae_scale_factor # 5. prepare latent variables num_channels_latents = self.transformer.config.in_channels // 4 latents, latent_ids = self.prepare_latents( batch_size=batch_size * num_images_per_prompt, num_latents_channels=num_channels_latents, height=height, width=width, dtype=prompt_embeds.dtype, device=device, generator=generator, latents=latents, ) image_latents = None image_latent_ids = None if condition_images is not None: image_latents, image_latent_ids = self.prepare_image_latents( images=condition_images, batch_size=batch_size * num_images_per_prompt, generator=generator, device=device, dtype=self.vae.dtype, ) # 6. 
Prepare timesteps sigmas = np.linspace(1.0, 1 / num_inference_steps, num_inference_steps) if sigmas is None else sigmas if hasattr(self.scheduler.config, "use_flow_sigmas") and self.scheduler.config.use_flow_sigmas: sigmas = None image_seq_len = latents.shape[1] mu = compute_empirical_mu(image_seq_len=image_seq_len, num_steps=num_inference_steps) timesteps, num_inference_steps = retrieve_timesteps( self.scheduler, num_inference_steps, device, sigmas=sigmas, mu=mu, ) num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) self._num_timesteps = len(timesteps) # handle guidance guidance = torch.full([1], guidance_scale, device=device, dtype=torch.float32) guidance = guidance.expand(latents.shape[0]) # 7. Denoising loop # We set the index here to remove DtoH sync, helpful especially during compilation. # Check out more details here: https://github.com/huggingface/diffusers/pull/11696 self.scheduler.set_begin_index(0) with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): if self.interrupt: continue self._current_timestep = t # broadcast to batch dimension in a way that's compatible with ONNX/Core ML timestep = t.expand(latents.shape[0]).to(latents.dtype) latent_model_input = latents.to(self.transformer.dtype) latent_image_ids = latent_ids if image_latents is not None: latent_model_input = torch.cat([latents, image_latents], dim=1).to(self.transformer.dtype) latent_image_ids = torch.cat([latent_ids, image_latent_ids], dim=1) noise_pred = self.transformer( hidden_states=latent_model_input, # (B, image_seq_len, C) timestep=timestep / 1000, guidance=guidance, encoder_hidden_states=prompt_embeds, txt_ids=text_ids, # B, text_seq_len, 4 img_ids=latent_image_ids, # B, image_seq_len, 4 joint_attention_kwargs=self.attention_kwargs, return_dict=False, )[0] noise_pred = noise_pred[:, : latents.size(1) :] # compute the previous noisy sample x_t -> x_t-1 latents_dtype = latents.dtype latents = 
self.scheduler.step(noise_pred, t, latents, return_dict=False)[0] if latents.dtype != latents_dtype: if torch.backends.mps.is_available(): # some platforms (eg. apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272 latents = latents.to(latents_dtype) if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop("latents", latents) prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if XLA_AVAILABLE: xm.mark_step() self._current_timestep = None if output_type == "latent": image = latents else: latents = self._unpack_latents_with_ids(latents, latent_ids) latents_bn_mean = self.vae.bn.running_mean.view(1, -1, 1, 1).to(latents.device, latents.dtype) latents_bn_std = torch.sqrt(self.vae.bn.running_var.view(1, -1, 1, 1) + self.vae.config.batch_norm_eps).to( latents.device, latents.dtype ) latents = latents * latents_bn_std + latents_bn_mean latents = self._unpatchify_latents(latents) image = self.vae.decode(latents, return_dict=False)[0] image = self.image_processor.postprocess(image, output_type=output_type) # Offload all models self.maybe_free_model_hooks() if not return_dict: return (image,) return Flux2PipelineOutput(images=image)
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/pipelines/flux2/pipeline_flux2.py", "license": "Apache License 2.0", "lines": 869, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:src/diffusers/pipelines/flux2/pipeline_output.py
from dataclasses import dataclass import numpy as np import PIL.Image from ...utils import BaseOutput @dataclass class Flux2PipelineOutput(BaseOutput): """ Output class for Flux2 image generation pipelines. Args: images (`list[PIL.Image.Image]` or `torch.Tensor` or `np.ndarray`) List of denoised PIL images of length `batch_size` or numpy array or torch tensor of shape `(batch_size, height, width, num_channels)`. PIL images or numpy array present the denoised images of the diffusion pipeline. Torch tensors can represent either the denoised images or the intermediate latents ready to be passed to the decoder. """ images: list[PIL.Image.Image, np.ndarray]
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/pipelines/flux2/pipeline_output.py", "license": "Apache License 2.0", "lines": 16, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
documentation
huggingface/diffusers:tests/lora/test_lora_layers_flux2.py
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys import unittest import numpy as np import torch from transformers import AutoProcessor, Mistral3ForConditionalGeneration from diffusers import AutoencoderKLFlux2, FlowMatchEulerDiscreteScheduler, Flux2Pipeline, Flux2Transformer2DModel from ..testing_utils import floats_tensor, require_peft_backend, torch_device sys.path.append(".") from .utils import PeftLoraLoaderMixinTests, check_if_lora_correctly_set # noqa: E402 @require_peft_backend class Flux2LoRATests(unittest.TestCase, PeftLoraLoaderMixinTests): pipeline_class = Flux2Pipeline scheduler_cls = FlowMatchEulerDiscreteScheduler scheduler_kwargs = {} transformer_kwargs = { "patch_size": 1, "in_channels": 4, "num_layers": 1, "num_single_layers": 1, "attention_head_dim": 16, "num_attention_heads": 2, "joint_attention_dim": 16, "timestep_guidance_channels": 256, "axes_dims_rope": [4, 4, 4, 4], } transformer_cls = Flux2Transformer2DModel vae_kwargs = { "sample_size": 32, "in_channels": 3, "out_channels": 3, "down_block_types": ("DownEncoderBlock2D",), "up_block_types": ("UpDecoderBlock2D",), "block_out_channels": (4,), "layers_per_block": 1, "latent_channels": 1, "norm_num_groups": 1, "use_quant_conv": False, "use_post_quant_conv": False, } vae_cls = AutoencoderKLFlux2 tokenizer_cls, tokenizer_id = AutoProcessor, "hf-internal-testing/tiny-mistral3-diffusers" text_encoder_cls, text_encoder_id = Mistral3ForConditionalGeneration, 
"hf-internal-testing/tiny-mistral3-diffusers" denoiser_target_modules = ["to_qkv_mlp_proj", "to_k"] supports_text_encoder_loras = False @property def output_shape(self): return (1, 8, 8, 3) def get_dummy_inputs(self, with_generator=True): batch_size = 1 sequence_length = 10 num_channels = 4 sizes = (32, 32) generator = torch.manual_seed(0) noise = floats_tensor((batch_size, num_channels) + sizes) input_ids = torch.randint(1, sequence_length, size=(batch_size, sequence_length), generator=generator) pipeline_inputs = { "prompt": "a dog is dancing", "num_inference_steps": 2, "guidance_scale": 5.0, "height": 8, "width": 8, "max_sequence_length": 8, "output_type": "np", "text_encoder_out_layers": (1,), } if with_generator: pipeline_inputs.update({"generator": generator}) return noise, input_ids, pipeline_inputs # Overriding because (1) text encoder LoRAs are not supported in Flux 2 and (2) because the Flux 2 single block # QKV projections are always fused, it has no `to_q` param as expected by the original test. def test_lora_fuse_nan(self): components, _, denoiser_lora_config = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) _, _, inputs = self.get_dummy_inputs(with_generator=False) denoiser = pipe.transformer if self.unet_kwargs is None else pipe.unet denoiser.add_adapter(denoiser_lora_config, "adapter-1") self.assertTrue(check_if_lora_correctly_set(denoiser), "Lora not correctly set in denoiser.") # corrupt one LoRA weight with `inf` values with torch.no_grad(): possible_tower_names = ["transformer_blocks", "single_transformer_blocks"] filtered_tower_names = [ tower_name for tower_name in possible_tower_names if hasattr(pipe.transformer, tower_name) ] if len(filtered_tower_names) == 0: reason = f"`pipe.transformer` didn't have any of the following attributes: {possible_tower_names}." 
raise ValueError(reason) for tower_name in filtered_tower_names: transformer_tower = getattr(pipe.transformer, tower_name) is_single = "single" in tower_name if is_single: transformer_tower[0].attn.to_qkv_mlp_proj.lora_A["adapter-1"].weight += float("inf") else: transformer_tower[0].attn.to_k.lora_A["adapter-1"].weight += float("inf") # with `safe_fusing=True` we should see an Error with self.assertRaises(ValueError): pipe.fuse_lora(components=self.pipeline_class._lora_loadable_modules, safe_fusing=True) # without we should not see an error, but every image will be black pipe.fuse_lora(components=self.pipeline_class._lora_loadable_modules, safe_fusing=False) out = pipe(**inputs)[0] self.assertTrue(np.isnan(out).all()) @unittest.skip("Not supported in Flux2.") def test_simple_inference_with_text_denoiser_block_scale(self): pass @unittest.skip("Not supported in Flux2.") def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self): pass @unittest.skip("Not supported in Flux2.") def test_modify_padding_mode(self): pass
{ "repo_id": "huggingface/diffusers", "file_path": "tests/lora/test_lora_layers_flux2.py", "license": "Apache License 2.0", "lines": 125, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/diffusers:tests/models/transformers/test_models_transformer_flux2.py
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import torch from diffusers import Flux2Transformer2DModel, attention_backend from ...testing_utils import enable_full_determinism, torch_device from ..test_modeling_common import LoraHotSwappingForModelTesterMixin, ModelTesterMixin, TorchCompileTesterMixin enable_full_determinism() class Flux2TransformerTests(ModelTesterMixin, unittest.TestCase): model_class = Flux2Transformer2DModel main_input_name = "hidden_states" # We override the items here because the transformer under consideration is small. 
model_split_percents = [0.7, 0.6, 0.6] # Skip setting testing with default: AttnProcessor uses_custom_attn_processor = True @property def dummy_input(self): return self.prepare_dummy_input() @property def input_shape(self): return (16, 4) @property def output_shape(self): return (16, 4) def prepare_dummy_input(self, height=4, width=4): batch_size = 1 num_latent_channels = 4 sequence_length = 48 embedding_dim = 32 hidden_states = torch.randn((batch_size, height * width, num_latent_channels)).to(torch_device) encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device) t_coords = torch.arange(1) h_coords = torch.arange(height) w_coords = torch.arange(width) l_coords = torch.arange(1) image_ids = torch.cartesian_prod(t_coords, h_coords, w_coords, l_coords) # [height * width, 4] image_ids = image_ids.unsqueeze(0).expand(batch_size, -1, -1).to(torch_device) text_t_coords = torch.arange(1) text_h_coords = torch.arange(1) text_w_coords = torch.arange(1) text_l_coords = torch.arange(sequence_length) text_ids = torch.cartesian_prod(text_t_coords, text_h_coords, text_w_coords, text_l_coords) text_ids = text_ids.unsqueeze(0).expand(batch_size, -1, -1).to(torch_device) timestep = torch.tensor([1.0]).to(torch_device).expand(batch_size) guidance = torch.tensor([1.0]).to(torch_device).expand(batch_size) return { "hidden_states": hidden_states, "encoder_hidden_states": encoder_hidden_states, "img_ids": image_ids, "txt_ids": text_ids, "timestep": timestep, "guidance": guidance, } def prepare_init_args_and_inputs_for_common(self): init_dict = { "patch_size": 1, "in_channels": 4, "num_layers": 1, "num_single_layers": 1, "attention_head_dim": 16, "num_attention_heads": 2, "joint_attention_dim": 32, "timestep_guidance_channels": 256, # Hardcoded in original code "axes_dims_rope": [4, 4, 4, 4], } inputs_dict = self.dummy_input return init_dict, inputs_dict # TODO (Daniel, Sayak): We can remove this test. 
def test_flux2_consistency(self, seed=0): torch.manual_seed(seed) init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() torch.manual_seed(seed) model = self.model_class(**init_dict) # state_dict = model.state_dict() # for key, param in state_dict.items(): # print(f"{key} | {param.shape}") # torch.save(state_dict, "/raid/daniel_gu/test_flux2_params/diffusers.pt") model.to(torch_device) model.eval() with attention_backend("native"): with torch.no_grad(): output = model(**inputs_dict) if isinstance(output, dict): output = output.to_tuple()[0] self.assertIsNotNone(output) # input & output have to have the same shape input_tensor = inputs_dict[self.main_input_name] expected_shape = input_tensor.shape self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match") # Check against expected slice # fmt: off expected_slice = torch.tensor([-0.3662, 0.4844, 0.6334, -0.3497, 0.2162, 0.0188, 0.0521, -0.2061, -0.2041, -0.0342, -0.7107, 0.4797, -0.3280, 0.7059, -0.0849, 0.4416]) # fmt: on flat_output = output.cpu().flatten() generated_slice = torch.cat([flat_output[:8], flat_output[-8:]]) self.assertTrue(torch.allclose(generated_slice, expected_slice, atol=1e-4)) def test_gradient_checkpointing_is_applied(self): expected_set = {"Flux2Transformer2DModel"} super().test_gradient_checkpointing_is_applied(expected_set=expected_set) class Flux2TransformerCompileTests(TorchCompileTesterMixin, unittest.TestCase): model_class = Flux2Transformer2DModel different_shapes_for_compilation = [(4, 4), (4, 8), (8, 8)] def prepare_init_args_and_inputs_for_common(self): return Flux2TransformerTests().prepare_init_args_and_inputs_for_common() def prepare_dummy_input(self, height, width): return Flux2TransformerTests().prepare_dummy_input(height=height, width=width) class Flux2TransformerLoRAHotSwapTests(LoraHotSwappingForModelTesterMixin, unittest.TestCase): model_class = Flux2Transformer2DModel different_shapes_for_compilation = [(4, 4), (4, 8), (8, 8)] def 
prepare_init_args_and_inputs_for_common(self): return Flux2TransformerTests().prepare_init_args_and_inputs_for_common() def prepare_dummy_input(self, height, width): return Flux2TransformerTests().prepare_dummy_input(height=height, width=width)
{ "repo_id": "huggingface/diffusers", "file_path": "tests/models/transformers/test_models_transformer_flux2.py", "license": "Apache License 2.0", "lines": 125, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/diffusers:tests/pipelines/flux2/test_pipeline_flux2.py
import unittest import numpy as np import torch from transformers import AutoProcessor, Mistral3Config, Mistral3ForConditionalGeneration from diffusers import ( AutoencoderKLFlux2, FlowMatchEulerDiscreteScheduler, Flux2Pipeline, Flux2Transformer2DModel, ) from ...testing_utils import ( torch_device, ) from ..test_pipelines_common import ( PipelineTesterMixin, check_qkv_fused_layers_exist, ) class Flux2PipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = Flux2Pipeline params = frozenset(["prompt", "height", "width", "guidance_scale", "prompt_embeds"]) batch_params = frozenset(["prompt"]) test_xformers_attention = False test_layerwise_casting = True test_group_offloading = True supports_dduf = False def get_dummy_components(self, num_layers: int = 1, num_single_layers: int = 1): torch.manual_seed(0) transformer = Flux2Transformer2DModel( patch_size=1, in_channels=4, num_layers=num_layers, num_single_layers=num_single_layers, attention_head_dim=16, num_attention_heads=2, joint_attention_dim=16, timestep_guidance_channels=256, # Hardcoded in original code axes_dims_rope=[4, 4, 4, 4], ) config = Mistral3Config( text_config={ "model_type": "mistral", "vocab_size": 32000, "hidden_size": 16, "intermediate_size": 37, "max_position_embeddings": 512, "num_attention_heads": 4, "num_hidden_layers": 1, "num_key_value_heads": 2, "rms_norm_eps": 1e-05, "rope_theta": 1000000000.0, "sliding_window": None, "bos_token_id": 2, "eos_token_id": 3, "pad_token_id": 4, }, vision_config={ "model_type": "pixtral", "hidden_size": 16, "num_hidden_layers": 1, "num_attention_heads": 4, "intermediate_size": 37, "image_size": 30, "patch_size": 6, "num_channels": 3, }, bos_token_id=2, eos_token_id=3, pad_token_id=4, model_dtype="mistral3", image_seq_length=4, vision_feature_layer=-1, image_token_index=1, ) torch.manual_seed(0) text_encoder = Mistral3ForConditionalGeneration(config) tokenizer = AutoProcessor.from_pretrained( 
"hf-internal-testing/Mistral-Small-3.1-24B-Instruct-2503-only-processor" ) torch.manual_seed(0) vae = AutoencoderKLFlux2( sample_size=32, in_channels=3, out_channels=3, down_block_types=("DownEncoderBlock2D",), up_block_types=("UpDecoderBlock2D",), block_out_channels=(4,), layers_per_block=1, latent_channels=1, norm_num_groups=1, use_quant_conv=False, use_post_quant_conv=False, ) scheduler = FlowMatchEulerDiscreteScheduler() return { "scheduler": scheduler, "text_encoder": text_encoder, "tokenizer": tokenizer, "transformer": transformer, "vae": vae, } def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device="cpu").manual_seed(seed) inputs = { "prompt": "a dog is dancing", "generator": generator, "num_inference_steps": 2, "guidance_scale": 5.0, "height": 8, "width": 8, "max_sequence_length": 8, "output_type": "np", "text_encoder_out_layers": (1,), } return inputs def test_fused_qkv_projections(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = pipe(**inputs).images original_image_slice = image[0, -3:, -3:, -1] # TODO (sayakpaul): will refactor this once `fuse_qkv_projections()` has been added # to the pipeline level. pipe.transformer.fuse_qkv_projections() self.assertTrue( check_qkv_fused_layers_exist(pipe.transformer, ["to_qkv"]), ("Something wrong with the fused attention layers. 
Expected all the attention projections to be fused."), ) inputs = self.get_dummy_inputs(device) image = pipe(**inputs).images image_slice_fused = image[0, -3:, -3:, -1] pipe.transformer.unfuse_qkv_projections() inputs = self.get_dummy_inputs(device) image = pipe(**inputs).images image_slice_disabled = image[0, -3:, -3:, -1] self.assertTrue( np.allclose(original_image_slice, image_slice_fused, atol=1e-3, rtol=1e-3), ("Fusion of QKV projections shouldn't affect the outputs."), ) self.assertTrue( np.allclose(image_slice_fused, image_slice_disabled, atol=1e-3, rtol=1e-3), ("Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled."), ) self.assertTrue( np.allclose(original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2), ("Original outputs should match when fused QKV projections are disabled."), ) def test_flux_image_output_shape(self): pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device) inputs = self.get_dummy_inputs(torch_device) height_width_pairs = [(32, 32), (72, 57)] for height, width in height_width_pairs: expected_height = height - height % (pipe.vae_scale_factor * 2) expected_width = width - width % (pipe.vae_scale_factor * 2) inputs.update({"height": height, "width": width}) image = pipe(**inputs).images[0] output_height, output_width, _ = image.shape self.assertEqual( (output_height, output_width), (expected_height, expected_width), f"Output shape {image.shape} does not match expected shape {(expected_height, expected_width)}", )
{ "repo_id": "huggingface/diffusers", "file_path": "tests/pipelines/flux2/test_pipeline_flux2.py", "license": "Apache License 2.0", "lines": 167, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/diffusers:src/diffusers/models/transformers/transformer_z_image.py
# Copyright 2025 Alibaba Z-Image Team and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math import torch import torch.nn as nn import torch.nn.functional as F from torch.nn.utils.rnn import pad_sequence from ...configuration_utils import ConfigMixin, register_to_config from ...loaders import FromOriginalModelMixin, PeftAdapterMixin from ...models.attention_processor import Attention from ...models.modeling_utils import ModelMixin from ...models.normalization import RMSNorm from ...utils.torch_utils import maybe_allow_in_graph from ..attention_dispatch import dispatch_attention_fn from ..modeling_outputs import Transformer2DModelOutput ADALN_EMBED_DIM = 256 SEQ_MULTI_OF = 32 X_PAD_DIM = 64 class TimestepEmbedder(nn.Module): def __init__(self, out_size, mid_size=None, frequency_embedding_size=256): super().__init__() if mid_size is None: mid_size = out_size self.mlp = nn.Sequential( nn.Linear(frequency_embedding_size, mid_size, bias=True), nn.SiLU(), nn.Linear(mid_size, out_size, bias=True), ) self.frequency_embedding_size = frequency_embedding_size @staticmethod def timestep_embedding(t, dim, max_period=10000): with torch.amp.autocast("cuda", enabled=False): half = dim // 2 freqs = torch.exp( -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32, device=t.device) / half ) args = t[:, None].float() * freqs[None] embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1) if dim % 2: embedding = 
torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1) return embedding def forward(self, t): t_freq = self.timestep_embedding(t, self.frequency_embedding_size) weight_dtype = self.mlp[0].weight.dtype compute_dtype = getattr(self.mlp[0], "compute_dtype", None) if weight_dtype.is_floating_point: t_freq = t_freq.to(weight_dtype) elif compute_dtype is not None: t_freq = t_freq.to(compute_dtype) t_emb = self.mlp(t_freq) return t_emb class ZSingleStreamAttnProcessor: """ Processor for Z-Image single stream attention that adapts the existing Attention class to match the behavior of the original Z-ImageAttention module. """ _attention_backend = None _parallel_config = None def __init__(self): if not hasattr(F, "scaled_dot_product_attention"): raise ImportError( "ZSingleStreamAttnProcessor requires PyTorch 2.0. To use it, please upgrade PyTorch to version 2.0 or higher." ) def __call__( self, attn: Attention, hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor | None = None, attention_mask: torch.Tensor | None = None, freqs_cis: torch.Tensor | None = None, ) -> torch.Tensor: query = attn.to_q(hidden_states) key = attn.to_k(hidden_states) value = attn.to_v(hidden_states) query = query.unflatten(-1, (attn.heads, -1)) key = key.unflatten(-1, (attn.heads, -1)) value = value.unflatten(-1, (attn.heads, -1)) # Apply Norms if attn.norm_q is not None: query = attn.norm_q(query) if attn.norm_k is not None: key = attn.norm_k(key) # Apply RoPE def apply_rotary_emb(x_in: torch.Tensor, freqs_cis: torch.Tensor) -> torch.Tensor: with torch.amp.autocast("cuda", enabled=False): x = torch.view_as_complex(x_in.float().reshape(*x_in.shape[:-1], -1, 2)) freqs_cis = freqs_cis.unsqueeze(2) x_out = torch.view_as_real(x * freqs_cis).flatten(3) return x_out.type_as(x_in) # todo if freqs_cis is not None: query = apply_rotary_emb(query, freqs_cis) key = apply_rotary_emb(key, freqs_cis) # Cast to correct dtype dtype = query.dtype query, key = query.to(dtype), key.to(dtype) # 
From [batch, seq_len] to [batch, 1, 1, seq_len] -> broadcast to [batch, heads, seq_len, seq_len] if attention_mask is not None and attention_mask.ndim == 2: attention_mask = attention_mask[:, None, None, :] # Compute joint attention hidden_states = dispatch_attention_fn( query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False, backend=self._attention_backend, parallel_config=self._parallel_config, ) # Reshape back hidden_states = hidden_states.flatten(2, 3) hidden_states = hidden_states.to(dtype) output = attn.to_out[0](hidden_states) if len(attn.to_out) > 1: # dropout output = attn.to_out[1](output) return output def select_per_token( value_noisy: torch.Tensor, value_clean: torch.Tensor, noise_mask: torch.Tensor, seq_len: int, ) -> torch.Tensor: noise_mask_expanded = noise_mask.unsqueeze(-1) # (batch, seq_len, 1) return torch.where( noise_mask_expanded == 1, value_noisy.unsqueeze(1).expand(-1, seq_len, -1), value_clean.unsqueeze(1).expand(-1, seq_len, -1), ) class FeedForward(nn.Module): def __init__(self, dim: int, hidden_dim: int): super().__init__() self.w1 = nn.Linear(dim, hidden_dim, bias=False) self.w2 = nn.Linear(hidden_dim, dim, bias=False) self.w3 = nn.Linear(dim, hidden_dim, bias=False) def _forward_silu_gating(self, x1, x3): return F.silu(x1) * x3 def forward(self, x): return self.w2(self._forward_silu_gating(self.w1(x), self.w3(x))) @maybe_allow_in_graph class ZImageTransformerBlock(nn.Module): def __init__( self, layer_id: int, dim: int, n_heads: int, n_kv_heads: int, norm_eps: float, qk_norm: bool, modulation=True, ): super().__init__() self.dim = dim self.head_dim = dim // n_heads # Refactored to use diffusers Attention with custom processor # Original Z-Image params: dim, n_heads, n_kv_heads, qk_norm self.attention = Attention( query_dim=dim, cross_attention_dim=None, dim_head=dim // n_heads, heads=n_heads, qk_norm="rms_norm" if qk_norm else None, eps=1e-5, bias=False, out_bias=False, processor=ZSingleStreamAttnProcessor(), ) 
self.feed_forward = FeedForward(dim=dim, hidden_dim=int(dim / 3 * 8)) self.layer_id = layer_id self.attention_norm1 = RMSNorm(dim, eps=norm_eps) self.ffn_norm1 = RMSNorm(dim, eps=norm_eps) self.attention_norm2 = RMSNorm(dim, eps=norm_eps) self.ffn_norm2 = RMSNorm(dim, eps=norm_eps) self.modulation = modulation if modulation: self.adaLN_modulation = nn.Sequential(nn.Linear(min(dim, ADALN_EMBED_DIM), 4 * dim, bias=True)) def forward( self, x: torch.Tensor, attn_mask: torch.Tensor, freqs_cis: torch.Tensor, adaln_input: torch.Tensor | None = None, noise_mask: torch.Tensor | None = None, adaln_noisy: torch.Tensor | None = None, adaln_clean: torch.Tensor | None = None, ): if self.modulation: seq_len = x.shape[1] if noise_mask is not None: # Per-token modulation: different modulation for noisy/clean tokens mod_noisy = self.adaLN_modulation(adaln_noisy) mod_clean = self.adaLN_modulation(adaln_clean) scale_msa_noisy, gate_msa_noisy, scale_mlp_noisy, gate_mlp_noisy = mod_noisy.chunk(4, dim=1) scale_msa_clean, gate_msa_clean, scale_mlp_clean, gate_mlp_clean = mod_clean.chunk(4, dim=1) gate_msa_noisy, gate_mlp_noisy = gate_msa_noisy.tanh(), gate_mlp_noisy.tanh() gate_msa_clean, gate_mlp_clean = gate_msa_clean.tanh(), gate_mlp_clean.tanh() scale_msa_noisy, scale_mlp_noisy = 1.0 + scale_msa_noisy, 1.0 + scale_mlp_noisy scale_msa_clean, scale_mlp_clean = 1.0 + scale_msa_clean, 1.0 + scale_mlp_clean scale_msa = select_per_token(scale_msa_noisy, scale_msa_clean, noise_mask, seq_len) scale_mlp = select_per_token(scale_mlp_noisy, scale_mlp_clean, noise_mask, seq_len) gate_msa = select_per_token(gate_msa_noisy, gate_msa_clean, noise_mask, seq_len) gate_mlp = select_per_token(gate_mlp_noisy, gate_mlp_clean, noise_mask, seq_len) else: # Global modulation: same modulation for all tokens (avoid double select) mod = self.adaLN_modulation(adaln_input) scale_msa, gate_msa, scale_mlp, gate_mlp = mod.unsqueeze(1).chunk(4, dim=2) gate_msa, gate_mlp = gate_msa.tanh(), gate_mlp.tanh() scale_msa, 
scale_mlp = 1.0 + scale_msa, 1.0 + scale_mlp # Attention block attn_out = self.attention( self.attention_norm1(x) * scale_msa, attention_mask=attn_mask, freqs_cis=freqs_cis ) x = x + gate_msa * self.attention_norm2(attn_out) # FFN block x = x + gate_mlp * self.ffn_norm2(self.feed_forward(self.ffn_norm1(x) * scale_mlp)) else: # Attention block attn_out = self.attention(self.attention_norm1(x), attention_mask=attn_mask, freqs_cis=freqs_cis) x = x + self.attention_norm2(attn_out) # FFN block x = x + self.ffn_norm2(self.feed_forward(self.ffn_norm1(x))) return x class FinalLayer(nn.Module): def __init__(self, hidden_size, out_channels): super().__init__() self.norm_final = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6) self.linear = nn.Linear(hidden_size, out_channels, bias=True) self.adaLN_modulation = nn.Sequential( nn.SiLU(), nn.Linear(min(hidden_size, ADALN_EMBED_DIM), hidden_size, bias=True), ) def forward(self, x, c=None, noise_mask=None, c_noisy=None, c_clean=None): seq_len = x.shape[1] if noise_mask is not None: # Per-token modulation scale_noisy = 1.0 + self.adaLN_modulation(c_noisy) scale_clean = 1.0 + self.adaLN_modulation(c_clean) scale = select_per_token(scale_noisy, scale_clean, noise_mask, seq_len) else: # Original global modulation assert c is not None, "Either c or (c_noisy, c_clean) must be provided" scale = 1.0 + self.adaLN_modulation(c) scale = scale.unsqueeze(1) x = self.norm_final(x) * scale x = self.linear(x) return x class RopeEmbedder: def __init__( self, theta: float = 256.0, axes_dims: list[int] = (16, 56, 56), axes_lens: list[int] = (64, 128, 128), ): self.theta = theta self.axes_dims = axes_dims self.axes_lens = axes_lens assert len(axes_dims) == len(axes_lens), "axes_dims and axes_lens must have the same length" self.freqs_cis = None @staticmethod def precompute_freqs_cis(dim: list[int], end: list[int], theta: float = 256.0): with torch.device("cpu"): freqs_cis = [] for i, (d, e) in enumerate(zip(dim, end)): freqs = 1.0 / 
(theta ** (torch.arange(0, d, 2, dtype=torch.float64, device="cpu") / d)) timestep = torch.arange(e, device=freqs.device, dtype=torch.float64) freqs = torch.outer(timestep, freqs).float() freqs_cis_i = torch.polar(torch.ones_like(freqs), freqs).to(torch.complex64) # complex64 freqs_cis.append(freqs_cis_i) return freqs_cis def __call__(self, ids: torch.Tensor): assert ids.ndim == 2 assert ids.shape[-1] == len(self.axes_dims) device = ids.device if self.freqs_cis is None: self.freqs_cis = self.precompute_freqs_cis(self.axes_dims, self.axes_lens, theta=self.theta) self.freqs_cis = [freqs_cis.to(device) for freqs_cis in self.freqs_cis] else: # Ensure freqs_cis are on the same device as ids if self.freqs_cis[0].device != device: self.freqs_cis = [freqs_cis.to(device) for freqs_cis in self.freqs_cis] result = [] for i in range(len(self.axes_dims)): index = ids[:, i] result.append(self.freqs_cis[i][index]) return torch.cat(result, dim=-1) class ZImageTransformer2DModel(ModelMixin, ConfigMixin, PeftAdapterMixin, FromOriginalModelMixin): _supports_gradient_checkpointing = True _no_split_modules = ["ZImageTransformerBlock"] _repeated_blocks = ["ZImageTransformerBlock"] _skip_layerwise_casting_patterns = ["t_embedder", "cap_embedder"] # precision sensitive layers @register_to_config def __init__( self, all_patch_size=(2,), all_f_patch_size=(1,), in_channels=16, dim=3840, n_layers=30, n_refiner_layers=2, n_heads=30, n_kv_heads=30, norm_eps=1e-5, qk_norm=True, cap_feat_dim=2560, siglip_feat_dim=None, # Optional: set to enable SigLIP support for Omni rope_theta=256.0, t_scale=1000.0, axes_dims=[32, 48, 48], axes_lens=[1024, 512, 512], ) -> None: super().__init__() self.in_channels = in_channels self.out_channels = in_channels self.all_patch_size = all_patch_size self.all_f_patch_size = all_f_patch_size self.dim = dim self.n_heads = n_heads self.rope_theta = rope_theta self.t_scale = t_scale self.gradient_checkpointing = False assert len(all_patch_size) == len(all_f_patch_size) 
all_x_embedder = {} all_final_layer = {} for patch_idx, (patch_size, f_patch_size) in enumerate(zip(all_patch_size, all_f_patch_size)): x_embedder = nn.Linear(f_patch_size * patch_size * patch_size * in_channels, dim, bias=True) all_x_embedder[f"{patch_size}-{f_patch_size}"] = x_embedder final_layer = FinalLayer(dim, patch_size * patch_size * f_patch_size * self.out_channels) all_final_layer[f"{patch_size}-{f_patch_size}"] = final_layer self.all_x_embedder = nn.ModuleDict(all_x_embedder) self.all_final_layer = nn.ModuleDict(all_final_layer) self.noise_refiner = nn.ModuleList( [ ZImageTransformerBlock( 1000 + layer_id, dim, n_heads, n_kv_heads, norm_eps, qk_norm, modulation=True, ) for layer_id in range(n_refiner_layers) ] ) self.context_refiner = nn.ModuleList( [ ZImageTransformerBlock( layer_id, dim, n_heads, n_kv_heads, norm_eps, qk_norm, modulation=False, ) for layer_id in range(n_refiner_layers) ] ) self.t_embedder = TimestepEmbedder(min(dim, ADALN_EMBED_DIM), mid_size=1024) self.cap_embedder = nn.Sequential(RMSNorm(cap_feat_dim, eps=norm_eps), nn.Linear(cap_feat_dim, dim, bias=True)) # Optional SigLIP components (for Omni variant) if siglip_feat_dim is not None: self.siglip_embedder = nn.Sequential( RMSNorm(siglip_feat_dim, eps=norm_eps), nn.Linear(siglip_feat_dim, dim, bias=True) ) self.siglip_refiner = nn.ModuleList( [ ZImageTransformerBlock( 2000 + layer_id, dim, n_heads, n_kv_heads, norm_eps, qk_norm, modulation=False, ) for layer_id in range(n_refiner_layers) ] ) self.siglip_pad_token = nn.Parameter(torch.empty((1, dim))) else: self.siglip_embedder = None self.siglip_refiner = None self.siglip_pad_token = None self.x_pad_token = nn.Parameter(torch.empty((1, dim))) self.cap_pad_token = nn.Parameter(torch.empty((1, dim))) self.layers = nn.ModuleList( [ ZImageTransformerBlock(layer_id, dim, n_heads, n_kv_heads, norm_eps, qk_norm) for layer_id in range(n_layers) ] ) head_dim = dim // n_heads assert head_dim == sum(axes_dims) self.axes_dims = axes_dims 
self.axes_lens = axes_lens self.rope_embedder = RopeEmbedder(theta=rope_theta, axes_dims=axes_dims, axes_lens=axes_lens) def unpatchify( self, x: list[torch.Tensor], size: list[tuple], patch_size, f_patch_size, x_pos_offsets: list[tuple[int, int]] | None = None, ) -> list[torch.Tensor]: pH = pW = patch_size pF = f_patch_size bsz = len(x) assert len(size) == bsz if x_pos_offsets is not None: # Omni: extract target image from unified sequence (cond_images + target) result = [] for i in range(bsz): unified_x = x[i][x_pos_offsets[i][0] : x_pos_offsets[i][1]] cu_len = 0 x_item = None for j in range(len(size[i])): if size[i][j] is None: ori_len = 0 pad_len = SEQ_MULTI_OF cu_len += pad_len + ori_len else: F, H, W = size[i][j] ori_len = (F // pF) * (H // pH) * (W // pW) pad_len = (-ori_len) % SEQ_MULTI_OF x_item = ( unified_x[cu_len : cu_len + ori_len] .view(F // pF, H // pH, W // pW, pF, pH, pW, self.out_channels) .permute(6, 0, 3, 1, 4, 2, 5) .reshape(self.out_channels, F, H, W) ) cu_len += ori_len + pad_len result.append(x_item) # Return only the last (target) image return result else: # Original mode: simple unpatchify for i in range(bsz): F, H, W = size[i] ori_len = (F // pF) * (H // pH) * (W // pW) # "f h w pf ph pw c -> c (f pf) (h ph) (w pw)" x[i] = ( x[i][:ori_len] .view(F // pF, H // pH, W // pW, pF, pH, pW, self.out_channels) .permute(6, 0, 3, 1, 4, 2, 5) .reshape(self.out_channels, F, H, W) ) return x @staticmethod def create_coordinate_grid(size, start=None, device=None): if start is None: start = (0 for _ in size) axes = [torch.arange(x0, x0 + span, dtype=torch.int32, device=device) for x0, span in zip(start, size)] grids = torch.meshgrid(axes, indexing="ij") return torch.stack(grids, dim=-1) def _patchify_image(self, image: torch.Tensor, patch_size: int, f_patch_size: int): """Patchify a single image tensor: (C, F, H, W) -> (num_patches, patch_dim).""" pH, pW, pF = patch_size, patch_size, f_patch_size C, F, H, W = image.size() F_tokens, H_tokens, W_tokens = 
F // pF, H // pH, W // pW image = image.view(C, F_tokens, pF, H_tokens, pH, W_tokens, pW) image = image.permute(1, 3, 5, 2, 4, 6, 0).reshape(F_tokens * H_tokens * W_tokens, pF * pH * pW * C) return image, (F, H, W), (F_tokens, H_tokens, W_tokens) def _pad_with_ids( self, feat: torch.Tensor, pos_grid_size: tuple, pos_start: tuple, device: torch.device, noise_mask_val: int | None = None, ): """Pad feature to SEQ_MULTI_OF, create position IDs and pad mask.""" ori_len = len(feat) pad_len = (-ori_len) % SEQ_MULTI_OF total_len = ori_len + pad_len # Pos IDs ori_pos_ids = self.create_coordinate_grid(size=pos_grid_size, start=pos_start, device=device).flatten(0, 2) if pad_len > 0: pad_pos_ids = ( self.create_coordinate_grid(size=(1, 1, 1), start=(0, 0, 0), device=device) .flatten(0, 2) .repeat(pad_len, 1) ) pos_ids = torch.cat([ori_pos_ids, pad_pos_ids], dim=0) padded_feat = torch.cat([feat, feat[-1:].repeat(pad_len, 1)], dim=0) pad_mask = torch.cat( [ torch.zeros(ori_len, dtype=torch.bool, device=device), torch.ones(pad_len, dtype=torch.bool, device=device), ] ) else: pos_ids = ori_pos_ids padded_feat = feat pad_mask = torch.zeros(ori_len, dtype=torch.bool, device=device) noise_mask = [noise_mask_val] * total_len if noise_mask_val is not None else None # token level return padded_feat, pos_ids, pad_mask, total_len, noise_mask def patchify_and_embed( self, all_image: list[torch.Tensor], all_cap_feats: list[torch.Tensor], patch_size: int, f_patch_size: int ): """Patchify for basic mode: single image per batch item.""" device = all_image[0].device all_img_out, all_img_size, all_img_pos_ids, all_img_pad_mask = [], [], [], [] all_cap_out, all_cap_pos_ids, all_cap_pad_mask = [], [], [] for image, cap_feat in zip(all_image, all_cap_feats): # Caption cap_out, cap_pos_ids, cap_pad_mask, cap_len, _ = self._pad_with_ids( cap_feat, (len(cap_feat) + (-len(cap_feat)) % SEQ_MULTI_OF, 1, 1), (1, 0, 0), device ) all_cap_out.append(cap_out) all_cap_pos_ids.append(cap_pos_ids) 
all_cap_pad_mask.append(cap_pad_mask) # Image img_patches, size, (F_t, H_t, W_t) = self._patchify_image(image, patch_size, f_patch_size) img_out, img_pos_ids, img_pad_mask, _, _ = self._pad_with_ids( img_patches, (F_t, H_t, W_t), (cap_len + 1, 0, 0), device ) all_img_out.append(img_out) all_img_size.append(size) all_img_pos_ids.append(img_pos_ids) all_img_pad_mask.append(img_pad_mask) return ( all_img_out, all_cap_out, all_img_size, all_img_pos_ids, all_cap_pos_ids, all_img_pad_mask, all_cap_pad_mask, ) def patchify_and_embed_omni( self, all_x: list[list[torch.Tensor]], all_cap_feats: list[list[torch.Tensor]], all_siglip_feats: list[list[torch.Tensor]], patch_size: int, f_patch_size: int, images_noise_mask: list[list[int]], ): """Patchify for omni mode: multiple images per batch item with noise masks.""" bsz = len(all_x) device = all_x[0][-1].device dtype = all_x[0][-1].dtype all_x_out, all_x_size, all_x_pos_ids, all_x_pad_mask, all_x_len, all_x_noise_mask = [], [], [], [], [], [] all_cap_out, all_cap_pos_ids, all_cap_pad_mask, all_cap_len, all_cap_noise_mask = [], [], [], [], [] all_sig_out, all_sig_pos_ids, all_sig_pad_mask, all_sig_len, all_sig_noise_mask = [], [], [], [], [] for i in range(bsz): num_images = len(all_x[i]) cap_feats_list, cap_pos_list, cap_mask_list, cap_lens, cap_noise = [], [], [], [], [] cap_end_pos = [] cap_cu_len = 1 # Process captions for j, cap_item in enumerate(all_cap_feats[i]): noise_val = images_noise_mask[i][j] if j < len(images_noise_mask[i]) else 1 cap_out, cap_pos, cap_mask, cap_len, cap_nm = self._pad_with_ids( cap_item, (len(cap_item) + (-len(cap_item)) % SEQ_MULTI_OF, 1, 1), (cap_cu_len, 0, 0), device, noise_val, ) cap_feats_list.append(cap_out) cap_pos_list.append(cap_pos) cap_mask_list.append(cap_mask) cap_lens.append(cap_len) cap_noise.extend(cap_nm) cap_cu_len += len(cap_item) cap_end_pos.append(cap_cu_len) cap_cu_len += 2 # for image vae and siglip tokens all_cap_out.append(torch.cat(cap_feats_list, dim=0)) 
all_cap_pos_ids.append(torch.cat(cap_pos_list, dim=0)) all_cap_pad_mask.append(torch.cat(cap_mask_list, dim=0)) all_cap_len.append(cap_lens) all_cap_noise_mask.append(cap_noise) # Process images x_feats_list, x_pos_list, x_mask_list, x_lens, x_size, x_noise = [], [], [], [], [], [] for j, x_item in enumerate(all_x[i]): noise_val = images_noise_mask[i][j] if x_item is not None: x_patches, size, (F_t, H_t, W_t) = self._patchify_image(x_item, patch_size, f_patch_size) x_out, x_pos, x_mask, x_len, x_nm = self._pad_with_ids( x_patches, (F_t, H_t, W_t), (cap_end_pos[j], 0, 0), device, noise_val ) x_size.append(size) else: x_len = SEQ_MULTI_OF x_out = torch.zeros((x_len, X_PAD_DIM), dtype=dtype, device=device) x_pos = self.create_coordinate_grid((1, 1, 1), (0, 0, 0), device).flatten(0, 2).repeat(x_len, 1) x_mask = torch.ones(x_len, dtype=torch.bool, device=device) x_nm = [noise_val] * x_len x_size.append(None) x_feats_list.append(x_out) x_pos_list.append(x_pos) x_mask_list.append(x_mask) x_lens.append(x_len) x_noise.extend(x_nm) all_x_out.append(torch.cat(x_feats_list, dim=0)) all_x_pos_ids.append(torch.cat(x_pos_list, dim=0)) all_x_pad_mask.append(torch.cat(x_mask_list, dim=0)) all_x_size.append(x_size) all_x_len.append(x_lens) all_x_noise_mask.append(x_noise) # Process siglip if all_siglip_feats[i] is None: all_sig_len.append([0] * num_images) all_sig_out.append(None) else: sig_feats_list, sig_pos_list, sig_mask_list, sig_lens, sig_noise = [], [], [], [], [] for j, sig_item in enumerate(all_siglip_feats[i]): noise_val = images_noise_mask[i][j] if sig_item is not None: sig_H, sig_W, sig_C = sig_item.size() sig_flat = sig_item.permute(2, 0, 1).reshape(sig_H * sig_W, sig_C) sig_out, sig_pos, sig_mask, sig_len, sig_nm = self._pad_with_ids( sig_flat, (1, sig_H, sig_W), (cap_end_pos[j] + 1, 0, 0), device, noise_val ) # Scale position IDs to match x resolution if x_size[j] is not None: sig_pos = sig_pos.float() sig_pos[..., 1] = sig_pos[..., 1] / max(sig_H - 1, 1) * 
(x_size[j][1] - 1) sig_pos[..., 2] = sig_pos[..., 2] / max(sig_W - 1, 1) * (x_size[j][2] - 1) sig_pos = sig_pos.to(torch.int32) else: sig_len = SEQ_MULTI_OF sig_out = torch.zeros((sig_len, self.config.siglip_feat_dim), dtype=dtype, device=device) sig_pos = ( self.create_coordinate_grid((1, 1, 1), (0, 0, 0), device).flatten(0, 2).repeat(sig_len, 1) ) sig_mask = torch.ones(sig_len, dtype=torch.bool, device=device) sig_nm = [noise_val] * sig_len sig_feats_list.append(sig_out) sig_pos_list.append(sig_pos) sig_mask_list.append(sig_mask) sig_lens.append(sig_len) sig_noise.extend(sig_nm) all_sig_out.append(torch.cat(sig_feats_list, dim=0)) all_sig_pos_ids.append(torch.cat(sig_pos_list, dim=0)) all_sig_pad_mask.append(torch.cat(sig_mask_list, dim=0)) all_sig_len.append(sig_lens) all_sig_noise_mask.append(sig_noise) # Compute x position offsets all_x_pos_offsets = [(sum(all_cap_len[i]), sum(all_cap_len[i]) + sum(all_x_len[i])) for i in range(bsz)] return ( all_x_out, all_cap_out, all_sig_out, all_x_size, all_x_pos_ids, all_cap_pos_ids, all_sig_pos_ids, all_x_pad_mask, all_cap_pad_mask, all_sig_pad_mask, all_x_pos_offsets, all_x_noise_mask, all_cap_noise_mask, all_sig_noise_mask, ) def _prepare_sequence( self, feats: list[torch.Tensor], pos_ids: list[torch.Tensor], inner_pad_mask: list[torch.Tensor], pad_token: torch.nn.Parameter, noise_mask: list[list[int]] | None = None, device: torch.device = None, ): """Prepare sequence: apply pad token, RoPE embed, pad to batch, create attention mask.""" item_seqlens = [len(f) for f in feats] max_seqlen = max(item_seqlens) bsz = len(feats) # Pad token feats_cat = torch.cat(feats, dim=0) feats_cat[torch.cat(inner_pad_mask)] = pad_token feats = list(feats_cat.split(item_seqlens, dim=0)) # RoPE freqs_cis = list(self.rope_embedder(torch.cat(pos_ids, dim=0)).split([len(p) for p in pos_ids], dim=0)) # Pad to batch feats = pad_sequence(feats, batch_first=True, padding_value=0.0) freqs_cis = pad_sequence(freqs_cis, batch_first=True, 
padding_value=0.0)[:, : feats.shape[1]] # Attention mask attn_mask = torch.zeros((bsz, max_seqlen), dtype=torch.bool, device=device) for i, seq_len in enumerate(item_seqlens): attn_mask[i, :seq_len] = 1 # Noise mask noise_mask_tensor = None if noise_mask is not None: noise_mask_tensor = pad_sequence( [torch.tensor(m, dtype=torch.long, device=device) for m in noise_mask], batch_first=True, padding_value=0, )[:, : feats.shape[1]] return feats, freqs_cis, attn_mask, item_seqlens, noise_mask_tensor def _build_unified_sequence( self, x: torch.Tensor, x_freqs: torch.Tensor, x_seqlens: list[int], x_noise_mask: list[list[int]] | None, cap: torch.Tensor, cap_freqs: torch.Tensor, cap_seqlens: list[int], cap_noise_mask: list[list[int]] | None, siglip: torch.Tensor | None, siglip_freqs: torch.Tensor | None, siglip_seqlens: list[int] | None, siglip_noise_mask: list[list[int]] | None, omni_mode: bool, device: torch.device, ): """Build unified sequence: x, cap, and optionally siglip. Basic mode order: [x, cap]; Omni mode order: [cap, x, siglip] """ bsz = len(x_seqlens) unified = [] unified_freqs = [] unified_noise_mask = [] for i in range(bsz): x_len, cap_len = x_seqlens[i], cap_seqlens[i] if omni_mode: # Omni: [cap, x, siglip] if siglip is not None and siglip_seqlens is not None: sig_len = siglip_seqlens[i] unified.append(torch.cat([cap[i][:cap_len], x[i][:x_len], siglip[i][:sig_len]])) unified_freqs.append( torch.cat([cap_freqs[i][:cap_len], x_freqs[i][:x_len], siglip_freqs[i][:sig_len]]) ) unified_noise_mask.append( torch.tensor( cap_noise_mask[i] + x_noise_mask[i] + siglip_noise_mask[i], dtype=torch.long, device=device ) ) else: unified.append(torch.cat([cap[i][:cap_len], x[i][:x_len]])) unified_freqs.append(torch.cat([cap_freqs[i][:cap_len], x_freqs[i][:x_len]])) unified_noise_mask.append( torch.tensor(cap_noise_mask[i] + x_noise_mask[i], dtype=torch.long, device=device) ) else: # Basic: [x, cap] unified.append(torch.cat([x[i][:x_len], cap[i][:cap_len]])) 
unified_freqs.append(torch.cat([x_freqs[i][:x_len], cap_freqs[i][:cap_len]])) # Compute unified seqlens if omni_mode: if siglip is not None and siglip_seqlens is not None: unified_seqlens = [a + b + c for a, b, c in zip(cap_seqlens, x_seqlens, siglip_seqlens)] else: unified_seqlens = [a + b for a, b in zip(cap_seqlens, x_seqlens)] else: unified_seqlens = [a + b for a, b in zip(x_seqlens, cap_seqlens)] max_seqlen = max(unified_seqlens) # Pad to batch unified = pad_sequence(unified, batch_first=True, padding_value=0.0) unified_freqs = pad_sequence(unified_freqs, batch_first=True, padding_value=0.0) # Attention mask attn_mask = torch.zeros((bsz, max_seqlen), dtype=torch.bool, device=device) for i, seq_len in enumerate(unified_seqlens): attn_mask[i, :seq_len] = 1 # Noise mask noise_mask_tensor = None if omni_mode: noise_mask_tensor = pad_sequence(unified_noise_mask, batch_first=True, padding_value=0)[ :, : unified.shape[1] ] return unified, unified_freqs, attn_mask, noise_mask_tensor def forward( self, x: list[torch.Tensor, list[list[torch.Tensor]]], t, cap_feats: list[torch.Tensor, list[list[torch.Tensor]]], return_dict: bool = True, controlnet_block_samples: dict[int, torch.Tensor] | None = None, siglip_feats: list[list[torch.Tensor]] | None = None, image_noise_mask: list[list[int]] | None = None, patch_size: int = 2, f_patch_size: int = 1, ): """ Flow: patchify -> t_embed -> x_embed -> x_refine -> cap_embed -> cap_refine -> [siglip_embed -> siglip_refine] -> build_unified -> main_layers -> final_layer -> unpatchify """ assert patch_size in self.all_patch_size and f_patch_size in self.all_f_patch_size omni_mode = isinstance(x[0], list) device = x[0][-1].device if omni_mode else x[0].device if omni_mode: # Dual embeddings: noisy (t) and clean (t=1) t_noisy = self.t_embedder(t * self.t_scale).type_as(x[0][-1]) t_clean = self.t_embedder(torch.ones_like(t) * self.t_scale).type_as(x[0][-1]) adaln_input = None else: # Single embedding for all tokens adaln_input = 
self.t_embedder(t * self.t_scale).type_as(x[0]) t_noisy = t_clean = None # Patchify if omni_mode: ( x, cap_feats, siglip_feats, x_size, x_pos_ids, cap_pos_ids, siglip_pos_ids, x_pad_mask, cap_pad_mask, siglip_pad_mask, x_pos_offsets, x_noise_mask, cap_noise_mask, siglip_noise_mask, ) = self.patchify_and_embed_omni(x, cap_feats, siglip_feats, patch_size, f_patch_size, image_noise_mask) else: ( x, cap_feats, x_size, x_pos_ids, cap_pos_ids, x_pad_mask, cap_pad_mask, ) = self.patchify_and_embed(x, cap_feats, patch_size, f_patch_size) x_pos_offsets = x_noise_mask = cap_noise_mask = siglip_noise_mask = None # X embed & refine x_seqlens = [len(xi) for xi in x] x = self.all_x_embedder[f"{patch_size}-{f_patch_size}"](torch.cat(x, dim=0)) # embed x, x_freqs, x_mask, _, x_noise_tensor = self._prepare_sequence( list(x.split(x_seqlens, dim=0)), x_pos_ids, x_pad_mask, self.x_pad_token, x_noise_mask, device ) for layer in self.noise_refiner: x = ( self._gradient_checkpointing_func( layer, x, x_mask, x_freqs, adaln_input, x_noise_tensor, t_noisy, t_clean ) if torch.is_grad_enabled() and self.gradient_checkpointing else layer(x, x_mask, x_freqs, adaln_input, x_noise_tensor, t_noisy, t_clean) ) # Cap embed & refine cap_seqlens = [len(ci) for ci in cap_feats] cap_feats = self.cap_embedder(torch.cat(cap_feats, dim=0)) # embed cap_feats, cap_freqs, cap_mask, _, _ = self._prepare_sequence( list(cap_feats.split(cap_seqlens, dim=0)), cap_pos_ids, cap_pad_mask, self.cap_pad_token, None, device ) for layer in self.context_refiner: cap_feats = ( self._gradient_checkpointing_func(layer, cap_feats, cap_mask, cap_freqs) if torch.is_grad_enabled() and self.gradient_checkpointing else layer(cap_feats, cap_mask, cap_freqs) ) # Siglip embed & refine siglip_seqlens = siglip_freqs = None if omni_mode and siglip_feats[0] is not None and self.siglip_embedder is not None: siglip_seqlens = [len(si) for si in siglip_feats] siglip_feats = self.siglip_embedder(torch.cat(siglip_feats, dim=0)) # embed 
siglip_feats, siglip_freqs, siglip_mask, _, _ = self._prepare_sequence( list(siglip_feats.split(siglip_seqlens, dim=0)), siglip_pos_ids, siglip_pad_mask, self.siglip_pad_token, None, device, ) for layer in self.siglip_refiner: siglip_feats = ( self._gradient_checkpointing_func(layer, siglip_feats, siglip_mask, siglip_freqs) if torch.is_grad_enabled() and self.gradient_checkpointing else layer(siglip_feats, siglip_mask, siglip_freqs) ) # Unified sequence unified, unified_freqs, unified_mask, unified_noise_tensor = self._build_unified_sequence( x, x_freqs, x_seqlens, x_noise_mask, cap_feats, cap_freqs, cap_seqlens, cap_noise_mask, siglip_feats, siglip_freqs, siglip_seqlens, siglip_noise_mask, omni_mode, device, ) # Main transformer layers for layer_idx, layer in enumerate(self.layers): unified = ( self._gradient_checkpointing_func( layer, unified, unified_mask, unified_freqs, adaln_input, unified_noise_tensor, t_noisy, t_clean ) if torch.is_grad_enabled() and self.gradient_checkpointing else layer(unified, unified_mask, unified_freqs, adaln_input, unified_noise_tensor, t_noisy, t_clean) ) if controlnet_block_samples is not None and layer_idx in controlnet_block_samples: unified = unified + controlnet_block_samples[layer_idx] unified = ( self.all_final_layer[f"{patch_size}-{f_patch_size}"]( unified, noise_mask=unified_noise_tensor, c_noisy=t_noisy, c_clean=t_clean ) if omni_mode else self.all_final_layer[f"{patch_size}-{f_patch_size}"](unified, c=adaln_input) ) # Unpatchify x = self.unpatchify(list(unified.unbind(dim=0)), x_size, patch_size, f_patch_size, x_pos_offsets) return (x,) if not return_dict else Transformer2DModelOutput(sample=x)
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/models/transformers/transformer_z_image.py", "license": "Apache License 2.0", "lines": 914, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:src/diffusers/pipelines/z_image/pipeline_output.py
# Copyright 2025 Alibaba Z-Image Team and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass import numpy as np import PIL.Image from diffusers.utils import BaseOutput @dataclass class ZImagePipelineOutput(BaseOutput): """ Output class for Z-Image pipelines. Args: images (`list[PIL.Image.Image]` or `np.ndarray`) List of denoised PIL images of length `batch_size` or numpy array of shape `(batch_size, height, width, num_channels)`. PIL images or numpy array present the denoised images of the diffusion pipeline. """ images: list[PIL.Image.Image, np.ndarray]
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/pipelines/z_image/pipeline_output.py", "license": "Apache License 2.0", "lines": 27, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:src/diffusers/pipelines/z_image/pipeline_z_image.py
# Copyright 2025 Alibaba Z-Image Team and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect from typing import Any, Callable import torch from transformers import AutoTokenizer, PreTrainedModel from ...image_processor import VaeImageProcessor from ...loaders import FromSingleFileMixin, ZImageLoraLoaderMixin from ...models.autoencoders import AutoencoderKL from ...models.transformers import ZImageTransformer2DModel from ...pipelines.pipeline_utils import DiffusionPipeline from ...schedulers import FlowMatchEulerDiscreteScheduler from ...utils import logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from .pipeline_output import ZImagePipelineOutput logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py >>> import torch >>> from diffusers import ZImagePipeline >>> pipe = ZImagePipeline.from_pretrained("Z-a-o/Z-Image-Turbo", torch_dtype=torch.bfloat16) >>> pipe.to("cuda") >>> # Optionally, set the attention backend to flash-attn 2 or 3, default is SDPA in PyTorch. >>> # (1) Use flash attention 2 >>> # pipe.transformer.set_attention_backend("flash") >>> # (2) Use flash attention 3 >>> # pipe.transformer.set_attention_backend("_flash_3") >>> prompt = "一幅为名为“造相「Z-IMAGE-TURBO」”的项目设计的创意海报。画面巧妙地将文字概念视觉化:一辆复古蒸汽小火车化身为巨大的拉链头,正拉开厚厚的冬日积雪,展露出一个生机盎然的春天。" >>> image = pipe( ... prompt, ... height=1024, ... width=1024, ... num_inference_steps=9, ... 
guidance_scale=0.0, ... generator=torch.Generator("cuda").manual_seed(42), ... ).images[0] >>> image.save("zimage.png") ``` """ # Copied from diffusers.pipelines.flux.pipeline_flux.calculate_shift def calculate_shift( image_seq_len, base_seq_len: int = 256, max_seq_len: int = 4096, base_shift: float = 0.5, max_shift: float = 1.15, ): m = (max_shift - base_shift) / (max_seq_len - base_seq_len) b = base_shift - m * base_seq_len mu = image_seq_len * m + b return mu # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps def retrieve_timesteps( scheduler, num_inference_steps: int | None = None, device: str | torch.device | None = None, timesteps: list[int] | None = None, sigmas: list[float] | None = None, **kwargs, ): r""" Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. Args: scheduler (`SchedulerMixin`): The scheduler to get timesteps from. num_inference_steps (`int`): The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` must be `None`. device (`str` or `torch.device`, *optional*): The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. timesteps (`list[int]`, *optional*): Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, `num_inference_steps` and `sigmas` must be `None`. sigmas (`list[float]`, *optional*): Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, `num_inference_steps` and `timesteps` must be `None`. Returns: `tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the second element is the number of inference steps. 
""" if timesteps is not None and sigmas is not None: raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") if timesteps is not None: accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" timestep schedules. Please check whether you are using the correct scheduler." ) scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif sigmas is not None: accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accept_sigmas: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" sigmas schedules. Please check whether you are using the correct scheduler." ) scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return timesteps, num_inference_steps class ZImagePipeline(DiffusionPipeline, ZImageLoraLoaderMixin, FromSingleFileMixin): model_cpu_offload_seq = "text_encoder->transformer->vae" _optional_components = [] _callback_tensor_inputs = ["latents", "prompt_embeds"] def __init__( self, scheduler: FlowMatchEulerDiscreteScheduler, vae: AutoencoderKL, text_encoder: PreTrainedModel, tokenizer: AutoTokenizer, transformer: ZImageTransformer2DModel, ): super().__init__() self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, scheduler=scheduler, transformer=transformer, ) self.vae_scale_factor = ( 2 ** (len(self.vae.config.block_out_channels) - 1) if hasattr(self, "vae") and self.vae is not None else 8 ) self.image_processor = 
VaeImageProcessor(vae_scale_factor=self.vae_scale_factor * 2) def encode_prompt( self, prompt: str | list[str], device: torch.device | None = None, do_classifier_free_guidance: bool = True, negative_prompt: str | list[str] | None = None, prompt_embeds: list[torch.FloatTensor] | None = None, negative_prompt_embeds: torch.FloatTensor | None = None, max_sequence_length: int = 512, ): prompt = [prompt] if isinstance(prompt, str) else prompt prompt_embeds = self._encode_prompt( prompt=prompt, device=device, prompt_embeds=prompt_embeds, max_sequence_length=max_sequence_length, ) if do_classifier_free_guidance: if negative_prompt is None: negative_prompt = ["" for _ in prompt] else: negative_prompt = [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt assert len(prompt) == len(negative_prompt) negative_prompt_embeds = self._encode_prompt( prompt=negative_prompt, device=device, prompt_embeds=negative_prompt_embeds, max_sequence_length=max_sequence_length, ) else: negative_prompt_embeds = [] return prompt_embeds, negative_prompt_embeds def _encode_prompt( self, prompt: str | list[str], device: torch.device | None = None, prompt_embeds: list[torch.FloatTensor] | None = None, max_sequence_length: int = 512, ) -> list[torch.FloatTensor]: device = device or self._execution_device if prompt_embeds is not None: return prompt_embeds if isinstance(prompt, str): prompt = [prompt] for i, prompt_item in enumerate(prompt): messages = [ {"role": "user", "content": prompt_item}, ] prompt_item = self.tokenizer.apply_chat_template( messages, tokenize=False, add_generation_prompt=True, enable_thinking=True, ) prompt[i] = prompt_item text_inputs = self.tokenizer( prompt, padding="max_length", max_length=max_sequence_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids.to(device) prompt_masks = text_inputs.attention_mask.to(device).bool() prompt_embeds = self.text_encoder( input_ids=text_input_ids, attention_mask=prompt_masks, 
output_hidden_states=True, ).hidden_states[-2] embeddings_list = [] for i in range(len(prompt_embeds)): embeddings_list.append(prompt_embeds[i][prompt_masks[i]]) return embeddings_list def prepare_latents( self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None, ): height = 2 * (int(height) // (self.vae_scale_factor * 2)) width = 2 * (int(width) // (self.vae_scale_factor * 2)) shape = (batch_size, num_channels_latents, height, width) if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: if latents.shape != shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") latents = latents.to(device) return latents @property def guidance_scale(self): return self._guidance_scale @property def do_classifier_free_guidance(self): return self._guidance_scale > 0 @property def joint_attention_kwargs(self): return self._joint_attention_kwargs @property def num_timesteps(self): return self._num_timesteps @property def interrupt(self): return self._interrupt @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, prompt: str | list[str] = None, height: int | None = None, width: int | None = None, num_inference_steps: int = 50, sigmas: list[float] | None = None, guidance_scale: float = 5.0, cfg_normalization: bool = False, cfg_truncation: float = 1.0, negative_prompt: str | list[str] | None = None, num_images_per_prompt: int | None = 1, generator: torch.Generator | list[torch.Generator] | None = None, latents: torch.FloatTensor | None = None, prompt_embeds: list[torch.FloatTensor] | None = None, negative_prompt_embeds: list[torch.FloatTensor] | None = None, output_type: str | None = "pil", return_dict: bool = True, joint_attention_kwargs: dict[str, Any] | None = None, callback_on_step_end: Callable[[int, int], None] | None = None, callback_on_step_end_tensor_inputs: list[str] = ["latents"], max_sequence_length: int = 512, ): r""" 
Function invoked when calling the pipeline for generation. Args: prompt (`str` or `list[str]`, *optional*): The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. instead. height (`int`, *optional*, defaults to 1024): The height in pixels of the generated image. width (`int`, *optional*, defaults to 1024): The width in pixels of the generated image. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. sigmas (`list[float]`, *optional*): Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed will be used. guidance_scale (`float`, *optional*, defaults to 5.0): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. cfg_normalization (`bool`, *optional*, defaults to False): Whether to apply configuration normalization. cfg_truncation (`float`, *optional*, defaults to 1.0): The truncation value for configuration. negative_prompt (`str` or `list[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. 
generator (`torch.Generator` or `list[torch.Generator]`, *optional*): One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`list[torch.FloatTensor]`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`list[torch.FloatTensor]`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.ZImagePipelineOutput`] instead of a plain tuple. joint_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). callback_on_step_end (`Callable`, *optional*): A function that calls at the end of each denoising steps during the inference. The function is called with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. 
`callback_kwargs` will include a list of all tensors as specified by `callback_on_step_end_tensor_inputs`. callback_on_step_end_tensor_inputs (`List`, *optional*): The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the `._callback_tensor_inputs` attribute of your pipeline class. max_sequence_length (`int`, *optional*, defaults to 512): Maximum sequence length to use with the `prompt`. Examples: Returns: [`~pipelines.z_image.ZImagePipelineOutput`] or `tuple`: [`~pipelines.z_image.ZImagePipelineOutput`] if `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is a list with the generated images. """ height = height or 1024 width = width or 1024 vae_scale = self.vae_scale_factor * 2 if height % vae_scale != 0: raise ValueError( f"Height must be divisible by {vae_scale} (got {height}). " f"Please adjust the height to a multiple of {vae_scale}." ) if width % vae_scale != 0: raise ValueError( f"Width must be divisible by {vae_scale} (got {width}). " f"Please adjust the width to a multiple of {vae_scale}." ) device = self._execution_device self._guidance_scale = guidance_scale self._joint_attention_kwargs = joint_attention_kwargs self._interrupt = False self._cfg_normalization = cfg_normalization self._cfg_truncation = cfg_truncation # 2. Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = len(prompt_embeds) # If prompt_embeds is provided and prompt is None, skip encoding if prompt_embeds is not None and prompt is None: if self.do_classifier_free_guidance and negative_prompt_embeds is None: raise ValueError( "When `prompt_embeds` is provided without `prompt`, " "`negative_prompt_embeds` must also be provided for classifier-free guidance." 
) else: ( prompt_embeds, negative_prompt_embeds, ) = self.encode_prompt( prompt=prompt, negative_prompt=negative_prompt, do_classifier_free_guidance=self.do_classifier_free_guidance, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, device=device, max_sequence_length=max_sequence_length, ) # 4. Prepare latent variables num_channels_latents = self.transformer.in_channels latents = self.prepare_latents( batch_size * num_images_per_prompt, num_channels_latents, height, width, torch.float32, device, generator, latents, ) # Repeat prompt_embeds for num_images_per_prompt if num_images_per_prompt > 1: prompt_embeds = [pe for pe in prompt_embeds for _ in range(num_images_per_prompt)] if self.do_classifier_free_guidance and negative_prompt_embeds: negative_prompt_embeds = [npe for npe in negative_prompt_embeds for _ in range(num_images_per_prompt)] actual_batch_size = batch_size * num_images_per_prompt image_seq_len = (latents.shape[2] // 2) * (latents.shape[3] // 2) # 5. Prepare timesteps mu = calculate_shift( image_seq_len, self.scheduler.config.get("base_image_seq_len", 256), self.scheduler.config.get("max_image_seq_len", 4096), self.scheduler.config.get("base_shift", 0.5), self.scheduler.config.get("max_shift", 1.15), ) self.scheduler.sigma_min = 0.0 scheduler_kwargs = {"mu": mu} timesteps, num_inference_steps = retrieve_timesteps( self.scheduler, num_inference_steps, device, sigmas=sigmas, **scheduler_kwargs, ) num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) self._num_timesteps = len(timesteps) # 6. 
Denoising loop with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): if self.interrupt: continue # broadcast to batch dimension in a way that's compatible with ONNX/Core ML timestep = t.expand(latents.shape[0]) timestep = (1000 - timestep) / 1000 # Normalized time for time-aware config (0 at start, 1 at end) t_norm = timestep[0].item() # Handle cfg truncation current_guidance_scale = self.guidance_scale if ( self.do_classifier_free_guidance and self._cfg_truncation is not None and float(self._cfg_truncation) <= 1 ): if t_norm > self._cfg_truncation: current_guidance_scale = 0.0 # Run CFG only if configured AND scale is non-zero apply_cfg = self.do_classifier_free_guidance and current_guidance_scale > 0 if apply_cfg: latents_typed = latents.to(self.transformer.dtype) latent_model_input = latents_typed.repeat(2, 1, 1, 1) prompt_embeds_model_input = prompt_embeds + negative_prompt_embeds timestep_model_input = timestep.repeat(2) else: latent_model_input = latents.to(self.transformer.dtype) prompt_embeds_model_input = prompt_embeds timestep_model_input = timestep latent_model_input = latent_model_input.unsqueeze(2) latent_model_input_list = list(latent_model_input.unbind(dim=0)) model_out_list = self.transformer( latent_model_input_list, timestep_model_input, prompt_embeds_model_input, return_dict=False )[0] if apply_cfg: # Perform CFG pos_out = model_out_list[:actual_batch_size] neg_out = model_out_list[actual_batch_size:] noise_pred = [] for j in range(actual_batch_size): pos = pos_out[j].float() neg = neg_out[j].float() pred = pos + current_guidance_scale * (pos - neg) # Renormalization if self._cfg_normalization and float(self._cfg_normalization) > 0.0: ori_pos_norm = torch.linalg.vector_norm(pos) new_pos_norm = torch.linalg.vector_norm(pred) max_new_norm = ori_pos_norm * float(self._cfg_normalization) if new_pos_norm > max_new_norm: pred = pred * (max_new_norm / new_pos_norm) noise_pred.append(pred) noise_pred = 
torch.stack(noise_pred, dim=0) else: noise_pred = torch.stack([t.float() for t in model_out_list], dim=0) noise_pred = noise_pred.squeeze(2) noise_pred = -noise_pred # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred.to(torch.float32), t, latents, return_dict=False)[0] assert latents.dtype == torch.float32 if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop("latents", latents) prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if output_type == "latent": image = latents else: latents = latents.to(self.vae.dtype) latents = (latents / self.vae.config.scaling_factor) + self.vae.config.shift_factor image = self.vae.decode(latents, return_dict=False)[0] image = self.image_processor.postprocess(image, output_type=output_type) # Offload all models self.maybe_free_model_hooks() if not return_dict: return (image,) return ZImagePipelineOutput(images=image)
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/pipelines/z_image/pipeline_z_image.py", "license": "Apache License 2.0", "lines": 518, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:examples/community/pipline_flux_fill_controlnet_Inpaint.py
import inspect from typing import Any, Callable, Dict, List, Optional, Tuple, Union import numpy as np import PIL import torch from transformers import ( CLIPTextModel, CLIPTokenizer, T5EncoderModel, T5TokenizerFast, ) from diffusers.image_processor import PipelineImageInput, VaeImageProcessor from diffusers.loaders import FluxLoraLoaderMixin, FromSingleFileMixin, TextualInversionLoaderMixin from diffusers.models.autoencoders import AutoencoderKL from diffusers.models.controlnets.controlnet_flux import FluxControlNetModel, FluxMultiControlNetModel from diffusers.models.transformers import FluxTransformer2DModel from diffusers.pipelines.flux.pipeline_output import FluxPipelineOutput from diffusers.pipelines.pipeline_utils import DiffusionPipeline from diffusers.schedulers import FlowMatchEulerDiscreteScheduler from diffusers.utils import ( USE_PEFT_BACKEND, is_torch_xla_available, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers, ) from diffusers.utils.torch_utils import randn_tensor if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) EXAMPLE_DOC_STRING = """ Examples: ```py >>> import torch >>> from diffusers import FluxControlNetInpaintPipeline >>> from diffusers.models import FluxControlNetModel >>> from diffusers.utils import load_image >>> controlnet = FluxControlNetModel.from_pretrained( ... "InstantX/FLUX.1-dev-controlnet-canny", torch_dtype=torch.float16 ... ) >>> pipe = FluxControlNetInpaintPipeline.from_pretrained( ... "black-forest-labs/FLUX.1-schnell", controlnet=controlnet, torch_dtype=torch.float16 ... ) >>> pipe.to("cuda") >>> control_image = load_image( ... "https://huggingface.co/InstantX/FLUX.1-dev-Controlnet-Canny-alpha/resolve/main/canny.jpg" ... ) >>> init_image = load_image( ... "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png" ... 
) >>> mask_image = load_image( ... "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png" ... ) >>> prompt = "A girl holding a sign that says InstantX" >>> image = pipe( ... prompt, ... image=init_image, ... mask_image=mask_image, ... control_image=control_image, ... control_guidance_start=0.2, ... control_guidance_end=0.8, ... controlnet_conditioning_scale=0.7, ... strength=0.7, ... num_inference_steps=28, ... guidance_scale=3.5, ... ).images[0] >>> image.save("flux_controlnet_inpaint.png") ``` """ # Copied from diffusers.pipelines.flux.pipeline_flux.calculate_shift def calculate_shift( image_seq_len, base_seq_len: int = 256, max_seq_len: int = 4096, base_shift: float = 0.5, max_shift: float = 1.15, ): m = (max_shift - base_shift) / (max_seq_len - base_seq_len) b = base_shift - m * base_seq_len mu = image_seq_len * m + b return mu # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents def retrieve_latents( encoder_output: torch.Tensor, generator: torch.Generator | None = None, sample_mode: str = "sample" ): if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": return encoder_output.latent_dist.sample(generator) elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": return encoder_output.latent_dist.mode() elif hasattr(encoder_output, "latents"): return encoder_output.latents else: raise AttributeError("Could not access latents of provided encoder_output") def retrieve_latents_fill( encoder_output: torch.Tensor, generator: torch.Generator | None = None, sample_mode: str = "sample" ): if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": return encoder_output.latent_dist.sample(generator) elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": return encoder_output.latent_dist.mode() elif hasattr(encoder_output, "latents"): return encoder_output.latents else: raise 
AttributeError("Could not access latents of provided encoder_output") # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps def retrieve_timesteps( scheduler, num_inference_steps: Optional[int] = None, device: Optional[Union[str, torch.device]] = None, timesteps: Optional[List[int]] = None, sigmas: Optional[List[float]] = None, **kwargs, ): r""" Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. Args: scheduler (`SchedulerMixin`): The scheduler to get timesteps from. num_inference_steps (`int`): The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` must be `None`. device (`str` or `torch.device`, *optional*): The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. timesteps (`List[int]`, *optional*): Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, `num_inference_steps` and `sigmas` must be `None`. sigmas (`List[float]`, *optional*): Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, `num_inference_steps` and `timesteps` must be `None`. Returns: `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the second element is the number of inference steps. """ if timesteps is not None and sigmas is not None: raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") if timesteps is not None: accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" timestep schedules. 
Please check whether you are using the correct scheduler." ) scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif sigmas is not None: accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accept_sigmas: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" sigmas schedules. Please check whether you are using the correct scheduler." ) scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return timesteps, num_inference_steps class FluxControlNetFillInpaintPipeline(DiffusionPipeline, FluxLoraLoaderMixin, FromSingleFileMixin): r""" The Flux controlnet pipeline for inpainting. Reference: https://blackforestlabs.ai/announcing-black-forest-labs/ Args: transformer ([`FluxTransformer2DModel`]): Conditional Transformer (MMDiT) architecture to denoise the encoded image latents. scheduler ([`FlowMatchEulerDiscreteScheduler`]): A scheduler to be used in combination with `transformer` to denoise the encoded image latents. vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. text_encoder ([`CLIPTextModel`]): [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. text_encoder_2 ([`T5EncoderModel`]): [T5](https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5EncoderModel), specifically the [google/t5-v1_1-xxl](https://huggingface.co/google/t5-v1_1-xxl) variant. 
tokenizer (`CLIPTokenizer`): Tokenizer of class [CLIPTokenizer](https://huggingface.co/docs/transformers/en/model_doc/clip#transformers.CLIPTokenizer). tokenizer_2 (`T5TokenizerFast`): Second Tokenizer of class [T5TokenizerFast](https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5TokenizerFast). """ model_cpu_offload_seq = "text_encoder->text_encoder_2->transformer->vae" _optional_components = [] _callback_tensor_inputs = ["latents", "prompt_embeds", "control_image", "mask", "masked_image_latents"] def __init__( self, scheduler: FlowMatchEulerDiscreteScheduler, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, text_encoder_2: T5EncoderModel, tokenizer_2: T5TokenizerFast, transformer: FluxTransformer2DModel, controlnet: Union[ FluxControlNetModel, List[FluxControlNetModel], Tuple[FluxControlNetModel], FluxMultiControlNetModel ], ): super().__init__() if isinstance(controlnet, (list, tuple)): controlnet = FluxMultiControlNetModel(controlnet) self.register_modules( scheduler=scheduler, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, text_encoder_2=text_encoder_2, tokenizer_2=tokenizer_2, transformer=transformer, controlnet=controlnet, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8 # Flux latents are turned into 2x2 patches and packed. This means the latent width and height has to be divisible # by the patch size. 
So the vae scale factor is multiplied by the patch size to account for this self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor * 2) latent_channels = self.vae.config.latent_channels if getattr(self, "vae", None) else 16 self.mask_processor = VaeImageProcessor( vae_scale_factor=self.vae_scale_factor * 2, vae_latent_channels=latent_channels, do_normalize=False, do_binarize=True, do_convert_grayscale=True, ) self.tokenizer_max_length = ( self.tokenizer.model_max_length if hasattr(self, "tokenizer") and self.tokenizer is not None else 77 ) self.default_sample_size = 128 # Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline._get_t5_prompt_embeds def _get_t5_prompt_embeds( self, prompt: Union[str, List[str]] = None, num_images_per_prompt: int = 1, max_sequence_length: int = 512, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None, ): device = device or self._execution_device dtype = dtype or self.text_encoder.dtype prompt = [prompt] if isinstance(prompt, str) else prompt batch_size = len(prompt) if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, self.tokenizer_2) text_inputs = self.tokenizer_2( prompt, padding="max_length", max_length=max_sequence_length, truncation=True, return_length=False, return_overflowing_tokens=False, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer_2(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): removed_text = self.tokenizer_2.batch_decode(untruncated_ids[:, self.tokenizer_max_length - 1 : -1]) logger.warning( "The following part of your input was truncated because `max_sequence_length` is set to " f" {max_sequence_length} tokens: {removed_text}" ) prompt_embeds = self.text_encoder_2(text_input_ids.to(device), output_hidden_states=False)[0] dtype = self.text_encoder_2.dtype 
        # Move the T5 sequence embeddings to the compute device/dtype.
        prompt_embeds = prompt_embeds.to(dtype=dtype, device=device)

        _, seq_len, _ = prompt_embeds.shape

        # duplicate text embeddings and attention mask for each generation per prompt, using mps friendly method
        prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
        prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

        return prompt_embeds

    # Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline._get_clip_prompt_embeds
    def _get_clip_prompt_embeds(
        self,
        prompt: Union[str, List[str]],
        num_images_per_prompt: int = 1,
        device: Optional[torch.device] = None,
    ):
        """
        Encode `prompt` with the CLIP text encoder and return its pooled embedding
        (one vector per prompt), repeated `num_images_per_prompt` times.
        """
        device = device or self._execution_device

        prompt = [prompt] if isinstance(prompt, str) else prompt
        batch_size = len(prompt)

        if isinstance(self, TextualInversionLoaderMixin):
            prompt = self.maybe_convert_prompt(prompt, self.tokenizer)

        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer_max_length,
            truncation=True,
            return_overflowing_tokens=False,
            return_length=False,
            return_tensors="pt",
        )

        text_input_ids = text_inputs.input_ids
        # Warn (do not fail) when the prompt is longer than CLIP's max length.
        untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
        if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
            removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer_max_length - 1 : -1])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer_max_length} tokens: {removed_text}"
            )
        prompt_embeds = self.text_encoder(text_input_ids.to(device), output_hidden_states=False)

        # Use pooled output of CLIPTextModel
        prompt_embeds = prompt_embeds.pooler_output
        prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt)
        prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, -1)

        return prompt_embeds

    # Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline.encode_prompt
    def encode_prompt(
        self,
        prompt: Union[str, List[str]],
        prompt_2: Union[str, List[str]],
        device: Optional[torch.device] = None,
        num_images_per_prompt: int = 1,
        prompt_embeds: Optional[torch.FloatTensor] = None,
        pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
        max_sequence_length: int = 512,
        lora_scale: Optional[float] = None,
    ):
        r"""
        Args:
            prompt (`str` or `List[str]`, *optional*):
                prompt to be encoded
            prompt_2 (`str` or `List[str]`, *optional*):
                The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
                used in all text-encoders
            device: (`torch.device`):
                torch device
            num_images_per_prompt (`int`):
                number of images that should be generated per prompt
            prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                provided, text embeddings will be generated from `prompt` input argument.
            pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
                If not provided, pooled text embeddings will be generated from `prompt` input argument.
            lora_scale (`float`, *optional*):
                A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.

        Returns:
            `tuple`: `(prompt_embeds, pooled_prompt_embeds, text_ids)` — T5 sequence embeddings, CLIP pooled
            embeddings, and the (all-zero) text position ids.
        """
        device = device or self._execution_device

        # set lora scale so that monkey patched LoRA
        # function of text encoder can correctly access it
        if lora_scale is not None and isinstance(self, FluxLoraLoaderMixin):
            self._lora_scale = lora_scale

            # dynamically adjust the LoRA scale
            if self.text_encoder is not None and USE_PEFT_BACKEND:
                scale_lora_layers(self.text_encoder, lora_scale)
            if self.text_encoder_2 is not None and USE_PEFT_BACKEND:
                scale_lora_layers(self.text_encoder_2, lora_scale)

        prompt = [prompt] if isinstance(prompt, str) else prompt

        if prompt_embeds is None:
            prompt_2 = prompt_2 or prompt
            prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2

            # We only use the pooled prompt output from the CLIPTextModel
            pooled_prompt_embeds = self._get_clip_prompt_embeds(
                prompt=prompt,
                device=device,
                num_images_per_prompt=num_images_per_prompt,
            )
            prompt_embeds = self._get_t5_prompt_embeds(
                prompt=prompt_2,
                num_images_per_prompt=num_images_per_prompt,
                max_sequence_length=max_sequence_length,
                device=device,
            )

        if self.text_encoder is not None:
            if isinstance(self, FluxLoraLoaderMixin) and USE_PEFT_BACKEND:
                # Retrieve the original scale by scaling back the LoRA layers
                unscale_lora_layers(self.text_encoder, lora_scale)

        if self.text_encoder_2 is not None:
            if isinstance(self, FluxLoraLoaderMixin) and USE_PEFT_BACKEND:
                # Retrieve the original scale by scaling back the LoRA layers
                unscale_lora_layers(self.text_encoder_2, lora_scale)

        dtype = self.text_encoder.dtype if self.text_encoder is not None else self.transformer.dtype
        text_ids = torch.zeros(prompt_embeds.shape[1], 3).to(device=device, dtype=dtype)

        return prompt_embeds, pooled_prompt_embeds, text_ids

    # Copied from diffusers.pipelines.stable_diffusion_3.pipeline_stable_diffusion_3_inpaint.StableDiffusion3InpaintPipeline._encode_vae_image
    def _encode_vae_image(self, image: torch.Tensor, generator: torch.Generator):
        """
        VAE-encode `image` into latents and apply the config's shift/scaling factors.
        A list of generators is applied per-sample for reproducible batched sampling.
        """
        if isinstance(generator, list):
            image_latents = [
                retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i])
                for i in range(image.shape[0])
            ]
            image_latents = torch.cat(image_latents, dim=0)
        else:
            image_latents = retrieve_latents(self.vae.encode(image), generator=generator)

        image_latents = (image_latents - self.vae.config.shift_factor) * self.vae.config.scaling_factor

        return image_latents

    # Copied from diffusers.pipelines.stable_diffusion_3.pipeline_stable_diffusion_3_img2img.StableDiffusion3Img2ImgPipeline.get_timesteps
    def get_timesteps(self, num_inference_steps, strength, device):
        """
        Trim the scheduler's timestep schedule for img2img-style denoising: keep only the
        final `strength * num_inference_steps` steps and align the scheduler's begin index.
        """
        # get the original timestep using init_timestep
        init_timestep = min(num_inference_steps * strength, num_inference_steps)

        t_start = int(max(num_inference_steps - init_timestep, 0))
        timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]
        if hasattr(self.scheduler, "set_begin_index"):
            self.scheduler.set_begin_index(t_start * self.scheduler.order)

        return timesteps, num_inference_steps - t_start

    def check_inputs(
        self,
        prompt,
        prompt_2,
        image,
        mask_image,
        strength,
        height,
        width,
        output_type,
        prompt_embeds=None,
        pooled_prompt_embeds=None,
        callback_on_step_end_tensor_inputs=None,
        padding_mask_crop=None,
        max_sequence_length=None,
    ):
        """
        Validate the public `__call__` arguments, raising `ValueError` on inconsistent
        prompt/embedding combinations, out-of-range `strength`, or invalid mask-crop inputs.
        """
        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}")

        # Dimensions only produce a warning (not an error): the image processor resizes them.
        if height % (self.vae_scale_factor * 2) != 0 or width % (self.vae_scale_factor * 2) != 0:
            logger.warning(
                f"`height` and `width` have to be divisible by {self.vae_scale_factor * 2} but are {height} and {width}. Dimensions will be resized accordingly"
            )

        if callback_on_step_end_tensor_inputs is not None and not all(
            k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
        ):
            raise ValueError(
                f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
            )

        if prompt is not None and prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
                " only forward one of the two."
            )
        elif prompt_2 is not None and prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
                " only forward one of the two."
            )
        elif prompt is None and prompt_embeds is None:
            raise ValueError(
                "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
            )
        elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
        elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)):
            raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}")

        if prompt_embeds is not None and pooled_prompt_embeds is None:
            raise ValueError(
                "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`."
            )

        if padding_mask_crop is not None:
            if not isinstance(image, PIL.Image.Image):
                raise ValueError(
                    f"The image should be a PIL image when inpainting mask crop, but is of type {type(image)}."
                )
            if not isinstance(mask_image, PIL.Image.Image):
                raise ValueError(
                    f"The mask image should be a PIL image when inpainting mask crop, but is of type"
                    f" {type(mask_image)}."
) if output_type != "pil": raise ValueError(f"The output type should be PIL when inpainting mask crop, but is {output_type}.") if max_sequence_length is not None and max_sequence_length > 512: raise ValueError(f"`max_sequence_length` cannot be greater than 512 but is {max_sequence_length}") @staticmethod # Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline._prepare_latent_image_ids def _prepare_latent_image_ids(batch_size, height, width, device, dtype): latent_image_ids = torch.zeros(height, width, 3) latent_image_ids[..., 1] = latent_image_ids[..., 1] + torch.arange(height)[:, None] latent_image_ids[..., 2] = latent_image_ids[..., 2] + torch.arange(width)[None, :] latent_image_id_height, latent_image_id_width, latent_image_id_channels = latent_image_ids.shape latent_image_ids = latent_image_ids.reshape( latent_image_id_height * latent_image_id_width, latent_image_id_channels ) return latent_image_ids.to(device=device, dtype=dtype) @staticmethod # Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline._pack_latents def _pack_latents(latents, batch_size, num_channels_latents, height, width): latents = latents.view(batch_size, num_channels_latents, height // 2, 2, width // 2, 2) latents = latents.permute(0, 2, 4, 1, 3, 5) latents = latents.reshape(batch_size, (height // 2) * (width // 2), num_channels_latents * 4) return latents @staticmethod # Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline._unpack_latents def _unpack_latents(latents, height, width, vae_scale_factor): batch_size, num_patches, channels = latents.shape # VAE applies 8x compression on images but we must also account for packing which requires # latent height and width to be divisible by 2. 
        # Recover the unpacked latent grid from the packed token sequence: each packed
        # token holds a 2x2 patch of the latent image, so channels // 4 per pixel.
        height = 2 * (int(height) // (vae_scale_factor * 2))
        width = 2 * (int(width) // (vae_scale_factor * 2))

        latents = latents.view(batch_size, height // 2, width // 2, channels // 4, 2, 2)
        latents = latents.permute(0, 3, 1, 4, 2, 5)
        latents = latents.reshape(batch_size, channels // (2 * 2), height, width)

        return latents

    def prepare_latents(
        self,
        image,
        timestep,
        batch_size,
        num_channels_latents,
        height,
        width,
        dtype,
        device,
        generator,
        latents=None,
    ):
        """
        Encode the init `image` into VAE latents, noise them to `timestep` via the flow-match
        scheduler's `scale_noise`, and pack everything into Flux's 2x2-patch token layout.

        Returns:
            `tuple`: `(latents, noise, image_latents, latent_image_ids)` — the first three are
            packed by `_pack_latents`; `latent_image_ids` is produced by `_prepare_latent_image_ids`.
        """
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        # VAE applies 8x compression on images but we must also account for packing which requires
        # latent height and width to be divisible by 2.
        height = 2 * (int(height) // (self.vae_scale_factor * 2))
        width = 2 * (int(width) // (self.vae_scale_factor * 2))
        shape = (batch_size, num_channels_latents, height, width)
        latent_image_ids = self._prepare_latent_image_ids(batch_size, height // 2, width // 2, device, dtype)

        image = image.to(device=device, dtype=dtype)
        image_latents = self._encode_vae_image(image=image, generator=generator)

        # Broadcast a smaller image batch across the requested prompt batch when divisible.
        if batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] == 0:
            # expand init_latents for batch_size
            additional_image_per_prompt = batch_size // image_latents.shape[0]
            image_latents = torch.cat([image_latents] * additional_image_per_prompt, dim=0)
        elif batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] != 0:
            raise ValueError(
                f"Cannot duplicate `image` of batch size {image_latents.shape[0]} to {batch_size} text prompts."
            )
        else:
            image_latents = torch.cat([image_latents], dim=0)

        if latents is None:
            noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
            latents = self.scheduler.scale_noise(image_latents, timestep, noise)
        else:
            # Caller-supplied latents are treated as already noised and are also
            # reused as the `noise` tensor packed below.
            noise = latents.to(device)
            latents = noise

        noise = self._pack_latents(noise, batch_size, num_channels_latents, height, width)
        image_latents = self._pack_latents(image_latents, batch_size, num_channels_latents, height, width)
        latents = self._pack_latents(latents, batch_size, num_channels_latents, height, width)
        return latents, noise, image_latents, latent_image_ids

    def prepare_mask_latents(
        self,
        mask,
        masked_image,
        batch_size,
        num_channels_latents,
        num_images_per_prompt,
        height,
        width,
        dtype,
        device,
        generator,
    ):
        """
        Downsample `mask` to latent resolution, VAE-encode `masked_image` (unless it is
        already in latent form), duplicate both to the effective batch size, and pack
        them the same way as the image latents.

        Returns:
            `tuple`: packed `(mask, masked_image_latents)`.
        """
        # VAE applies 8x compression on images but we must also account for packing which requires
        # latent height and width to be divisible by 2.
        height = 2 * (int(height) // (self.vae_scale_factor * 2))
        width = 2 * (int(width) // (self.vae_scale_factor * 2))
        # resize the mask to latents shape as we concatenate the mask to the latents
        # we do that before converting to dtype to avoid breaking in case we're using cpu_offload
        # and half precision
        mask = torch.nn.functional.interpolate(mask, size=(height, width))
        mask = mask.to(device=device, dtype=dtype)

        batch_size = batch_size * num_images_per_prompt

        masked_image = masked_image.to(device=device, dtype=dtype)

        # 16 channels means `masked_image` is already a latent tensor — skip encoding.
        # NOTE(review): assumes the VAE latent space has 16 channels; confirm against
        # `self.vae.config.latent_channels`.
        if masked_image.shape[1] == 16:
            masked_image_latents = masked_image
        else:
            masked_image_latents = retrieve_latents(self.vae.encode(masked_image), generator=generator)

        masked_image_latents = (masked_image_latents - self.vae.config.shift_factor) * self.vae.config.scaling_factor

        # duplicate mask and masked_image_latents for each generation per prompt, using mps friendly method
        if mask.shape[0] < batch_size:
            if not batch_size % mask.shape[0] == 0:
                raise ValueError(
                    "The passed mask and the required batch size don't match. Masks are supposed to be duplicated to"
                    f" a total batch size of {batch_size}, but {mask.shape[0]} masks were passed. Make sure the number"
                    " of masks that you pass is divisible by the total requested batch size."
                )
            mask = mask.repeat(batch_size // mask.shape[0], 1, 1, 1)
        if masked_image_latents.shape[0] < batch_size:
            if not batch_size % masked_image_latents.shape[0] == 0:
                raise ValueError(
                    "The passed images and the required batch size don't match. Images are supposed to be duplicated"
                    f" to a total batch size of {batch_size}, but {masked_image_latents.shape[0]} images were passed."
                    " Make sure the number of images that you pass is divisible by the total requested batch size."
                )
            masked_image_latents = masked_image_latents.repeat(batch_size // masked_image_latents.shape[0], 1, 1, 1)

        # aligning device to prevent device errors when concating it with the latent model input
        masked_image_latents = masked_image_latents.to(device=device, dtype=dtype)

        masked_image_latents = self._pack_latents(
            masked_image_latents,
            batch_size,
            num_channels_latents,
            height,
            width,
        )
        # The single-channel mask is expanded to the latent channel count before packing
        # so it aligns with the packed latent layout.
        mask = self._pack_latents(
            mask.repeat(1, num_channels_latents, 1, 1),
            batch_size,
            num_channels_latents,
            height,
            width,
        )

        return mask, masked_image_latents

    # Copied from diffusers.pipelines.controlnet_sd3.pipeline_stable_diffusion_3_controlnet.StableDiffusion3ControlNetPipeline.prepare_image
    def prepare_image(
        self,
        image,
        width,
        height,
        batch_size,
        num_images_per_prompt,
        device,
        dtype,
        do_classifier_free_guidance=False,
        guess_mode=False,
    ):
        """
        Preprocess the ControlNet conditioning image: convert it to a tensor at the target
        size, repeat it to match the effective batch, and duplicate it for classifier-free
        guidance unless `guess_mode` is set.
        """
        if isinstance(image, torch.Tensor):
            pass
        else:
            image = self.image_processor.preprocess(image, height=height, width=width)

        image_batch_size = image.shape[0]

        if image_batch_size == 1:
            repeat_by = batch_size
        else:
            # image batch size is the same as prompt batch size
            repeat_by = num_images_per_prompt

        image = image.repeat_interleave(repeat_by, dim=0)

        image = image.to(device=device, dtype=dtype)

        if do_classifier_free_guidance and not guess_mode:
            image = torch.cat([image] * 2)
    def prepare_mask_latents_fill(
        self,
        mask,
        masked_image,
        batch_size,
        num_channels_latents,
        num_images_per_prompt,
        height,
        width,
        dtype,
        device,
        generator,
    ):
        """
        Prepare mask and masked-image latents in the Flux-Fill layout.

        Unlike `prepare_mask_latents`, the mask here is NOT VAE-compressed: it is reshaped so the
        `vae_scale_factor x vae_scale_factor` pixel neighborhood of each latent becomes channels,
        then packed. Returns `(mask, masked_image_latents)`, both packed via `self._pack_latents`.
        """
        # 1. calculate the height and width of the latents
        # VAE applies 8x compression on images but we must also account for packing which requires
        # latent height and width to be divisible by 2.
        height = 2 * (int(height) // (self.vae_scale_factor * 2))
        width = 2 * (int(width) // (self.vae_scale_factor * 2))

        # 2. encode the masked image (skip the VAE when the input is already a latent tensor)
        if masked_image.shape[1] == num_channels_latents:
            masked_image_latents = masked_image
        else:
            masked_image_latents = retrieve_latents_fill(self.vae.encode(masked_image), generator=generator)

        masked_image_latents = (masked_image_latents - self.vae.config.shift_factor) * self.vae.config.scaling_factor
        masked_image_latents = masked_image_latents.to(device=device, dtype=dtype)

        # 3. duplicate mask and masked_image_latents for each generation per prompt, using mps friendly method
        batch_size = batch_size * num_images_per_prompt
        if mask.shape[0] < batch_size:
            if not batch_size % mask.shape[0] == 0:
                raise ValueError(
                    "The passed mask and the required batch size don't match. Masks are supposed to be duplicated to"
                    f" a total batch size of {batch_size}, but {mask.shape[0]} masks were passed. Make sure the number"
                    " of masks that you pass is divisible by the total requested batch size."
                )
            mask = mask.repeat(batch_size // mask.shape[0], 1, 1, 1)
        if masked_image_latents.shape[0] < batch_size:
            if not batch_size % masked_image_latents.shape[0] == 0:
                raise ValueError(
                    "The passed images and the required batch size don't match. Images are supposed to be duplicated"
                    f" to a total batch size of {batch_size}, but {masked_image_latents.shape[0]} images were passed."
                    " Make sure the number of images that you pass is divisible by the total requested batch size."
                )
            masked_image_latents = masked_image_latents.repeat(batch_size // masked_image_latents.shape[0], 1, 1, 1)

        # 4. pack the masked_image_latents
        # batch_size, num_channels_latents, height, width -> batch_size, height//2 * width//2 , num_channels_latents*4
        masked_image_latents = self._pack_latents(
            masked_image_latents,
            batch_size,
            num_channels_latents,
            height,
            width,
        )

        # 5. resize mask to latents shape as we concatenate the mask to the latents
        mask = mask[:, 0, :, :]  # batch_size, 8 * height, 8 * width (mask has not been 8x compressed)
        mask = mask.view(
            batch_size, height, self.vae_scale_factor, width, self.vae_scale_factor
        )  # batch_size, height, 8, width, 8
        mask = mask.permute(0, 2, 4, 1, 3)  # batch_size, 8, 8, height, width
        mask = mask.reshape(
            batch_size, self.vae_scale_factor * self.vae_scale_factor, height, width
        )  # batch_size, 8*8, height, width

        # 6. pack the mask:
        # batch_size, 64, height, width -> batch_size, height//2 * width//2 , 64*2*2
        mask = self._pack_latents(
            mask,
            batch_size,
            self.vae_scale_factor * self.vae_scale_factor,
            height,
            width,
        )
        mask = mask.to(device=device, dtype=dtype)

        return mask, masked_image_latents

    @property
    def guidance_scale(self):
        # Classifier-free guidance weight stored by `__call__`.
        return self._guidance_scale

    @property
    def joint_attention_kwargs(self):
        # Extra kwargs forwarded to the transformer/controlnet attention, stored by `__call__`.
        return self._joint_attention_kwargs

    @property
    def num_timesteps(self):
        # Length of the timestep schedule of the most recent run.
        return self._num_timesteps

    @property
    def interrupt(self):
        # Flag checked inside the denoising loop to allow early abort via callbacks.
        return self._interrupt
List[int]]] = None, controlnet_conditioning_scale: Union[float, List[float]] = 1.0, num_images_per_prompt: Optional[int] = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, prompt_embeds: Optional[torch.FloatTensor] = None, pooled_prompt_embeds: Optional[torch.FloatTensor] = None, output_type: str | None = "pil", return_dict: bool = True, joint_attention_kwargs: Optional[Dict[str, Any]] = None, callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, callback_on_step_end_tensor_inputs: List[str] = ["latents"], max_sequence_length: int = 512, ): """ Function invoked when calling the pipeline for generation. Args: prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide the image generation. prompt_2 (`str` or `List[str]`, *optional*): The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. image (`PIL.Image.Image` or `List[PIL.Image.Image]` or `torch.FloatTensor`): The image(s) to inpaint. mask_image (`PIL.Image.Image` or `List[PIL.Image.Image]` or `torch.FloatTensor`): The mask image(s) to use for inpainting. White pixels in the mask will be repainted, while black pixels will be preserved. masked_image_latents (`torch.FloatTensor`, *optional*): Pre-generated masked image latents. control_image (`PIL.Image.Image` or `List[PIL.Image.Image]` or `torch.FloatTensor`): The ControlNet input condition. Image to control the generation. height (`int`, *optional*, defaults to self.default_sample_size * self.vae_scale_factor): The height in pixels of the generated image. width (`int`, *optional*, defaults to self.default_sample_size * self.vae_scale_factor): The width in pixels of the generated image. strength (`float`, *optional*, defaults to 0.6): Conceptually, indicates how much to inpaint the masked area. Must be between 0 and 1. padding_mask_crop (`int`, *optional*): The size of the padding to use when cropping the mask. 
num_inference_steps (`int`, *optional*, defaults to 28): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. sigmas (`List[float]`, *optional*): Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed will be used. guidance_scale (`float`, *optional*, defaults to 7.0): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0): The percentage of total steps at which the ControlNet starts applying. control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0): The percentage of total steps at which the ControlNet stops applying. control_mode (`int` or `List[int]`, *optional*): The mode for the ControlNet. If multiple ControlNets are used, this should be a list. controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0): The outputs of the ControlNet are multiplied by `controlnet_conditioning_scale` before they are added to the residual in the original transformer. num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): One or more [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. 
pooled_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated pooled text embeddings. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between `PIL.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.flux.FluxPipelineOutput`] instead of a plain tuple. joint_attention_kwargs (`dict`, *optional*): Additional keyword arguments to be passed to the joint attention mechanism. callback_on_step_end (`Callable`, *optional*): A function that calls at the end of each denoising step during the inference. callback_on_step_end_tensor_inputs (`List[str]`, *optional*): The list of tensor inputs for the `callback_on_step_end` function. max_sequence_length (`int`, *optional*, defaults to 512): The maximum length of the sequence to be generated. Examples: Returns: [`~pipelines.flux.FluxPipelineOutput`] or `tuple`: [`~pipelines.flux.FluxPipelineOutput`] if `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is a list with the generated images. """ height = height or self.default_sample_size * self.vae_scale_factor width = width or self.default_sample_size * self.vae_scale_factor global_height = height global_width = width if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list): control_guidance_start = len(control_guidance_end) * [control_guidance_start] elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list): control_guidance_end = len(control_guidance_start) * [control_guidance_end] elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list): mult = len(self.controlnet.nets) if isinstance(self.controlnet, FluxMultiControlNetModel) else 1 control_guidance_start, control_guidance_end = ( mult * [control_guidance_start], mult * [control_guidance_end], ) # 1. 
Check inputs self.check_inputs( prompt, prompt_2, image, mask_image, strength, height, width, output_type=output_type, prompt_embeds=prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, padding_mask_crop=padding_mask_crop, max_sequence_length=max_sequence_length, ) self._guidance_scale = guidance_scale self._joint_attention_kwargs = joint_attention_kwargs self._interrupt = False # 2. Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device dtype = self.transformer.dtype # 3. Encode input prompt lora_scale = ( self.joint_attention_kwargs.get("scale", None) if self.joint_attention_kwargs is not None else None ) prompt_embeds, pooled_prompt_embeds, text_ids = self.encode_prompt( prompt=prompt, prompt_2=prompt_2, prompt_embeds=prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, device=device, num_images_per_prompt=num_images_per_prompt, max_sequence_length=max_sequence_length, lora_scale=lora_scale, ) # 4. Preprocess mask and image if padding_mask_crop is not None: crops_coords = self.mask_processor.get_crop_region( mask_image, global_width, global_height, pad=padding_mask_crop ) resize_mode = "fill" else: crops_coords = None resize_mode = "default" original_image = image init_image = self.image_processor.preprocess( image, height=global_height, width=global_width, crops_coords=crops_coords, resize_mode=resize_mode ) init_image = init_image.to(dtype=torch.float32) # 5. 
Prepare control image # num_channels_latents = self.transformer.config.in_channels // 4 num_channels_latents = self.vae.config.latent_channels if isinstance(self.controlnet, FluxControlNetModel): control_image = self.prepare_image( image=control_image, width=width, height=height, batch_size=batch_size * num_images_per_prompt, num_images_per_prompt=num_images_per_prompt, device=device, dtype=self.vae.dtype, ) height, width = control_image.shape[-2:] # xlab controlnet has a input_hint_block and instantx controlnet does not controlnet_blocks_repeat = False if self.controlnet.input_hint_block is None else True if self.controlnet.input_hint_block is None: # vae encode control_image = retrieve_latents(self.vae.encode(control_image), generator=generator) control_image = (control_image - self.vae.config.shift_factor) * self.vae.config.scaling_factor # pack height_control_image, width_control_image = control_image.shape[2:] control_image = self._pack_latents( control_image, batch_size * num_images_per_prompt, num_channels_latents, height_control_image, width_control_image, ) # set control mode if control_mode is not None: control_mode = torch.tensor(control_mode).to(device, dtype=torch.long) control_mode = control_mode.reshape([-1, 1]) elif isinstance(self.controlnet, FluxMultiControlNetModel): control_images = [] # xlab controlnet has a input_hint_block and instantx controlnet does not controlnet_blocks_repeat = False if self.controlnet.nets[0].input_hint_block is None else True for i, control_image_ in enumerate(control_image): control_image_ = self.prepare_image( image=control_image_, width=width, height=height, batch_size=batch_size * num_images_per_prompt, num_images_per_prompt=num_images_per_prompt, device=device, dtype=self.vae.dtype, ) height, width = control_image_.shape[-2:] if self.controlnet.nets[0].input_hint_block is None: # vae encode control_image_ = retrieve_latents(self.vae.encode(control_image_), generator=generator) control_image_ = (control_image_ - 
self.vae.config.shift_factor) * self.vae.config.scaling_factor # pack height_control_image, width_control_image = control_image_.shape[2:] control_image_ = self._pack_latents( control_image_, batch_size * num_images_per_prompt, num_channels_latents, height_control_image, width_control_image, ) control_images.append(control_image_) control_image = control_images # set control mode control_mode_ = [] if isinstance(control_mode, list): for cmode in control_mode: if cmode is None: control_mode_.append(-1) else: control_mode_.append(cmode) control_mode = torch.tensor(control_mode_).to(device, dtype=torch.long) control_mode = control_mode.reshape([-1, 1]) # 6. Prepare timesteps sigmas = np.linspace(1.0, 1 / num_inference_steps, num_inference_steps) if sigmas is None else sigmas image_seq_len = (int(global_height) // self.vae_scale_factor // 2) * ( int(global_width) // self.vae_scale_factor // 2 ) mu = calculate_shift( image_seq_len, self.scheduler.config.get("base_image_seq_len", 256), self.scheduler.config.get("max_image_seq_len", 4096), self.scheduler.config.get("base_shift", 0.5), self.scheduler.config.get("max_shift", 1.15), ) timesteps, num_inference_steps = retrieve_timesteps( self.scheduler, num_inference_steps, device, sigmas=sigmas, mu=mu, ) timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device) if num_inference_steps < 1: raise ValueError( f"After adjusting the num_inference_steps by strength parameter: {strength}, the number of pipeline" f"steps is {num_inference_steps} which is < 1 and not appropriate for this pipeline." ) latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) # 7. Prepare latent variables latents, noise, image_latents, latent_image_ids = self.prepare_latents( init_image, latent_timestep, batch_size * num_images_per_prompt, num_channels_latents, global_height, global_width, prompt_embeds.dtype, device, generator, latents, ) # 8. 
Prepare mask latents mask_condition = self.mask_processor.preprocess( mask_image, height=global_height, width=global_width, resize_mode=resize_mode, crops_coords=crops_coords ) if masked_image_latents is None: masked_image = init_image * (mask_condition < 0.5) else: masked_image = masked_image_latents mask, masked_image_latents = self.prepare_mask_latents( mask_condition, masked_image, batch_size, num_channels_latents, num_images_per_prompt, global_height, global_width, prompt_embeds.dtype, device, generator, ) mask_image_fill = self.mask_processor.preprocess(mask_image, height=height, width=width) masked_image_fill = init_image * (1 - mask_image_fill) masked_image_fill = masked_image_fill.to(dtype=self.vae.dtype, device=device) mask_fill, masked_latents_fill = self.prepare_mask_latents_fill( mask_image_fill, masked_image_fill, batch_size, num_channels_latents, num_images_per_prompt, height, width, prompt_embeds.dtype, device, generator, ) controlnet_keep = [] for i in range(len(timesteps)): keeps = [ 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e) for s, e in zip(control_guidance_start, control_guidance_end) ] controlnet_keep.append(keeps[0] if isinstance(self.controlnet, FluxControlNetModel) else keeps) # 9. 
Denoising loop num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) self._num_timesteps = len(timesteps) with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): if self.interrupt: continue timestep = t.expand(latents.shape[0]).to(latents.dtype) # predict the noise residual if isinstance(self.controlnet, FluxMultiControlNetModel): use_guidance = self.controlnet.nets[0].config.guidance_embeds else: use_guidance = self.controlnet.config.guidance_embeds if use_guidance: guidance = torch.full([1], guidance_scale, device=device, dtype=torch.float32) guidance = guidance.expand(latents.shape[0]) else: guidance = None if isinstance(controlnet_keep[i], list): cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])] else: controlnet_cond_scale = controlnet_conditioning_scale if isinstance(controlnet_cond_scale, list): controlnet_cond_scale = controlnet_cond_scale[0] cond_scale = controlnet_cond_scale * controlnet_keep[i] controlnet_block_samples, controlnet_single_block_samples = self.controlnet( hidden_states=latents, controlnet_cond=control_image, controlnet_mode=control_mode, conditioning_scale=cond_scale, timestep=timestep / 1000, guidance=guidance, pooled_projections=pooled_prompt_embeds, encoder_hidden_states=prompt_embeds, txt_ids=text_ids, img_ids=latent_image_ids, joint_attention_kwargs=self.joint_attention_kwargs, return_dict=False, ) if self.transformer.config.guidance_embeds: guidance = torch.full([1], guidance_scale, device=device, dtype=torch.float32) guidance = guidance.expand(latents.shape[0]) else: guidance = None masked_image_latents_fill = torch.cat((masked_latents_fill, mask_fill), dim=-1) latent_model_input = torch.cat([latents, masked_image_latents_fill], dim=2) noise_pred = self.transformer( hidden_states=latent_model_input, timestep=timestep / 1000, guidance=guidance, pooled_projections=pooled_prompt_embeds, encoder_hidden_states=prompt_embeds, 
controlnet_block_samples=controlnet_block_samples, controlnet_single_block_samples=controlnet_single_block_samples, txt_ids=text_ids, img_ids=latent_image_ids, joint_attention_kwargs=self.joint_attention_kwargs, return_dict=False, controlnet_blocks_repeat=controlnet_blocks_repeat, )[0] # compute the previous noisy sample x_t -> x_t-1 latents_dtype = latents.dtype latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0] # For inpainting, we need to apply the mask and add the masked image latents init_latents_proper = image_latents init_mask = mask if i < len(timesteps) - 1: noise_timestep = timesteps[i + 1] init_latents_proper = self.scheduler.scale_noise( init_latents_proper, torch.tensor([noise_timestep]), noise ) latents = (1 - init_mask) * init_latents_proper + init_mask * latents if latents.dtype != latents_dtype: if torch.backends.mps.is_available(): # some platforms (eg. apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272 latents = latents.to(latents_dtype) # call the callback, if provided if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop("latents", latents) prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) control_image = callback_outputs.pop("control_image", control_image) mask = callback_outputs.pop("mask", mask) masked_image_latents = callback_outputs.pop("masked_image_latents", masked_image_latents) if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if XLA_AVAILABLE: xm.mark_step() # Post-processing if output_type == "latent": image = latents else: latents = self._unpack_latents(latents, global_height, global_width, self.vae_scale_factor) latents = (latents / self.vae.config.scaling_factor) + self.vae.config.shift_factor image = 
self.vae.decode(latents, return_dict=False)[0] image = self.image_processor.postprocess(image, output_type=output_type) # Offload all models self.maybe_free_model_hooks() if not return_dict: return (image,) return FluxPipelineOutput(images=image)
{ "repo_id": "huggingface/diffusers", "file_path": "examples/community/pipline_flux_fill_controlnet_Inpaint.py", "license": "Apache License 2.0", "lines": 1148, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
huggingface/diffusers:src/diffusers/pipelines/sana_video/pipeline_output.py
from dataclasses import dataclass

import torch

from ...utils import BaseOutput


@dataclass
class SanaVideoPipelineOutput(BaseOutput):
    r"""
    Output class for Sana-Video pipelines.

    Args:
        frames (`torch.Tensor`, `np.ndarray`, or `list[list[PIL.Image.Image]]`):
            List of video outputs - It can be a nested list of length `batch_size`, with each sub-list containing
            denoised PIL image sequences of length `num_frames`. It can also be a NumPy array or Torch tensor of shape
            `(batch_size, num_frames, channels, height, width)`.
    """

    # Decoded video frames; the concrete type depends on the pipeline's requested output format.
    frames: torch.Tensor
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/pipelines/sana_video/pipeline_output.py", "license": "Apache License 2.0", "lines": 14, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
documentation
huggingface/diffusers:src/diffusers/pipelines/sana_video/pipeline_sana_video_i2v.py
# Copyright 2025 SANA-Video Authors and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import html import inspect import re import urllib.parse as ul import warnings from typing import Any, Callable import PIL import torch from transformers import Gemma2PreTrainedModel, GemmaTokenizer, GemmaTokenizerFast from ...callbacks import MultiPipelineCallbacks, PipelineCallback from ...image_processor import PipelineImageInput from ...loaders import SanaLoraLoaderMixin from ...models import AutoencoderDC, AutoencoderKLWan, SanaVideoTransformer3DModel from ...schedulers import FlowMatchEulerDiscreteScheduler from ...utils import ( BACKENDS_MAPPING, USE_PEFT_BACKEND, is_bs4_available, is_ftfy_available, is_torch_xla_available, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers, ) from ...utils.torch_utils import get_device, is_torch_version, randn_tensor from ...video_processor import VideoProcessor from ..pipeline_utils import DiffusionPipeline from .pipeline_output import SanaVideoPipelineOutput from .pipeline_sana_video import ASPECT_RATIO_480_BIN, ASPECT_RATIO_720_BIN if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) # pylint: disable=invalid-name if is_bs4_available(): from bs4 import BeautifulSoup if is_ftfy_available(): import ftfy EXAMPLE_DOC_STRING = """ Examples: ```py >>> import torch >>> from diffusers import 
SanaImageToVideoPipeline >>> from diffusers.utils import export_to_video, load_image >>> pipe = SanaImageToVideoPipeline.from_pretrained("Efficient-Large-Model/SANA-Video_2B_480p_diffusers") >>> pipe.transformer.to(torch.bfloat16) >>> pipe.text_encoder.to(torch.bfloat16) >>> pipe.vae.to(torch.float32) >>> pipe.to("cuda") >>> motion_score = 30 >>> prompt = "A woman stands against a stunning sunset backdrop, her long, wavy brown hair gently blowing in the breeze. She wears a sleeveless, light-colored blouse with a deep V-neckline, which accentuates her graceful posture. The warm hues of the setting sun cast a golden glow across her face and hair, creating a serene and ethereal atmosphere. The background features a blurred landscape with soft, rolling hills and scattered clouds, adding depth to the scene. The camera remains steady, capturing the tranquil moment from a medium close-up angle." >>> negative_prompt = "A chaotic sequence with misshapen, deformed limbs in heavy motion blur, sudden disappearance, jump cuts, jerky movements, rapid shot changes, frames out of sync, inconsistent character shapes, temporal artifacts, jitter, and ghosting effects, creating a disorienting visual experience." >>> motion_prompt = f" motion score: {motion_score}." >>> prompt = prompt + motion_prompt >>> image = load_image("https://raw.githubusercontent.com/NVlabs/Sana/refs/heads/main/asset/samples/i2v-1.png") >>> output = pipe( ... image=image, ... prompt=prompt, ... negative_prompt=negative_prompt, ... height=480, ... width=832, ... frames=81, ... guidance_scale=6, ... num_inference_steps=50, ... generator=torch.Generator(device="cuda").manual_seed(42), ... 
).frames[0] >>> export_to_video(output, "sana-ti2v-output.mp4", fps=16) ``` """ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps def retrieve_timesteps( scheduler, num_inference_steps: int | None = None, device: str | torch.device | None = None, timesteps: list[int] | None = None, sigmas: list[float] | None = None, **kwargs, ): r""" Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. Args: scheduler (`SchedulerMixin`): The scheduler to get timesteps from. num_inference_steps (`int`): The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` must be `None`. device (`str` or `torch.device`, *optional*): The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. timesteps (`list[int]`, *optional*): Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, `num_inference_steps` and `sigmas` must be `None`. sigmas (`list[float]`, *optional*): Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, `num_inference_steps` and `timesteps` must be `None`. Returns: `tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the second element is the number of inference steps. """ if timesteps is not None and sigmas is not None: raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") if timesteps is not None: accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" timestep schedules. 
Please check whether you are using the correct scheduler." ) scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) elif sigmas is not None: accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accept_sigmas: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" sigmas schedules. Please check whether you are using the correct scheduler." ) scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return timesteps, num_inference_steps # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents def retrieve_latents( encoder_output: torch.Tensor, generator: torch.Generator | None = None, sample_mode: str = "sample" ): if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": return encoder_output.latent_dist.sample(generator) elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": return encoder_output.latent_dist.mode() elif hasattr(encoder_output, "latents"): return encoder_output.latents else: raise AttributeError("Could not access latents of provided encoder_output") class SanaImageToVideoPipeline(DiffusionPipeline, SanaLoraLoaderMixin): r""" Pipeline for image/text-to-video generation using [Sana](https://huggingface.co/papers/2509.24695). This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). Args: tokenizer ([`GemmaTokenizer`] or [`GemmaTokenizerFast`]): The tokenizer used to tokenize the prompt. 
text_encoder ([`Gemma2PreTrainedModel`]): Text encoder model to encode the input prompts. vae ([`AutoencoderKLWan` or `AutoencoderDCAEV`]): Variational Auto-Encoder (VAE) Model to encode and decode videos to and from latent representations. transformer ([`SanaVideoTransformer3DModel`]): Conditional Transformer to denoise the input latents. scheduler ([`FlowMatchEulerDiscreteScheduler`]): A scheduler to be used in combination with `transformer` to denoise the encoded video latents. """ # fmt: off bad_punct_regex = re.compile(r"[" + "#®•©™&@·º½¾¿¡§~" + r"\)" + r"\(" + r"\]" + r"\[" + r"\}" + r"\{" + r"\|" + "\\" + r"\/" + r"\*" + r"]{1,}") # fmt: on model_cpu_offload_seq = "text_encoder->transformer->vae" _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"] def __init__( self, tokenizer: GemmaTokenizer | GemmaTokenizerFast, text_encoder: Gemma2PreTrainedModel, vae: AutoencoderDC | AutoencoderKLWan, transformer: SanaVideoTransformer3DModel, scheduler: FlowMatchEulerDiscreteScheduler, ): super().__init__() self.register_modules( tokenizer=tokenizer, text_encoder=text_encoder, vae=vae, transformer=transformer, scheduler=scheduler ) self.vae_scale_factor_temporal = self.vae.config.scale_factor_temporal if getattr(self, "vae", None) else 4 self.vae_scale_factor_spatial = self.vae.config.scale_factor_spatial if getattr(self, "vae", None) else 8 self.vae_scale_factor = self.vae_scale_factor_spatial self.transformer_spatial_patch_size = ( self.transformer.config.patch_size[1] if getattr(self, "transformer", None) is not None else 1 ) self.transformer_temporal_patch_size = ( self.transformer.config.patch_size[0] if getattr(self, "transformer") is not None else 1 ) self.video_processor = VideoProcessor(vae_scale_factor=self.vae_scale_factor_spatial) # Copied from diffusers.pipelines.sana.pipeline_sana.SanaPipeline._get_gemma_prompt_embeds def _get_gemma_prompt_embeds( self, prompt: str | list[str], device: torch.device, dtype: torch.dtype, 
clean_caption: bool = False, max_sequence_length: int = 300, complex_human_instruction: list[str] | None = None, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `list[str]`, *optional*): prompt to be encoded device: (`torch.device`, *optional*): torch device to place the resulting embeddings on clean_caption (`bool`, defaults to `False`): If `True`, the function will preprocess and clean the provided caption before encoding. max_sequence_length (`int`, defaults to 300): Maximum sequence length to use for the prompt. complex_human_instruction (`list[str]`, defaults to `complex_human_instruction`): If `complex_human_instruction` is not empty, the function will use the complex Human instruction for the prompt. """ prompt = [prompt] if isinstance(prompt, str) else prompt if getattr(self, "tokenizer", None) is not None: self.tokenizer.padding_side = "right" prompt = self._text_preprocessing(prompt, clean_caption=clean_caption) # prepare complex human instruction if not complex_human_instruction: max_length_all = max_sequence_length else: chi_prompt = "\n".join(complex_human_instruction) prompt = [chi_prompt + p for p in prompt] num_chi_prompt_tokens = len(self.tokenizer.encode(chi_prompt)) max_length_all = num_chi_prompt_tokens + max_sequence_length - 2 text_inputs = self.tokenizer( prompt, padding="max_length", max_length=max_length_all, truncation=True, add_special_tokens=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids prompt_attention_mask = text_inputs.attention_mask prompt_attention_mask = prompt_attention_mask.to(device) prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=prompt_attention_mask) prompt_embeds = prompt_embeds[0].to(dtype=dtype, device=device) return prompt_embeds, prompt_attention_mask # Copied from diffusers.pipelines.sana_video.pipeline_sana_video.SanaVideoPipeline.encode_prompt def encode_prompt( self, prompt: str | list[str], do_classifier_free_guidance: bool = True, 
        negative_prompt: str = "",
        num_videos_per_prompt: int = 1,
        device: torch.device | None = None,
        prompt_embeds: torch.Tensor | None = None,
        negative_prompt_embeds: torch.Tensor | None = None,
        prompt_attention_mask: torch.Tensor | None = None,
        negative_prompt_attention_mask: torch.Tensor | None = None,
        clean_caption: bool = False,
        max_sequence_length: int = 300,
        complex_human_instruction: list[str] | None = None,
        lora_scale: float | None = None,
    ):
        r"""
        Encodes the prompt into text encoder hidden states.

        Args:
            prompt (`str` or `list[str]`, *optional*):
                prompt to be encoded
            negative_prompt (`str` or `list[str]`, *optional*):
                The prompt not to guide the video generation. If not defined, one has to pass `negative_prompt_embeds`
                instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). For
                PixArt-Alpha, this should be "".
            do_classifier_free_guidance (`bool`, *optional*, defaults to `True`):
                whether to use classifier free guidance or not
            num_videos_per_prompt (`int`, *optional*, defaults to 1):
                number of videos that should be generated per prompt
            device: (`torch.device`, *optional*):
                torch device to place the resulting embeddings on
            prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                provided, text embeddings will be generated from `prompt` input argument.
            negative_prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated negative text embeddings. For Sana, it's should be the embeddings of the "" string.
            clean_caption (`bool`, defaults to `False`):
                If `True`, the function will preprocess and clean the provided caption before encoding.
            max_sequence_length (`int`, defaults to 300): Maximum sequence length to use for the prompt.
            complex_human_instruction (`list[str]`, defaults to `complex_human_instruction`):
                If `complex_human_instruction` is not empty, the function will use the complex Human instruction for
                the prompt.
        """
        if device is None:
            device = self._execution_device

        if self.text_encoder is not None:
            dtype = self.text_encoder.dtype
        else:
            dtype = None

        # set lora scale so that monkey patched LoRA
        # function of text encoder can correctly access it
        if lora_scale is not None and isinstance(self, SanaLoraLoaderMixin):
            self._lora_scale = lora_scale

            # dynamically adjust the LoRA scale
            if self.text_encoder is not None and USE_PEFT_BACKEND:
                scale_lora_layers(self.text_encoder, lora_scale)

        # Derive the batch size from whichever prompt representation was supplied.
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        if getattr(self, "tokenizer", None) is not None:
            self.tokenizer.padding_side = "right"

        # See Section 3.1. of the paper.
        # Keep the BOS token plus the trailing `max_length - 1` tokens — this drops any
        # "complex human instruction" prefix tokens while retaining the user prompt tail.
        max_length = max_sequence_length
        select_index = [0] + list(range(-max_length + 1, 0))

        if prompt_embeds is None:
            prompt_embeds, prompt_attention_mask = self._get_gemma_prompt_embeds(
                prompt=prompt,
                device=device,
                dtype=dtype,
                clean_caption=clean_caption,
                max_sequence_length=max_sequence_length,
                complex_human_instruction=complex_human_instruction,
            )

            prompt_embeds = prompt_embeds[:, select_index]
            prompt_attention_mask = prompt_attention_mask[:, select_index]

        bs_embed, seq_len, _ = prompt_embeds.shape
        # duplicate text embeddings and attention mask for each generation per prompt, using mps friendly method
        prompt_embeds = prompt_embeds.repeat(1, num_videos_per_prompt, 1)
        prompt_embeds = prompt_embeds.view(bs_embed * num_videos_per_prompt, seq_len, -1)
        prompt_attention_mask = prompt_attention_mask.view(bs_embed, -1)
        prompt_attention_mask = prompt_attention_mask.repeat(num_videos_per_prompt, 1)

        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance and negative_prompt_embeds is None:
            negative_prompt = [negative_prompt] * batch_size if isinstance(negative_prompt, str) else negative_prompt
            # `complex_human_instruction=False` — the instruction prefix is never applied to the
            # negative prompt, only to the positive one.
            negative_prompt_embeds, negative_prompt_attention_mask = self._get_gemma_prompt_embeds(
                prompt=negative_prompt,
                device=device,
                dtype=dtype,
                clean_caption=clean_caption,
                max_sequence_length=max_sequence_length,
                complex_human_instruction=False,
            )

        if do_classifier_free_guidance:
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]

            negative_prompt_embeds = negative_prompt_embeds.to(dtype=dtype, device=device)

            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_videos_per_prompt, 1)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_videos_per_prompt, seq_len, -1)

            # NOTE(review): uses `bs_embed` (from prompt_embeds) rather than `batch_size` — the two
            # agree in all supported call paths, but confirm if prompt/embeds batch sizes can diverge.
            negative_prompt_attention_mask = negative_prompt_attention_mask.view(bs_embed, -1)
            negative_prompt_attention_mask = negative_prompt_attention_mask.repeat(num_videos_per_prompt, 1)
        else:
            negative_prompt_embeds = None
            negative_prompt_attention_mask = None

        if self.text_encoder is not None:
            if isinstance(self, SanaLoraLoaderMixin) and USE_PEFT_BACKEND:
                # Retrieve the original scale by scaling back the LoRA layers
                unscale_lora_layers(self.text_encoder, lora_scale)

        return prompt_embeds, prompt_attention_mask, negative_prompt_embeds, negative_prompt_attention_mask

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
    def prepare_extra_step_kwargs(self, generator, eta):
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://huggingface.co/papers/2010.02502
        # and should be between [0, 1]

        # Only forward kwargs that this scheduler's `step()` actually accepts.
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        # check if the scheduler accepts generator
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs["generator"] = generator
        return extra_step_kwargs

    def check_inputs(
        self,
        prompt,
        image,
        height,
        width,
        callback_on_step_end_tensor_inputs=None,
        negative_prompt=None,
        prompt_embeds=None,
        negative_prompt_embeds=None,
        prompt_attention_mask=None,
        negative_prompt_attention_mask=None,
    ):
        """Validate `__call__` arguments, raising `ValueError` on any inconsistent combination."""
        # Spatial dims must be divisible by 32 (VAE + transformer patch downsampling).
        if height % 32 != 0 or width % 32 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 32 but are {height} and {width}.")

        if image is not None and not isinstance(image, torch.Tensor) and not isinstance(image, PIL.Image.Image):
            raise ValueError(f"`image` has to be of type `torch.Tensor` or `PIL.Image.Image` but is {type(image)}")

        if callback_on_step_end_tensor_inputs is not None and not all(
            k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
        ):
            raise ValueError(
                f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
            )

        # Exactly one of `prompt` / `prompt_embeds` must be provided.
        if prompt is not None and prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
                " only forward one of the two."
            )
        elif prompt is None and prompt_embeds is None:
            raise ValueError(
                "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
            )
        elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if prompt is not None and negative_prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `prompt`: {prompt} and `negative_prompt_embeds`:"
                f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
            )

        if negative_prompt is not None and negative_prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
                f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
            )

        # Pre-computed embeddings must always come with their attention masks.
        if prompt_embeds is not None and prompt_attention_mask is None:
            raise ValueError("Must provide `prompt_attention_mask` when specifying `prompt_embeds`.")

        if negative_prompt_embeds is not None and negative_prompt_attention_mask is None:
            raise ValueError("Must provide `negative_prompt_attention_mask` when specifying `negative_prompt_embeds`.")

        if prompt_embeds is not None and negative_prompt_embeds is not None:
            if prompt_embeds.shape != negative_prompt_embeds.shape:
                raise ValueError(
                    "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
                    f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
                    f" {negative_prompt_embeds.shape}."
                )
            if prompt_attention_mask.shape != negative_prompt_attention_mask.shape:
                raise ValueError(
                    "`prompt_attention_mask` and `negative_prompt_attention_mask` must have the same shape when passed directly, but"
                    f" got: `prompt_attention_mask` {prompt_attention_mask.shape} != `negative_prompt_attention_mask`"
                    f" {negative_prompt_attention_mask.shape}."
                )

    # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline._text_preprocessing
    def _text_preprocessing(self, text, clean_caption=False):
        """Normalize raw prompt strings; with `clean_caption=True` runs the full caption-cleaning pass."""
        # Caption cleaning needs bs4 and ftfy; silently degrade to plain lowercasing if missing.
        if clean_caption and not is_bs4_available():
            logger.warning(BACKENDS_MAPPING["bs4"][-1].format("Setting `clean_caption=True`"))
            logger.warning("Setting `clean_caption` to False...")
            clean_caption = False

        if clean_caption and not is_ftfy_available():
            logger.warning(BACKENDS_MAPPING["ftfy"][-1].format("Setting `clean_caption=True`"))
            logger.warning("Setting `clean_caption` to False...")
            clean_caption = False

        if not isinstance(text, (tuple, list)):
            text = [text]

        def process(text: str):
            if clean_caption:
                # NOTE(review): `_clean_caption` is intentionally applied twice here (inherited from
                # the DeepFloyd IF pipeline) — a second pass catches artifacts exposed by the first.
                text = self._clean_caption(text)
                text = self._clean_caption(text)
            else:
                text = text.lower().strip()
            return text

        return [process(t) for t in text]

    # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline._clean_caption
    def _clean_caption(self, caption):
        """Aggressively strip URLs, markup, IDs, and junk characters from a caption string."""
        caption = str(caption)
        caption = ul.unquote_plus(caption)
        caption = caption.strip().lower()
        caption = re.sub("<person>", "person", caption)
        # urls:
        caption = re.sub(
            r"\b((?:https?:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))",  # noqa
            "",
            caption,
        )  # regex for urls
        caption = re.sub(
            r"\b((?:www:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))",  # noqa
            "",
            caption,
        )  # regex for urls
        # html:
        caption = BeautifulSoup(caption, features="html.parser").text

        # @<nickname>
        caption = re.sub(r"@[\w\d]+\b", "", caption)

        # Drop CJK and related Unicode ranges:
        # 31C0—31EF CJK Strokes
        # 31F0—31FF Katakana Phonetic Extensions
        # 3200—32FF Enclosed CJK Letters and Months
        # 3300—33FF CJK Compatibility
        # 3400—4DBF CJK Unified Ideographs Extension A
        # 4DC0—4DFF Yijing Hexagram Symbols
        # 4E00—9FFF CJK Unified Ideographs
        caption = re.sub(r"[\u31c0-\u31ef]+", "", caption)
        caption = re.sub(r"[\u31f0-\u31ff]+", "", caption)
        caption = re.sub(r"[\u3200-\u32ff]+", "", caption)
        caption = re.sub(r"[\u3300-\u33ff]+", "", caption)
        caption = re.sub(r"[\u3400-\u4dbf]+", "", caption)
        caption = re.sub(r"[\u4dc0-\u4dff]+", "", caption)
        caption = re.sub(r"[\u4e00-\u9fff]+", "", caption)
        #######################################################

        # все виды тире / all types of dash --> "-"
        caption = re.sub(
            r"[\u002D\u058A\u05BE\u1400\u1806\u2010-\u2015\u2E17\u2E1A\u2E3A\u2E3B\u2E40\u301C\u3030\u30A0\uFE31\uFE32\uFE58\uFE63\uFF0D]+",  # noqa
            "-",
            caption,
        )

        # normalize quotes to one standard
        caption = re.sub(r"[`´«»“”¨]", '"', caption)
        caption = re.sub(r"[‘’]", "'", caption)

        # &quot;
        caption = re.sub(r"&quot;?", "", caption)
        # &amp
        caption = re.sub(r"&amp", "", caption)

        # ip addresses:
        caption = re.sub(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}", " ", caption)

        # article ids:
        caption = re.sub(r"\d:\d\d\s+$", "", caption)

        # \n
        caption = re.sub(r"\\n", " ", caption)

        # "#123"
        caption = re.sub(r"#\d{1,3}\b", "", caption)
        # "#12345.."
        caption = re.sub(r"#\d{5,}\b", "", caption)
        # "123456.."
        caption = re.sub(r"\b\d{6,}\b", "", caption)
        # filenames:
        caption = re.sub(r"[\S]+\.(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)", "", caption)

        #
        caption = re.sub(r"[\"\']{2,}", r'"', caption)  # """AUSVERKAUFT"""
        caption = re.sub(r"[\.]{2,}", r" ", caption)  # """AUSVERKAUFT"""

        caption = re.sub(self.bad_punct_regex, r" ", caption)  # ***AUSVERKAUFT***, #AUSVERKAUFT
        caption = re.sub(r"\s+\.\s+", r" ", caption)  # " . "

        # this-is-my-cute-cat / this_is_my_cute_cat
        regex2 = re.compile(r"(?:\-|\_)")
        if len(re.findall(regex2, caption)) > 3:
            caption = re.sub(regex2, " ", caption)

        caption = ftfy.fix_text(caption)
        caption = html.unescape(html.unescape(caption))

        caption = re.sub(r"\b[a-zA-Z]{1,3}\d{3,15}\b", "", caption)  # jc6640
        caption = re.sub(r"\b[a-zA-Z]+\d+[a-zA-Z]+\b", "", caption)  # jc6640vc
        caption = re.sub(r"\b\d+[a-zA-Z]+\d+\b", "", caption)  # 6640vc231

        caption = re.sub(r"(worldwide\s+)?(free\s+)?shipping", "", caption)
        caption = re.sub(r"(free\s)?download(\sfree)?", "", caption)
        caption = re.sub(r"\bclick\b\s(?:for|on)\s\w+", "", caption)
        caption = re.sub(r"\b(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)(\simage[s]?)?", "", caption)
        caption = re.sub(r"\bpage\s+\d+\b", "", caption)

        caption = re.sub(r"\b\d*[a-zA-Z]+\d+[a-zA-Z]+\d+[a-zA-Z\d]*\b", r" ", caption)  # j2d1a2a...

        caption = re.sub(r"\b\d+\.?\d*[xх×]\d+\.?\d*\b", "", caption)

        caption = re.sub(r"\b\s+\:\s+", r": ", caption)
        caption = re.sub(r"(\D[,\./])\b", r"\1 ", caption)
        caption = re.sub(r"\s+", " ", caption)

        # NOTE(review): result of `.strip()` is discarded — this statement is a no-op (the final
        # `return caption.strip()` below is what actually strips). Upstream copy has the same quirk.
        caption.strip()

        caption = re.sub(r"^[\"\']([\w\W]+)[\"\']$", r"\1", caption)
        caption = re.sub(r"^[\'\_,\-\:;]", r"", caption)
        caption = re.sub(r"[\'\_,\-\:\-\+]$", r"", caption)
        caption = re.sub(r"^\.\S+$", "", caption)

        return caption.strip()

    def prepare_latents(
        self,
        image: PipelineImageInput,
        batch_size: int,
        num_channels_latents: int = 16,
        height: int = 480,
        width: int = 832,
        num_frames: int = 81,
        dtype: torch.dtype | None = None,
        device: torch.device | None = None,
        generator: torch.Generator | list[torch.Generator] | None = None,
        latents: torch.Tensor | None = None,
    ) -> torch.Tensor:
        """
        Build the initial latent volume: random noise for all frames, with the VAE-encoded,
        normalized conditioning image written into the first latent frame.
        """
        num_latent_frames = (num_frames - 1) // self.vae_scale_factor_temporal + 1
        shape = (
            batch_size,
            num_channels_latents,
            num_latent_frames,
            int(height) // self.vae_scale_factor_spatial,
            int(width) // self.vae_scale_factor_spatial,
        )

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            latents = latents.to(device=device, dtype=dtype)

        image = image.unsqueeze(2)  # [B, C, 1, H, W]
        image = image.to(device=device, dtype=self.vae.dtype)
        if isinstance(generator, list):
            # `sample_mode="argmax"` gives a deterministic encoding (posterior mode, not a sample).
            image_latents = [retrieve_latents(self.vae.encode(image), sample_mode="argmax") for _ in generator]
            image_latents = torch.cat(image_latents)
        else:
            image_latents = retrieve_latents(self.vae.encode(image), sample_mode="argmax")
            image_latents = image_latents.repeat(batch_size, 1, 1, 1, 1)

        # Normalize image latents into the diffusion latent space using the VAE's statistics.
        latents_mean = (
            torch.tensor(self.vae.config.latents_mean)
            .view(1, -1, 1, 1, 1)
            .to(image_latents.device, image_latents.dtype)
        )
        latents_std = 1.0 / torch.tensor(self.vae.config.latents_std).view(1, -1, 1, 1, 1).to(
            image_latents.device, image_latents.dtype
        )
        image_latents = (image_latents - latents_mean) * latents_std

        # First latent frame is the (clean) conditioning image; the rest stay noise.
        latents[:, :, 0:1] = image_latents.to(dtype)

        return latents

    @property
    def guidance_scale(self):
        return self._guidance_scale

    @property
    def attention_kwargs(self):
        return self._attention_kwargs

    @property
    def do_classifier_free_guidance(self):
        # CFG is active whenever guidance_scale exceeds 1.
        return self._guidance_scale > 1.0

    @property
    def num_timesteps(self):
        return self._num_timesteps

    @property
    def interrupt(self):
        return self._interrupt

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image: PipelineImageInput,
        prompt: str | list[str] = None,
        negative_prompt: str = "",
        num_inference_steps: int = 50,
        timesteps: list[int] = None,
        sigmas: list[float] = None,
        guidance_scale: float = 6.0,
        num_videos_per_prompt: int | None = 1,
        height: int = 480,
        width: int = 832,
        frames: int = 81,
        eta: float = 0.0,
        generator: torch.Generator | list[torch.Generator] | None = None,
        latents: torch.Tensor | None = None,
        prompt_embeds: torch.Tensor | None =
        None,
        prompt_attention_mask: torch.Tensor | None = None,
        negative_prompt_embeds: torch.Tensor | None = None,
        negative_prompt_attention_mask: torch.Tensor | None = None,
        output_type: str | None = "pil",
        return_dict: bool = True,
        clean_caption: bool = False,
        use_resolution_binning: bool = True,
        attention_kwargs: dict[str, Any] | None = None,
        callback_on_step_end: Callable[[int, int], None] | None = None,
        callback_on_step_end_tensor_inputs: list[str] = ["latents"],
        max_sequence_length: int = 300,
        complex_human_instruction: list[str] = [
            "Given a user prompt, generate an 'Enhanced prompt' that provides detailed visual descriptions suitable for video generation. Evaluate the level of detail in the user prompt:",
            "- If the prompt is simple, focus on adding specifics about colors, shapes, sizes, textures, motion, and temporal relationships to create vivid and dynamic scenes.",
            "- If the prompt is already detailed, refine and enhance the existing details slightly without overcomplicating.",
            "Here are examples of how to transform or refine prompts:",
            "- User Prompt: A cat sleeping -> Enhanced: A small, fluffy white cat slowly settling into a curled position, peacefully falling asleep on a warm sunny windowsill, with gentle sunlight filtering through surrounding pots of blooming red flowers.",
            "- User Prompt: A busy city street -> Enhanced: A bustling city street scene at dusk, featuring glowing street lamps gradually lighting up, a diverse crowd of people in colorful clothing walking past, and a double-decker bus smoothly passing by towering glass skyscrapers.",
            "Please generate only the enhanced description for the prompt below and avoid including any additional commentary or evaluations:",
            "User Prompt: ",
        ],
    ) -> SanaVideoPipelineOutput | tuple:
        """
        Function invoked when calling the pipeline for generation.

        Args:
            image (`PipelineImageInput`):
                The input image to condition the video generation on. The first frame of the generated video will be
                conditioned on this image.
            prompt (`str` or `list[str]`, *optional*):
                The prompt or prompts to guide the video generation. If not defined, one has to pass `prompt_embeds`.
                instead.
            negative_prompt (`str` or `list[str]`, *optional*):
                The prompt or prompts not to guide the video generation. If not defined, one has to pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale`
                is less than `1`).
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality video at the
                expense of slower inference.
            timesteps (`list[int]`, *optional*):
                Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument
                in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
                passed will be used. Must be in descending order.
            sigmas (`list[float]`, *optional*):
                Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in
                their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed
                will be used.
            guidance_scale (`float`, *optional*, defaults to 6.0):
                Guidance scale as defined in [Classifier-Free Diffusion
                Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2.
                of [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting
                `guidance_scale > 1`. Higher guidance scale encourages to generate videos that are closely linked to
                the text `prompt`, usually at the expense of lower video quality.
            num_videos_per_prompt (`int`, *optional*, defaults to 1):
                The number of videos to generate per prompt.
            height (`int`, *optional*, defaults to 480):
                The height in pixels of the generated video.
            width (`int`, *optional*, defaults to 832):
                The width in pixels of the generated video.
            frames (`int`, *optional*, defaults to 81):
                The number of frames in the generated video.
        Examples:

        Returns:
            [`~pipelines.sana_video.pipeline_output.SanaVideoPipelineOutput`] or `tuple`:
                If `return_dict` is `True`, [`~pipelines.sana_video.pipeline_output.SanaVideoPipelineOutput`] is
                returned, otherwise a `tuple` is returned where the first element is a list with the generated videos
        """
        if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)):
            callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs

        # 1. Check inputs. Raise error if not correct
        if use_resolution_binning:
            # Map the requested resolution to the nearest supported aspect-ratio bin; the decoded
            # video is resized back to (orig_height, orig_width) at the end.
            if self.transformer.config.sample_size == 30:
                aspect_ratio_bin = ASPECT_RATIO_480_BIN
            elif self.transformer.config.sample_size == 22:
                aspect_ratio_bin = ASPECT_RATIO_720_BIN
            else:
                raise ValueError("Invalid sample size")
            orig_height, orig_width = height, width
            height, width = self.video_processor.classify_height_width_bin(height, width, ratios=aspect_ratio_bin)

        self.check_inputs(
            prompt,
            image,
            height,
            width,
            callback_on_step_end_tensor_inputs,
            negative_prompt,
            prompt_embeds,
            negative_prompt_embeds,
            prompt_attention_mask,
            negative_prompt_attention_mask,
        )

        self._guidance_scale = guidance_scale
        self._attention_kwargs = attention_kwargs
        self._interrupt = False

        # 2. Default height and width to transformer
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        device = self._execution_device
        lora_scale = self.attention_kwargs.get("scale", None) if self.attention_kwargs is not None else None

        # 3. Encode input prompt
        (
            prompt_embeds,
            prompt_attention_mask,
            negative_prompt_embeds,
            negative_prompt_attention_mask,
        ) = self.encode_prompt(
            prompt,
            self.do_classifier_free_guidance,
            negative_prompt=negative_prompt,
            num_videos_per_prompt=num_videos_per_prompt,
            device=device,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            prompt_attention_mask=prompt_attention_mask,
            negative_prompt_attention_mask=negative_prompt_attention_mask,
            clean_caption=clean_caption,
            max_sequence_length=max_sequence_length,
            complex_human_instruction=complex_human_instruction,
            lora_scale=lora_scale,
        )
        if self.do_classifier_free_guidance:
            # Stack [uncond, cond] so one transformer pass covers both CFG branches.
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
            prompt_attention_mask = torch.cat([negative_prompt_attention_mask, prompt_attention_mask], dim=0)

        # 4. Prepare timesteps
        timesteps, num_inference_steps = retrieve_timesteps(
            self.scheduler, num_inference_steps, device, timesteps, sigmas
        )

        # 5. Prepare latents.
        latent_channels = self.transformer.config.in_channels
        image = self.video_processor.preprocess(image, height=height, width=width).to(device, dtype=torch.float32)
        latents = self.prepare_latents(
            image,
            batch_size * num_videos_per_prompt,
            latent_channels,
            height,
            width,
            frames,
            torch.float32,
            device,
            generator,
            latents,
        )

        # Mask marking the conditioned (first) latent frame; its timestep is zeroed out below so the
        # model treats it as clean.
        conditioning_mask = latents.new_zeros(
            batch_size,
            1,
            latents.shape[2] // self.transformer_temporal_patch_size,
            latents.shape[3] // self.transformer_spatial_patch_size,
            latents.shape[4] // self.transformer_spatial_patch_size,
        )
        conditioning_mask[:, :, 0] = 1.0
        if self.do_classifier_free_guidance:
            conditioning_mask = torch.cat([conditioning_mask, conditioning_mask])

        # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
        extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)

        # 7. Denoising loop
        num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
        self._num_timesteps = len(timesteps)
        transformer_dtype = self.transformer.dtype
        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                if self.interrupt:
                    continue

                latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
                # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
                timestep = t.expand(conditioning_mask.shape)
                # Conditioned frame gets timestep 0 (treated as already denoised).
                timestep = timestep * (1 - conditioning_mask)

                # predict noise model_output
                noise_pred = self.transformer(
                    latent_model_input.to(dtype=transformer_dtype),
                    encoder_hidden_states=prompt_embeds.to(dtype=transformer_dtype),
                    encoder_attention_mask=prompt_attention_mask,
                    timestep=timestep,
                    return_dict=False,
                    attention_kwargs=self.attention_kwargs,
                )[0]
                noise_pred = noise_pred.float()

                # perform guidance
                if self.do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                    timestep, _ = timestep.chunk(2)

                # learned sigma
                if self.transformer.config.out_channels // 2 == latent_channels:
                    noise_pred = noise_pred.chunk(2, dim=1)[0]

                # Only step the non-conditioned frames; frame 0 is re-attached unchanged.
                noise_pred = noise_pred[:, :, 1:]
                noise_latents = latents[:, :, 1:]
                pred_latents = self.scheduler.step(
                    noise_pred, t, noise_latents, **extra_step_kwargs, return_dict=False
                )[0]
                latents = torch.cat([latents[:, :, :1], pred_latents], dim=2)

                if callback_on_step_end is not None:
                    callback_kwargs = {}
                    for k in callback_on_step_end_tensor_inputs:
                        callback_kwargs[k] = locals()[k]
                    callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)

                    latents = callback_outputs.pop("latents", latents)
                    prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
                    negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)

                # call the callback, if provided
                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                    progress_bar.update()

                if XLA_AVAILABLE:
                    xm.mark_step()

        if output_type == "latent":
            video = latents
        else:
            latents = latents.to(self.vae.dtype)
            torch_accelerator_module = getattr(torch, get_device(), torch.cuda)
            oom_error = (
                torch.OutOfMemoryError
                if is_torch_version(">=", "2.5.0")
                else torch_accelerator_module.OutOfMemoryError
            )
            # Undo the latent normalization before decoding.
            latents_mean = (
                torch.tensor(self.vae.config.latents_mean)
                .view(1, self.vae.config.z_dim, 1, 1, 1)
                .to(latents.device, latents.dtype)
            )
            latents_std = 1.0 / torch.tensor(self.vae.config.latents_std).view(1, self.vae.config.z_dim, 1, 1, 1).to(
                latents.device, latents.dtype
            )
            latents = latents / latents_std + latents_mean
            try:
                video = self.vae.decode(latents, return_dict=False)[0]
            except oom_error as e:
                # NOTE(review): on OOM this only warns — `video` is left unbound, so the code below
                # raises NameError. Consider re-raising after the warning; confirm intended behavior.
                warnings.warn(
                    f"{e}. \n"
                    f"Try to use VAE tiling for large images. For example: \n"
                    f"pipe.vae.enable_tiling(tile_sample_min_width=512, tile_sample_min_height=512)"
                )
            if use_resolution_binning:
                video = self.video_processor.resize_and_crop_tensor(video, orig_width, orig_height)

        video = self.video_processor.postprocess_video(video, output_type=output_type)

        # Offload all models
        self.maybe_free_model_hooks()

        if not return_dict:
            return (video,)

        return SanaVideoPipelineOutput(frames=video)
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/pipelines/sana_video/pipeline_sana_video_i2v.py", "license": "Apache License 2.0", "lines": 920, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:tests/pipelines/sana_video/test_sana_video_i2v.py
# Copyright 2025 The HuggingFace Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Fast (CPU, tiny-model) and slow (accelerator) tests for the Sana image-to-video pipeline."""

import gc
import tempfile
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import Gemma2Config, Gemma2Model, GemmaTokenizer

from diffusers import (
    AutoencoderKLWan,
    FlowMatchEulerDiscreteScheduler,
    SanaImageToVideoPipeline,
    SanaVideoTransformer3DModel,
)

from ...testing_utils import (
    backend_empty_cache,
    enable_full_determinism,
    require_torch_accelerator,
    slow,
    torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


# Make cuDNN/algorithm selection deterministic so outputs are reproducible across runs.
enable_full_determinism()


class SanaImageToVideoPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast unit tests for ``SanaImageToVideoPipeline`` using tiny randomly-initialized components.

    Inherits the generic pipeline test battery from ``PipelineTesterMixin``; the class
    attributes below configure which shared tests run and with which parameter sets.
    """

    pipeline_class = SanaImageToVideoPipeline
    # The shared text-to-image parameter set, minus cross_attention_kwargs which this
    # pipeline's __call__ does not accept.
    params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs"}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback_on_step_end",
            "callback_on_step_end_tensor_inputs",
        ]
    )
    test_xformers_attention = False
    supports_dduf = False

    def get_dummy_components(self):
        """Build a dict of minimal pipeline components with deterministic random weights.

        ``torch.manual_seed(0)`` is re-applied before each sub-model so every component's
        initialization is reproducible independently of construction order.
        """
        torch.manual_seed(0)
        vae = AutoencoderKLWan(
            base_dim=3,
            z_dim=16,
            dim_mult=[1, 1, 1, 1],
            num_res_blocks=1,
            # NOTE: "temperal" is the actual (misspelled) AutoencoderKLWan kwarg name.
            temperal_downsample=[False, True, True],
        )

        torch.manual_seed(0)
        scheduler = FlowMatchEulerDiscreteScheduler()

        torch.manual_seed(0)
        # Tiny Gemma2 text encoder; vocab_size=8 keeps it fast but means only the empty
        # prompt (and its special tokens) can be embedded without index errors.
        text_encoder_config = Gemma2Config(
            head_dim=16,
            hidden_size=8,
            initializer_range=0.02,
            intermediate_size=64,
            max_position_embeddings=8192,
            model_type="gemma2",
            num_attention_heads=2,
            num_hidden_layers=1,
            num_key_value_heads=2,
            vocab_size=8,
            attn_implementation="eager",
        )
        text_encoder = Gemma2Model(text_encoder_config)
        tokenizer = GemmaTokenizer.from_pretrained("hf-internal-testing/dummy-gemma")

        torch.manual_seed(0)
        transformer = SanaVideoTransformer3DModel(
            in_channels=16,
            out_channels=16,
            num_attention_heads=2,
            attention_head_dim=12,
            num_layers=2,
            num_cross_attention_heads=2,
            cross_attention_head_dim=12,
            cross_attention_dim=24,
            caption_channels=8,
            mlp_ratio=2.5,
            dropout=0.0,
            attention_bias=False,
            sample_size=8,
            patch_size=(1, 2, 2),
            norm_elementwise_affine=False,
            norm_eps=1e-6,
            qk_norm="rms_norm_across_heads",
            rope_max_seq_len=32,
        )

        components = {
            "transformer": transformer,
            "vae": vae,
            "scheduler": scheduler,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Return a seeded, minimal kwargs dict for one pipeline call on *device*."""
        if str(device).startswith("mps"):
            # On MPS, seed the global RNG instead of creating a device-local generator.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        # Create a dummy image input (PIL Image)
        image = Image.new("RGB", (32, 32))

        inputs = {
            "image": image,
            "prompt": "",
            "negative_prompt": "",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "height": 32,
            "width": 32,
            "frames": 9,
            "max_sequence_length": 16,
            "output_type": "pt",
            "complex_human_instruction": [],
            "use_resolution_binning": False,
        }
        return inputs

    def test_inference(self):
        """Smoke-test a full CPU pipeline call and check the generated video's shape."""
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        video = pipe(**inputs).frames
        generated_video = video[0]
        # frames=9, 3 RGB channels, 32x32 spatial — matches get_dummy_inputs above.
        self.assertEqual(generated_video.shape, (9, 3, 32, 32))

    @unittest.skip("Test not supported")
    def test_attention_slicing_forward_pass(self):
        pass

    def test_save_load_local(self, expected_max_difference=5e-4):
        """Round-trip the pipeline through save_pretrained/from_pretrained and compare outputs."""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        for component in pipe.components.values():
            # Normalize attention processors so saved/loaded runs are comparable.
            if hasattr(component, "set_default_attn_processor"):
                component.set_default_attn_processor()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        torch.manual_seed(0)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir, safe_serialization=False)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            for component in pipe_loaded.components.values():
                if hasattr(component, "set_default_attn_processor"):
                    component.set_default_attn_processor()
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        # Re-seed so the loaded pipeline sees an identical generator state.
        inputs = self.get_dummy_inputs(torch_device)
        torch.manual_seed(0)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(output.detach().cpu().numpy() - output_loaded.detach().cpu().numpy()).max()
        self.assertLess(max_diff, expected_max_difference)

    # TODO(aryan): Create a dummy gemma model with smol vocab size
    @unittest.skip(
        "A very small vocab size is used for fast tests. So, Any kind of prompt other than the empty default used in other tests will lead to a embedding lookup error. This test uses a long prompt that causes the error."
    )
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(
        "A very small vocab size is used for fast tests. So, Any kind of prompt other than the empty default used in other tests will lead to a embedding lookup error. This test uses a long prompt that causes the error."
    )
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip("Skipping fp16 test as model is trained with bf16")
    def test_float16_inference(self):
        # Requires higher tolerance as model seems very sensitive to dtype
        super().test_float16_inference(expected_max_diff=0.08)

    @unittest.skip("Skipping fp16 test as model is trained with bf16")
    def test_save_load_float16(self):
        # Requires higher tolerance as model seems very sensitive to dtype
        super().test_save_load_float16(expected_max_diff=0.2)


@slow
@require_torch_accelerator
class SanaVideoPipelineIntegrationTests(unittest.TestCase):
    """Slow integration tests that require a real accelerator; currently a placeholder."""

    prompt = "Evening, backlight, side lighting, soft light, high contrast, mid-shot, centered composition, clean solo shot, warm color. A young Caucasian man stands in a forest."

    def setUp(self):
        # Free accelerator memory between tests to avoid cross-test OOMs.
        super().setUp()
        gc.collect()
        backend_empty_cache(torch_device)

    def tearDown(self):
        super().tearDown()
        gc.collect()
        backend_empty_cache(torch_device)

    @unittest.skip("TODO: test needs to be implemented")
    def test_sana_video_480p(self):
        pass
{ "repo_id": "huggingface/diffusers", "file_path": "tests/pipelines/sana_video/test_sana_video_i2v.py", "license": "Apache License 2.0", "lines": 204, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/diffusers:src/diffusers/models/transformers/transformer_wan_animate.py
# Copyright 2025 The Wan Team and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math from typing import Any import torch import torch.nn as nn import torch.nn.functional as F from ...configuration_utils import ConfigMixin, register_to_config from ...loaders import FromOriginalModelMixin, PeftAdapterMixin from ...utils import apply_lora_scale, logging from ..attention import AttentionMixin, AttentionModuleMixin, FeedForward from ..attention_dispatch import dispatch_attention_fn from ..cache_utils import CacheMixin from ..embeddings import PixArtAlphaTextProjection, TimestepEmbedding, Timesteps, get_1d_rotary_pos_embed from ..modeling_outputs import Transformer2DModelOutput from ..modeling_utils import ModelMixin from ..normalization import FP32LayerNorm logger = logging.get_logger(__name__) # pylint: disable=invalid-name WAN_ANIMATE_MOTION_ENCODER_CHANNEL_SIZES = { "4": 512, "8": 512, "16": 512, "32": 512, "64": 256, "128": 128, "256": 64, "512": 32, "1024": 16, } # Copied from diffusers.models.transformers.transformer_wan._get_qkv_projections def _get_qkv_projections(attn: "WanAttention", hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor): # encoder_hidden_states is only passed for cross-attention if encoder_hidden_states is None: encoder_hidden_states = hidden_states if attn.fused_projections: if not attn.is_cross_attention: # In self-attention layers, we can fuse the entire QKV projection into a single linear 
query, key, value = attn.to_qkv(hidden_states).chunk(3, dim=-1) else: # In cross-attention layers, we can only fuse the KV projections into a single linear query = attn.to_q(hidden_states) key, value = attn.to_kv(encoder_hidden_states).chunk(2, dim=-1) else: query = attn.to_q(hidden_states) key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) return query, key, value # Copied from diffusers.models.transformers.transformer_wan._get_added_kv_projections def _get_added_kv_projections(attn: "WanAttention", encoder_hidden_states_img: torch.Tensor): if attn.fused_projections: key_img, value_img = attn.to_added_kv(encoder_hidden_states_img).chunk(2, dim=-1) else: key_img = attn.add_k_proj(encoder_hidden_states_img) value_img = attn.add_v_proj(encoder_hidden_states_img) return key_img, value_img class FusedLeakyReLU(nn.Module): """ Fused LeakyRelu with scale factor and channel-wise bias. """ def __init__(self, negative_slope: float = 0.2, scale: float = 2**0.5, bias_channels: int | None = None): super().__init__() self.negative_slope = negative_slope self.scale = scale self.channels = bias_channels if self.channels is not None: self.bias = nn.Parameter( torch.zeros( self.channels, ) ) else: self.bias = None def forward(self, x: torch.Tensor, channel_dim: int = 1) -> torch.Tensor: if self.bias is not None: # Expand self.bias to have all singleton dims except at self.channel_dim expanded_shape = [1] * x.ndim expanded_shape[channel_dim] = self.bias.shape[0] bias = self.bias.reshape(*expanded_shape) x = x + bias return F.leaky_relu(x, self.negative_slope) * self.scale class MotionConv2d(nn.Module): def __init__( self, in_channels: int, out_channels: int, kernel_size: int, stride: int = 1, padding: int = 0, bias: bool = True, blur_kernel: tuple[int, ...] 
| None = None, blur_upsample_factor: int = 1, use_activation: bool = True, ): super().__init__() self.use_activation = use_activation self.in_channels = in_channels # Handle blurring (applying a FIR filter with the given kernel) if available self.blur = False if blur_kernel is not None: p = (len(blur_kernel) - stride) + (kernel_size - 1) self.blur_padding = ((p + 1) // 2, p // 2) kernel = torch.tensor(blur_kernel) # Convert kernel to 2D if necessary if kernel.ndim == 1: kernel = kernel[None, :] * kernel[:, None] # Normalize kernel kernel = kernel / kernel.sum() if blur_upsample_factor > 1: kernel = kernel * (blur_upsample_factor**2) self.register_buffer("blur_kernel", kernel, persistent=False) self.blur = True # Main Conv2d parameters (with scale factor) self.weight = nn.Parameter(torch.randn(out_channels, in_channels, kernel_size, kernel_size)) self.scale = 1 / math.sqrt(in_channels * kernel_size**2) self.stride = stride self.padding = padding # If using an activation function, the bias will be fused into the activation if bias and not self.use_activation: self.bias = nn.Parameter(torch.zeros(out_channels)) else: self.bias = None if self.use_activation: self.act_fn = FusedLeakyReLU(bias_channels=out_channels) else: self.act_fn = None def forward(self, x: torch.Tensor, channel_dim: int = 1) -> torch.Tensor: # Apply blur if using if self.blur: # NOTE: the original implementation uses a 2D upfirdn operation with the upsampling and downsampling rates # set to 1, which should be equivalent to a 2D convolution expanded_kernel = self.blur_kernel[None, None, :, :].expand(self.in_channels, 1, -1, -1) x = x.to(expanded_kernel.dtype) x = F.conv2d(x, expanded_kernel, padding=self.blur_padding, groups=self.in_channels) # Main Conv2D with scaling x = x.to(self.weight.dtype) x = F.conv2d(x, self.weight * self.scale, bias=self.bias, stride=self.stride, padding=self.padding) # Activation with fused bias, if using if self.use_activation: x = self.act_fn(x, channel_dim=channel_dim) 
return x def __repr__(self): return ( f"{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]}," f" kernel_size={self.weight.shape[2]}, stride={self.stride}, padding={self.padding})" ) class MotionLinear(nn.Module): def __init__( self, in_dim: int, out_dim: int, bias: bool = True, use_activation: bool = False, ): super().__init__() self.use_activation = use_activation # Linear weight with scale factor self.weight = nn.Parameter(torch.randn(out_dim, in_dim)) self.scale = 1 / math.sqrt(in_dim) # If an activation is present, the bias will be fused to it if bias and not self.use_activation: self.bias = nn.Parameter(torch.zeros(out_dim)) else: self.bias = None if self.use_activation: self.act_fn = FusedLeakyReLU(bias_channels=out_dim) else: self.act_fn = None def forward(self, input: torch.Tensor, channel_dim: int = 1) -> torch.Tensor: out = F.linear(input, self.weight * self.scale, bias=self.bias) if self.use_activation: out = self.act_fn(out, channel_dim=channel_dim) return out def __repr__(self): return ( f"{self.__class__.__name__}(in_features={self.weight.shape[1]}, out_features={self.weight.shape[0]}," f" bias={self.bias is not None})" ) class MotionEncoderResBlock(nn.Module): def __init__( self, in_channels: int, out_channels: int, kernel_size: int = 3, kernel_size_skip: int = 1, blur_kernel: tuple[int, ...] 
= (1, 3, 3, 1), downsample_factor: int = 2, ): super().__init__() self.downsample_factor = downsample_factor # 3 x 3 Conv + fused leaky ReLU self.conv1 = MotionConv2d( in_channels, in_channels, kernel_size, stride=1, padding=kernel_size // 2, use_activation=True, ) # 3 x 3 Conv that downsamples 2x + fused leaky ReLU self.conv2 = MotionConv2d( in_channels, out_channels, kernel_size=kernel_size, stride=self.downsample_factor, padding=0, blur_kernel=blur_kernel, use_activation=True, ) # 1 x 1 Conv that downsamples 2x in skip connection self.conv_skip = MotionConv2d( in_channels, out_channels, kernel_size=kernel_size_skip, stride=self.downsample_factor, padding=0, bias=False, blur_kernel=blur_kernel, use_activation=False, ) def forward(self, x: torch.Tensor, channel_dim: int = 1) -> torch.Tensor: x_out = self.conv1(x, channel_dim) x_out = self.conv2(x_out, channel_dim) x_skip = self.conv_skip(x, channel_dim) x_out = (x_out + x_skip) / math.sqrt(2) return x_out class WanAnimateMotionEncoder(nn.Module): def __init__( self, size: int = 512, style_dim: int = 512, motion_dim: int = 20, out_dim: int = 512, motion_blocks: int = 5, channels: dict[str, int] | None = None, ): super().__init__() self.size = size # Appearance encoder: conv layers if channels is None: channels = WAN_ANIMATE_MOTION_ENCODER_CHANNEL_SIZES self.conv_in = MotionConv2d(3, channels[str(size)], 1, use_activation=True) self.res_blocks = nn.ModuleList() in_channels = channels[str(size)] log_size = int(math.log(size, 2)) for i in range(log_size, 2, -1): out_channels = channels[str(2 ** (i - 1))] self.res_blocks.append(MotionEncoderResBlock(in_channels, out_channels)) in_channels = out_channels self.conv_out = MotionConv2d(in_channels, style_dim, 4, padding=0, bias=False, use_activation=False) # Motion encoder: linear layers # NOTE: there are no activations in between the linear layers here, which is weird but I believe matches the # original code. 
linears = [MotionLinear(style_dim, style_dim) for _ in range(motion_blocks - 1)] linears.append(MotionLinear(style_dim, motion_dim)) self.motion_network = nn.ModuleList(linears) self.motion_synthesis_weight = nn.Parameter(torch.randn(out_dim, motion_dim)) def forward(self, face_image: torch.Tensor, channel_dim: int = 1) -> torch.Tensor: if (face_image.shape[-2] != self.size) or (face_image.shape[-1] != self.size): raise ValueError( f"Face pixel values has resolution ({face_image.shape[-1]}, {face_image.shape[-2]}) but is expected" f" to have resolution ({self.size}, {self.size})" ) # Appearance encoding through convs face_image = self.conv_in(face_image, channel_dim) for block in self.res_blocks: face_image = block(face_image, channel_dim) face_image = self.conv_out(face_image, channel_dim) motion_feat = face_image.squeeze(-1).squeeze(-1) # Motion feature extraction for linear_layer in self.motion_network: motion_feat = linear_layer(motion_feat, channel_dim=channel_dim) # Motion synthesis via Linear Motion Decomposition weight = self.motion_synthesis_weight + 1e-8 # Upcast the QR orthogonalization operation to FP32 original_motion_dtype = motion_feat.dtype motion_feat = motion_feat.to(torch.float32) weight = weight.to(torch.float32) Q = torch.linalg.qr(weight)[0].to(device=motion_feat.device) motion_feat_diag = torch.diag_embed(motion_feat) # Alpha, diagonal matrix motion_decomposition = torch.matmul(motion_feat_diag, Q.T) motion_vec = torch.sum(motion_decomposition, dim=1) motion_vec = motion_vec.to(dtype=original_motion_dtype) return motion_vec class WanAnimateFaceEncoder(nn.Module): def __init__( self, in_dim: int, out_dim: int, hidden_dim: int = 1024, num_heads: int = 4, kernel_size: int = 3, eps: float = 1e-6, pad_mode: str = "replicate", ): super().__init__() self.num_heads = num_heads self.time_causal_padding = (kernel_size - 1, 0) self.pad_mode = pad_mode self.act = nn.SiLU() self.conv1_local = nn.Conv1d(in_dim, hidden_dim * num_heads, 
kernel_size=kernel_size, stride=1) self.conv2 = nn.Conv1d(hidden_dim, hidden_dim, kernel_size, stride=2) self.conv3 = nn.Conv1d(hidden_dim, hidden_dim, kernel_size, stride=2) self.norm1 = nn.LayerNorm(hidden_dim, eps, elementwise_affine=False) self.norm2 = nn.LayerNorm(hidden_dim, eps, elementwise_affine=False) self.norm3 = nn.LayerNorm(hidden_dim, eps, elementwise_affine=False) self.out_proj = nn.Linear(hidden_dim, out_dim) self.padding_tokens = nn.Parameter(torch.zeros(1, 1, 1, out_dim)) def forward(self, x: torch.Tensor) -> torch.Tensor: batch_size = x.shape[0] # Reshape to channels-first to apply causal Conv1d over frame dim x = x.permute(0, 2, 1) x = F.pad(x, self.time_causal_padding, mode=self.pad_mode) x = self.conv1_local(x) # [B, C, T_padded] --> [B, N * C, T] x = x.unflatten(1, (self.num_heads, -1)).flatten(0, 1) # [B, N * C, T] --> [B * N, C, T] # Reshape back to channels-last to apply LayerNorm over channel dim x = x.permute(0, 2, 1) x = self.norm1(x) x = self.act(x) x = x.permute(0, 2, 1) x = F.pad(x, self.time_causal_padding, mode=self.pad_mode) x = self.conv2(x) x = x.permute(0, 2, 1) x = self.norm2(x) x = self.act(x) x = x.permute(0, 2, 1) x = F.pad(x, self.time_causal_padding, mode=self.pad_mode) x = self.conv3(x) x = x.permute(0, 2, 1) x = self.norm3(x) x = self.act(x) x = self.out_proj(x) x = x.unflatten(0, (batch_size, -1)).permute(0, 2, 1, 3) # [B * N, T, C_out] --> [B, T, N, C_out] padding = self.padding_tokens.repeat(batch_size, x.shape[1], 1, 1).to(device=x.device) x = torch.cat([x, padding], dim=-2) # [B, T, N, C_out] --> [B, T, N + 1, C_out] return x class WanAnimateFaceBlockAttnProcessor: _attention_backend = None _parallel_config = None def __init__(self): if not hasattr(F, "scaled_dot_product_attention"): raise ImportError( f"{self.__class__.__name__} requires PyTorch 2.0. To use it, please upgrade PyTorch to version 2.0 or" f" higher." 
) def __call__( self, attn: "WanAnimateFaceBlockCrossAttention", hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor | None = None, attention_mask: torch.Tensor | None = None, ) -> torch.Tensor: # encoder_hidden_states corresponds to the motion vec # attention_mask corresponds to the motion mask (if any) hidden_states = attn.pre_norm_q(hidden_states) encoder_hidden_states = attn.pre_norm_kv(encoder_hidden_states) # B --> batch_size, T --> reduced inference segment len, N --> face_encoder_num_heads + 1, C --> attn.dim B, T, N, C = encoder_hidden_states.shape query, key, value = _get_qkv_projections(attn, hidden_states, encoder_hidden_states) query = query.unflatten(2, (attn.heads, -1)) # [B, S, H * D] --> [B, S, H, D] key = key.view(B, T, N, attn.heads, -1) # [B, T, N, H * D_kv] --> [B, T, N, H, D_kv] value = value.view(B, T, N, attn.heads, -1) query = attn.norm_q(query) key = attn.norm_k(key) # NOTE: the below line (which follows the official code) means that in practice, the number of frames T in # encoder_hidden_states (the motion vector after applying the face encoder) must evenly divide the # post-patchify sequence length S of the transformer hidden_states. Is it possible to remove this dependency? 
query = query.unflatten(1, (T, -1)).flatten(0, 1) # [B, S, H, D] --> [B * T, S / T, H, D] key = key.flatten(0, 1) # [B, T, N, H, D_kv] --> [B * T, N, H, D_kv] value = value.flatten(0, 1) hidden_states = dispatch_attention_fn( query, key, value, attn_mask=None, dropout_p=0.0, is_causal=False, backend=self._attention_backend, parallel_config=self._parallel_config, ) hidden_states = hidden_states.flatten(2, 3) hidden_states = hidden_states.type_as(query) hidden_states = hidden_states.unflatten(0, (B, T)).flatten(1, 2) hidden_states = attn.to_out(hidden_states) if attention_mask is not None: # NOTE: attention_mask is assumed to be a multiplicative mask attention_mask = attention_mask.flatten(start_dim=1) hidden_states = hidden_states * attention_mask return hidden_states class WanAnimateFaceBlockCrossAttention(nn.Module, AttentionModuleMixin): """ Temporally-aligned cross attention with the face motion signal in the Wan Animate Face Blocks. """ _default_processor_cls = WanAnimateFaceBlockAttnProcessor _available_processors = [WanAnimateFaceBlockAttnProcessor] def __init__( self, dim: int, heads: int = 8, dim_head: int = 64, eps: float = 1e-6, cross_attention_dim_head: int | None = None, bias: bool = True, processor=None, ): super().__init__() self.inner_dim = dim_head * heads self.heads = heads self.cross_attention_dim_head = cross_attention_dim_head self.kv_inner_dim = self.inner_dim if cross_attention_dim_head is None else cross_attention_dim_head * heads self.use_bias = bias self.is_cross_attention = cross_attention_dim_head is not None # 1. Pre-Attention Norms for the hidden_states (video latents) and encoder_hidden_states (motion vector). # NOTE: this is not used in "vanilla" WanAttention self.pre_norm_q = nn.LayerNorm(dim, eps, elementwise_affine=False) self.pre_norm_kv = nn.LayerNorm(dim, eps, elementwise_affine=False) # 2. 
QKV and Output Projections self.to_q = torch.nn.Linear(dim, self.inner_dim, bias=bias) self.to_k = torch.nn.Linear(dim, self.kv_inner_dim, bias=bias) self.to_v = torch.nn.Linear(dim, self.kv_inner_dim, bias=bias) self.to_out = torch.nn.Linear(self.inner_dim, dim, bias=bias) # 3. QK Norm # NOTE: this is applied after the reshape, so only over dim_head rather than dim_head * heads self.norm_q = torch.nn.RMSNorm(dim_head, eps=eps, elementwise_affine=True) self.norm_k = torch.nn.RMSNorm(dim_head, eps=eps, elementwise_affine=True) # 4. Set attention processor if processor is None: processor = self._default_processor_cls() self.set_processor(processor) def forward( self, hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor | None = None, attention_mask: torch.Tensor | None = None, **kwargs, ) -> torch.Tensor: return self.processor(self, hidden_states, encoder_hidden_states, attention_mask) # Copied from diffusers.models.transformers.transformer_wan.WanAttnProcessor class WanAttnProcessor: _attention_backend = None _parallel_config = None def __init__(self): if not hasattr(F, "scaled_dot_product_attention"): raise ImportError( "WanAttnProcessor requires PyTorch 2.0. To use it, please upgrade PyTorch to version 2.0 or higher." 
) def __call__( self, attn: "WanAttention", hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor | None = None, attention_mask: torch.Tensor | None = None, rotary_emb: tuple[torch.Tensor, torch.Tensor] | None = None, ) -> torch.Tensor: encoder_hidden_states_img = None if attn.add_k_proj is not None: # 512 is the context length of the text encoder, hardcoded for now image_context_length = encoder_hidden_states.shape[1] - 512 encoder_hidden_states_img = encoder_hidden_states[:, :image_context_length] encoder_hidden_states = encoder_hidden_states[:, image_context_length:] query, key, value = _get_qkv_projections(attn, hidden_states, encoder_hidden_states) query = attn.norm_q(query) key = attn.norm_k(key) query = query.unflatten(2, (attn.heads, -1)) key = key.unflatten(2, (attn.heads, -1)) value = value.unflatten(2, (attn.heads, -1)) if rotary_emb is not None: def apply_rotary_emb( hidden_states: torch.Tensor, freqs_cos: torch.Tensor, freqs_sin: torch.Tensor, ): x1, x2 = hidden_states.unflatten(-1, (-1, 2)).unbind(-1) cos = freqs_cos[..., 0::2] sin = freqs_sin[..., 1::2] out = torch.empty_like(hidden_states) out[..., 0::2] = x1 * cos - x2 * sin out[..., 1::2] = x1 * sin + x2 * cos return out.type_as(hidden_states) query = apply_rotary_emb(query, *rotary_emb) key = apply_rotary_emb(key, *rotary_emb) # I2V task hidden_states_img = None if encoder_hidden_states_img is not None: key_img, value_img = _get_added_kv_projections(attn, encoder_hidden_states_img) key_img = attn.norm_added_k(key_img) key_img = key_img.unflatten(2, (attn.heads, -1)) value_img = value_img.unflatten(2, (attn.heads, -1)) hidden_states_img = dispatch_attention_fn( query, key_img, value_img, attn_mask=None, dropout_p=0.0, is_causal=False, backend=self._attention_backend, # Reference: https://github.com/huggingface/diffusers/pull/12909 parallel_config=None, ) hidden_states_img = hidden_states_img.flatten(2, 3) hidden_states_img = hidden_states_img.type_as(query) hidden_states = 
dispatch_attention_fn( query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False, backend=self._attention_backend, # Reference: https://github.com/huggingface/diffusers/pull/12909 parallel_config=(self._parallel_config if encoder_hidden_states is None else None), ) hidden_states = hidden_states.flatten(2, 3) hidden_states = hidden_states.type_as(query) if hidden_states_img is not None: hidden_states = hidden_states + hidden_states_img hidden_states = attn.to_out[0](hidden_states) hidden_states = attn.to_out[1](hidden_states) return hidden_states # Copied from diffusers.models.transformers.transformer_wan.WanAttention class WanAttention(torch.nn.Module, AttentionModuleMixin): _default_processor_cls = WanAttnProcessor _available_processors = [WanAttnProcessor] def __init__( self, dim: int, heads: int = 8, dim_head: int = 64, eps: float = 1e-5, dropout: float = 0.0, added_kv_proj_dim: int | None = None, cross_attention_dim_head: int | None = None, processor=None, is_cross_attention=None, ): super().__init__() self.inner_dim = dim_head * heads self.heads = heads self.added_kv_proj_dim = added_kv_proj_dim self.cross_attention_dim_head = cross_attention_dim_head self.kv_inner_dim = self.inner_dim if cross_attention_dim_head is None else cross_attention_dim_head * heads self.to_q = torch.nn.Linear(dim, self.inner_dim, bias=True) self.to_k = torch.nn.Linear(dim, self.kv_inner_dim, bias=True) self.to_v = torch.nn.Linear(dim, self.kv_inner_dim, bias=True) self.to_out = torch.nn.ModuleList( [ torch.nn.Linear(self.inner_dim, dim, bias=True), torch.nn.Dropout(dropout), ] ) self.norm_q = torch.nn.RMSNorm(dim_head * heads, eps=eps, elementwise_affine=True) self.norm_k = torch.nn.RMSNorm(dim_head * heads, eps=eps, elementwise_affine=True) self.add_k_proj = self.add_v_proj = None if added_kv_proj_dim is not None: self.add_k_proj = torch.nn.Linear(added_kv_proj_dim, self.inner_dim, bias=True) self.add_v_proj = torch.nn.Linear(added_kv_proj_dim, self.inner_dim, 
bias=True) self.norm_added_k = torch.nn.RMSNorm(dim_head * heads, eps=eps) if is_cross_attention is not None: self.is_cross_attention = is_cross_attention else: self.is_cross_attention = cross_attention_dim_head is not None self.set_processor(processor) def fuse_projections(self): if getattr(self, "fused_projections", False): return if not self.is_cross_attention: concatenated_weights = torch.cat([self.to_q.weight.data, self.to_k.weight.data, self.to_v.weight.data]) concatenated_bias = torch.cat([self.to_q.bias.data, self.to_k.bias.data, self.to_v.bias.data]) out_features, in_features = concatenated_weights.shape with torch.device("meta"): self.to_qkv = nn.Linear(in_features, out_features, bias=True) self.to_qkv.load_state_dict( {"weight": concatenated_weights, "bias": concatenated_bias}, strict=True, assign=True ) else: concatenated_weights = torch.cat([self.to_k.weight.data, self.to_v.weight.data]) concatenated_bias = torch.cat([self.to_k.bias.data, self.to_v.bias.data]) out_features, in_features = concatenated_weights.shape with torch.device("meta"): self.to_kv = nn.Linear(in_features, out_features, bias=True) self.to_kv.load_state_dict( {"weight": concatenated_weights, "bias": concatenated_bias}, strict=True, assign=True ) if self.added_kv_proj_dim is not None: concatenated_weights = torch.cat([self.add_k_proj.weight.data, self.add_v_proj.weight.data]) concatenated_bias = torch.cat([self.add_k_proj.bias.data, self.add_v_proj.bias.data]) out_features, in_features = concatenated_weights.shape with torch.device("meta"): self.to_added_kv = nn.Linear(in_features, out_features, bias=True) self.to_added_kv.load_state_dict( {"weight": concatenated_weights, "bias": concatenated_bias}, strict=True, assign=True ) self.fused_projections = True @torch.no_grad() def unfuse_projections(self): if not getattr(self, "fused_projections", False): return if hasattr(self, "to_qkv"): delattr(self, "to_qkv") if hasattr(self, "to_kv"): delattr(self, "to_kv") if hasattr(self, 
"to_added_kv"): delattr(self, "to_added_kv") self.fused_projections = False def forward( self, hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor | None = None, attention_mask: torch.Tensor | None = None, rotary_emb: tuple[torch.Tensor, torch.Tensor] | None = None, **kwargs, ) -> torch.Tensor: return self.processor(self, hidden_states, encoder_hidden_states, attention_mask, rotary_emb, **kwargs) # Copied from diffusers.models.transformers.transformer_wan.WanImageEmbedding class WanImageEmbedding(torch.nn.Module): def __init__(self, in_features: int, out_features: int, pos_embed_seq_len=None): super().__init__() self.norm1 = FP32LayerNorm(in_features) self.ff = FeedForward(in_features, out_features, mult=1, activation_fn="gelu") self.norm2 = FP32LayerNorm(out_features) if pos_embed_seq_len is not None: self.pos_embed = nn.Parameter(torch.zeros(1, pos_embed_seq_len, in_features)) else: self.pos_embed = None def forward(self, encoder_hidden_states_image: torch.Tensor) -> torch.Tensor: if self.pos_embed is not None: batch_size, seq_len, embed_dim = encoder_hidden_states_image.shape encoder_hidden_states_image = encoder_hidden_states_image.view(-1, 2 * seq_len, embed_dim) encoder_hidden_states_image = encoder_hidden_states_image + self.pos_embed hidden_states = self.norm1(encoder_hidden_states_image) hidden_states = self.ff(hidden_states) hidden_states = self.norm2(hidden_states) return hidden_states # Modified from diffusers.models.transformers.transformer_wan.WanTimeTextImageEmbedding class WanTimeTextImageEmbedding(nn.Module): def __init__( self, dim: int, time_freq_dim: int, time_proj_dim: int, text_embed_dim: int, image_embed_dim: int | None = None, pos_embed_seq_len: int | None = None, ): super().__init__() self.timesteps_proj = Timesteps(num_channels=time_freq_dim, flip_sin_to_cos=True, downscale_freq_shift=0) self.time_embedder = TimestepEmbedding(in_channels=time_freq_dim, time_embed_dim=dim) self.act_fn = nn.SiLU() self.time_proj = nn.Linear(dim, 
time_proj_dim) self.text_embedder = PixArtAlphaTextProjection(text_embed_dim, dim, act_fn="gelu_tanh") self.image_embedder = None if image_embed_dim is not None: self.image_embedder = WanImageEmbedding(image_embed_dim, dim, pos_embed_seq_len=pos_embed_seq_len) def forward( self, timestep: torch.Tensor, encoder_hidden_states: torch.Tensor, encoder_hidden_states_image: torch.Tensor | None = None, timestep_seq_len: int | None = None, ): timestep = self.timesteps_proj(timestep) if timestep_seq_len is not None: timestep = timestep.unflatten(0, (-1, timestep_seq_len)) if self.time_embedder.linear_1.weight.dtype.is_floating_point: time_embedder_dtype = self.time_embedder.linear_1.weight.dtype else: time_embedder_dtype = encoder_hidden_states.dtype temb = self.time_embedder(timestep.to(time_embedder_dtype)).type_as(encoder_hidden_states) timestep_proj = self.time_proj(self.act_fn(temb)) encoder_hidden_states = self.text_embedder(encoder_hidden_states) if encoder_hidden_states_image is not None: encoder_hidden_states_image = self.image_embedder(encoder_hidden_states_image) return temb, timestep_proj, encoder_hidden_states, encoder_hidden_states_image # Copied from diffusers.models.transformers.transformer_wan.WanRotaryPosEmbed class WanRotaryPosEmbed(nn.Module): def __init__( self, attention_head_dim: int, patch_size: tuple[int, int, int], max_seq_len: int, theta: float = 10000.0, ): super().__init__() self.attention_head_dim = attention_head_dim self.patch_size = patch_size self.max_seq_len = max_seq_len h_dim = w_dim = 2 * (attention_head_dim // 6) t_dim = attention_head_dim - h_dim - w_dim self.t_dim = t_dim self.h_dim = h_dim self.w_dim = w_dim freqs_dtype = torch.float32 if torch.backends.mps.is_available() else torch.float64 freqs_cos = [] freqs_sin = [] for dim in [t_dim, h_dim, w_dim]: freq_cos, freq_sin = get_1d_rotary_pos_embed( dim, max_seq_len, theta, use_real=True, repeat_interleave_real=True, freqs_dtype=freqs_dtype, ) freqs_cos.append(freq_cos) 
freqs_sin.append(freq_sin) self.register_buffer("freqs_cos", torch.cat(freqs_cos, dim=1), persistent=False) self.register_buffer("freqs_sin", torch.cat(freqs_sin, dim=1), persistent=False) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: batch_size, num_channels, num_frames, height, width = hidden_states.shape p_t, p_h, p_w = self.patch_size ppf, pph, ppw = num_frames // p_t, height // p_h, width // p_w split_sizes = [self.t_dim, self.h_dim, self.w_dim] freqs_cos = self.freqs_cos.split(split_sizes, dim=1) freqs_sin = self.freqs_sin.split(split_sizes, dim=1) freqs_cos_f = freqs_cos[0][:ppf].view(ppf, 1, 1, -1).expand(ppf, pph, ppw, -1) freqs_cos_h = freqs_cos[1][:pph].view(1, pph, 1, -1).expand(ppf, pph, ppw, -1) freqs_cos_w = freqs_cos[2][:ppw].view(1, 1, ppw, -1).expand(ppf, pph, ppw, -1) freqs_sin_f = freqs_sin[0][:ppf].view(ppf, 1, 1, -1).expand(ppf, pph, ppw, -1) freqs_sin_h = freqs_sin[1][:pph].view(1, pph, 1, -1).expand(ppf, pph, ppw, -1) freqs_sin_w = freqs_sin[2][:ppw].view(1, 1, ppw, -1).expand(ppf, pph, ppw, -1) freqs_cos = torch.cat([freqs_cos_f, freqs_cos_h, freqs_cos_w], dim=-1).reshape(1, ppf * pph * ppw, 1, -1) freqs_sin = torch.cat([freqs_sin_f, freqs_sin_h, freqs_sin_w], dim=-1).reshape(1, ppf * pph * ppw, 1, -1) return freqs_cos, freqs_sin # Copied from diffusers.models.transformers.transformer_wan.WanTransformerBlock class WanTransformerBlock(nn.Module): def __init__( self, dim: int, ffn_dim: int, num_heads: int, qk_norm: str = "rms_norm_across_heads", cross_attn_norm: bool = False, eps: float = 1e-6, added_kv_proj_dim: int | None = None, ): super().__init__() # 1. Self-attention self.norm1 = FP32LayerNorm(dim, eps, elementwise_affine=False) self.attn1 = WanAttention( dim=dim, heads=num_heads, dim_head=dim // num_heads, eps=eps, cross_attention_dim_head=None, processor=WanAttnProcessor(), ) # 2. 
Cross-attention self.attn2 = WanAttention( dim=dim, heads=num_heads, dim_head=dim // num_heads, eps=eps, added_kv_proj_dim=added_kv_proj_dim, cross_attention_dim_head=dim // num_heads, processor=WanAttnProcessor(), ) self.norm2 = FP32LayerNorm(dim, eps, elementwise_affine=True) if cross_attn_norm else nn.Identity() # 3. Feed-forward self.ffn = FeedForward(dim, inner_dim=ffn_dim, activation_fn="gelu-approximate") self.norm3 = FP32LayerNorm(dim, eps, elementwise_affine=False) self.scale_shift_table = nn.Parameter(torch.randn(1, 6, dim) / dim**0.5) def forward( self, hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor, temb: torch.Tensor, rotary_emb: torch.Tensor, ) -> torch.Tensor: if temb.ndim == 4: # temb: batch_size, seq_len, 6, inner_dim (wan2.2 ti2v) shift_msa, scale_msa, gate_msa, c_shift_msa, c_scale_msa, c_gate_msa = ( self.scale_shift_table.unsqueeze(0) + temb.float() ).chunk(6, dim=2) # batch_size, seq_len, 1, inner_dim shift_msa = shift_msa.squeeze(2) scale_msa = scale_msa.squeeze(2) gate_msa = gate_msa.squeeze(2) c_shift_msa = c_shift_msa.squeeze(2) c_scale_msa = c_scale_msa.squeeze(2) c_gate_msa = c_gate_msa.squeeze(2) else: # temb: batch_size, 6, inner_dim (wan2.1/wan2.2 14B) shift_msa, scale_msa, gate_msa, c_shift_msa, c_scale_msa, c_gate_msa = ( self.scale_shift_table + temb.float() ).chunk(6, dim=1) # 1. Self-attention norm_hidden_states = (self.norm1(hidden_states.float()) * (1 + scale_msa) + shift_msa).type_as(hidden_states) attn_output = self.attn1(norm_hidden_states, None, None, rotary_emb) hidden_states = (hidden_states.float() + attn_output * gate_msa).type_as(hidden_states) # 2. Cross-attention norm_hidden_states = self.norm2(hidden_states.float()).type_as(hidden_states) attn_output = self.attn2(norm_hidden_states, encoder_hidden_states, None, None) hidden_states = hidden_states + attn_output # 3. 
Feed-forward norm_hidden_states = (self.norm3(hidden_states.float()) * (1 + c_scale_msa) + c_shift_msa).type_as( hidden_states ) ff_output = self.ffn(norm_hidden_states) hidden_states = (hidden_states.float() + ff_output.float() * c_gate_msa).type_as(hidden_states) return hidden_states class WanAnimateTransformer3DModel( ModelMixin, ConfigMixin, PeftAdapterMixin, FromOriginalModelMixin, CacheMixin, AttentionMixin ): r""" A Transformer model for video-like data used in the WanAnimate model. Args: patch_size (`tuple[int]`, defaults to `(1, 2, 2)`): 3D patch dimensions for video embedding (t_patch, h_patch, w_patch). num_attention_heads (`int`, defaults to `40`): Fixed length for text embeddings. attention_head_dim (`int`, defaults to `128`): The number of channels in each head. in_channels (`int`, defaults to `16`): The number of channels in the input. out_channels (`int`, defaults to `16`): The number of channels in the output. text_dim (`int`, defaults to `512`): Input dimension for text embeddings. freq_dim (`int`, defaults to `256`): Dimension for sinusoidal time embeddings. ffn_dim (`int`, defaults to `13824`): Intermediate dimension in feed-forward network. num_layers (`int`, defaults to `40`): The number of layers of transformer blocks to use. window_size (`tuple[int]`, defaults to `(-1, -1)`): Window size for local attention (-1 indicates global attention). cross_attn_norm (`bool`, defaults to `True`): Enable cross-attention normalization. qk_norm (`bool`, defaults to `True`): Enable query/key normalization. eps (`float`, defaults to `1e-6`): Epsilon value for normalization layers. image_dim (`int`, *optional*, defaults to `1280`): The number of channels to use for the image embedding. If `None`, no projection is used. added_kv_proj_dim (`int`, *optional*, defaults to `5120`): The number of channels to use for the added key and value projections. If `None`, no projection is used. 
""" _supports_gradient_checkpointing = True _skip_layerwise_casting_patterns = ["patch_embedding", "condition_embedder", "norm"] _no_split_modules = ["WanTransformerBlock", "MotionEncoderResBlock"] _keep_in_fp32_modules = [ "time_embedder", "scale_shift_table", "norm1", "norm2", "norm3", "motion_synthesis_weight", ] _keys_to_ignore_on_load_unexpected = ["norm_added_q"] _repeated_blocks = ["WanTransformerBlock"] @register_to_config def __init__( self, patch_size: tuple[int] = (1, 2, 2), num_attention_heads: int = 40, attention_head_dim: int = 128, in_channels: int | None = 36, latent_channels: int | None = 16, out_channels: int | None = 16, text_dim: int = 4096, freq_dim: int = 256, ffn_dim: int = 13824, num_layers: int = 40, cross_attn_norm: bool = True, qk_norm: str | None = "rms_norm_across_heads", eps: float = 1e-6, image_dim: int | None = 1280, added_kv_proj_dim: int | None = None, rope_max_seq_len: int = 1024, pos_embed_seq_len: int | None = None, motion_encoder_channel_sizes: dict[str, int] | None = None, # Start of Wan Animate-specific args motion_encoder_size: int = 512, motion_style_dim: int = 512, motion_dim: int = 20, motion_encoder_dim: int = 512, face_encoder_hidden_dim: int = 1024, face_encoder_num_heads: int = 4, inject_face_latents_blocks: int = 5, motion_encoder_batch_size: int = 8, ) -> None: super().__init__() inner_dim = num_attention_heads * attention_head_dim # Allow either only in_channels or only latent_channels to be set for convenience if in_channels is None and latent_channels is not None: in_channels = 2 * latent_channels + 4 elif in_channels is not None and latent_channels is None: latent_channels = (in_channels - 4) // 2 elif in_channels is not None and latent_channels is not None: # TODO: should this always be true? 
assert in_channels == 2 * latent_channels + 4, "in_channels should be 2 * latent_channels + 4" else: raise ValueError("At least one of `in_channels` and `latent_channels` must be supplied.") out_channels = out_channels or latent_channels # 1. Patch & position embedding self.rope = WanRotaryPosEmbed(attention_head_dim, patch_size, rope_max_seq_len) self.patch_embedding = nn.Conv3d(in_channels, inner_dim, kernel_size=patch_size, stride=patch_size) self.pose_patch_embedding = nn.Conv3d(latent_channels, inner_dim, kernel_size=patch_size, stride=patch_size) # 2. Condition embeddings self.condition_embedder = WanTimeTextImageEmbedding( dim=inner_dim, time_freq_dim=freq_dim, time_proj_dim=inner_dim * 6, text_embed_dim=text_dim, image_embed_dim=image_dim, pos_embed_seq_len=pos_embed_seq_len, ) # Motion encoder self.motion_encoder = WanAnimateMotionEncoder( size=motion_encoder_size, style_dim=motion_style_dim, motion_dim=motion_dim, out_dim=motion_encoder_dim, channels=motion_encoder_channel_sizes, ) # Face encoder self.face_encoder = WanAnimateFaceEncoder( in_dim=motion_encoder_dim, out_dim=inner_dim, hidden_dim=face_encoder_hidden_dim, num_heads=face_encoder_num_heads, ) # 3. Transformer blocks self.blocks = nn.ModuleList( [ WanTransformerBlock( dim=inner_dim, ffn_dim=ffn_dim, num_heads=num_attention_heads, qk_norm=qk_norm, cross_attn_norm=cross_attn_norm, eps=eps, added_kv_proj_dim=added_kv_proj_dim, ) for _ in range(num_layers) ] ) self.face_adapter = nn.ModuleList( [ WanAnimateFaceBlockCrossAttention( dim=inner_dim, heads=num_attention_heads, dim_head=inner_dim // num_attention_heads, eps=eps, cross_attention_dim_head=inner_dim // num_attention_heads, processor=WanAnimateFaceBlockAttnProcessor(), ) for _ in range(num_layers // inject_face_latents_blocks) ] ) # 4. 
Output norm & projection self.norm_out = FP32LayerNorm(inner_dim, eps, elementwise_affine=False) self.proj_out = nn.Linear(inner_dim, out_channels * math.prod(patch_size)) self.scale_shift_table = nn.Parameter(torch.randn(1, 2, inner_dim) / inner_dim**0.5) self.gradient_checkpointing = False @apply_lora_scale("attention_kwargs") def forward( self, hidden_states: torch.Tensor, timestep: torch.LongTensor, encoder_hidden_states: torch.Tensor, encoder_hidden_states_image: torch.Tensor | None = None, pose_hidden_states: torch.Tensor | None = None, face_pixel_values: torch.Tensor | None = None, motion_encode_batch_size: int | None = None, return_dict: bool = True, attention_kwargs: dict[str, Any] | None = None, ) -> torch.Tensor | dict[str, torch.Tensor]: """ Forward pass of Wan2.2-Animate transformer model. Args: hidden_states (`torch.Tensor` of shape `(B, 2C + 4, T + 1, H, W)`): Input noisy video latents of shape `(B, 2C + 4, T + 1, H, W)`, where B is the batch size, C is the number of latent channels (16 for Wan VAE), T is the number of latent frames in an inference segment, H is the latent height, and W is the latent width. timestep: (`torch.LongTensor`): The current timestep in the denoising loop. encoder_hidden_states (`torch.Tensor`): Text embeddings from the text encoder (umT5 for Wan Animate). encoder_hidden_states_image (`torch.Tensor`): CLIP visual features of the reference (character) image. pose_hidden_states (`torch.Tensor` of shape `(B, C, T, H, W)`): Pose video latents. TODO: description face_pixel_values (`torch.Tensor` of shape `(B, C', S, H', W')`): Face video in pixel space (not latent space). Typically C' = 3 and H' and W' are the height/width of the face video in pixels. Here S is the inference segment length, usually set to 77. motion_encode_batch_size (`int`, *optional*): The batch size for batched encoding of the face video via the motion encoder. Will default to `self.config.motion_encoder_batch_size` if not set. 
return_dict (`bool`, *optional*, defaults to `True`): Whether to return the output as a dict or tuple. """ # Check that shapes match up if pose_hidden_states is not None and pose_hidden_states.shape[2] + 1 != hidden_states.shape[2]: raise ValueError( f"pose_hidden_states frame dim (dim 2) is {pose_hidden_states.shape[2]} but must be one less than the" f" hidden_states's corresponding frame dim: {hidden_states.shape[2]}" ) batch_size, num_channels, num_frames, height, width = hidden_states.shape p_t, p_h, p_w = self.config.patch_size post_patch_num_frames = num_frames // p_t post_patch_height = height // p_h post_patch_width = width // p_w # 1. Rotary position embedding rotary_emb = self.rope(hidden_states) # 2. Patch embedding hidden_states = self.patch_embedding(hidden_states) pose_hidden_states = self.pose_patch_embedding(pose_hidden_states) # Add pose embeddings to hidden states hidden_states[:, :, 1:] = hidden_states[:, :, 1:] + pose_hidden_states # Calling contiguous() here is important so that we don't recompile when performing regional compilation hidden_states = hidden_states.flatten(2).transpose(1, 2).contiguous() # 3. Condition embeddings (time, text, image) # Wan Animate is based on Wan 2.1 and thus uses Wan 2.1's timestep logic temb, timestep_proj, encoder_hidden_states, encoder_hidden_states_image = self.condition_embedder( timestep, encoder_hidden_states, encoder_hidden_states_image, timestep_seq_len=None ) # batch_size, 6, inner_dim timestep_proj = timestep_proj.unflatten(1, (6, -1)) if encoder_hidden_states_image is not None: encoder_hidden_states = torch.concat([encoder_hidden_states_image, encoder_hidden_states], dim=1) # 4. 
Get motion features from the face video # Motion vector computation from face pixel values batch_size, channels, num_face_frames, height, width = face_pixel_values.shape # Rearrange from (B, C, T, H, W) to (B*T, C, H, W) face_pixel_values = face_pixel_values.permute(0, 2, 1, 3, 4).reshape(-1, channels, height, width) # Extract motion features using motion encoder # Perform batched motion encoder inference to allow trading off inference speed for memory usage motion_encode_batch_size = motion_encode_batch_size or self.config.motion_encoder_batch_size face_batches = torch.split(face_pixel_values, motion_encode_batch_size) motion_vec_batches = [] for face_batch in face_batches: motion_vec_batch = self.motion_encoder(face_batch) motion_vec_batches.append(motion_vec_batch) motion_vec = torch.cat(motion_vec_batches) motion_vec = motion_vec.view(batch_size, num_face_frames, -1) # Now get face features from the motion vector motion_vec = self.face_encoder(motion_vec) # Add padding at the beginning (prepend zeros) pad_face = torch.zeros_like(motion_vec[:, :1]) motion_vec = torch.cat([pad_face, motion_vec], dim=1) # 5. Transformer blocks with face adapter integration for block_idx, block in enumerate(self.blocks): if torch.is_grad_enabled() and self.gradient_checkpointing: hidden_states = self._gradient_checkpointing_func( block, hidden_states, encoder_hidden_states, timestep_proj, rotary_emb ) else: hidden_states = block(hidden_states, encoder_hidden_states, timestep_proj, rotary_emb) # Face adapter integration: apply after every 5th block (0, 5, 10, 15, ...) 
if block_idx % self.config.inject_face_latents_blocks == 0: face_adapter_block_idx = block_idx // self.config.inject_face_latents_blocks face_adapter_output = self.face_adapter[face_adapter_block_idx](hidden_states, motion_vec) # In case the face adapter and main transformer blocks are on different devices, which can happen when # using model parallelism face_adapter_output = face_adapter_output.to(device=hidden_states.device) hidden_states = face_adapter_output + hidden_states # 6. Output norm, projection & unpatchify # batch_size, inner_dim shift, scale = (self.scale_shift_table.to(temb.device) + temb.unsqueeze(1)).chunk(2, dim=1) hidden_states_original_dtype = hidden_states.dtype hidden_states = self.norm_out(hidden_states.float()) # Move the shift and scale tensors to the same device as hidden_states. # When using multi-GPU inference via accelerate these will be on the # first device rather than the last device, which hidden_states ends up # on. shift = shift.to(hidden_states.device) scale = scale.to(hidden_states.device) hidden_states = (hidden_states * (1 + scale) + shift).to(dtype=hidden_states_original_dtype) hidden_states = self.proj_out(hidden_states) hidden_states = hidden_states.reshape( batch_size, post_patch_num_frames, post_patch_height, post_patch_width, p_t, p_h, p_w, -1 ) hidden_states = hidden_states.permute(0, 7, 1, 4, 2, 5, 3, 6) output = hidden_states.flatten(6, 7).flatten(4, 5).flatten(2, 3) if not return_dict: return (output,) return Transformer2DModelOutput(sample=output)
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/models/transformers/transformer_wan_animate.py", "license": "Apache License 2.0", "lines": 1087, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:src/diffusers/pipelines/wan/image_processor.py
# Copyright 2025 The Wan Team and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import numpy as np
import PIL.Image
import torch

from ...configuration_utils import register_to_config
from ...image_processor import VaeImageProcessor
from ...utils import PIL_INTERPOLATION


class WanAnimateImageProcessor(VaeImageProcessor):
    r"""
    Image processor to preprocess the reference (character) image for the Wan Animate model.

    Args:
        do_resize (`bool`, *optional*, defaults to `True`):
            Whether to downscale the image's (height, width) dimensions to multiples of `vae_scale_factor`. Can accept
            `height` and `width` arguments from [`image_processor.VaeImageProcessor.preprocess`] method.
        vae_scale_factor (`int`, *optional*, defaults to `8`):
            VAE (spatial) scale factor. If `do_resize` is `True`, the image is automatically resized to multiples of
            this factor.
        vae_latent_channels (`int`, *optional*, defaults to `16`):
            VAE latent channels.
        spatial_patch_size (`tuple[int, int]`, *optional*, defaults to `(2, 2)`):
            The spatial patch size used by the diffusion transformer. For Wan models, this is typically (2, 2).
        resample (`str`, *optional*, defaults to `lanczos`):
            Resampling filter to use when resizing the image.
        do_normalize (`bool`, *optional*, defaults to `True`):
            Whether to normalize the image to [-1,1].
        do_binarize (`bool`, *optional*, defaults to `False`):
            Whether to binarize the image to 0/1.
        do_convert_rgb (`bool`, *optional*, defaults to be `False`):
            Whether to convert the images to RGB format.
        do_convert_grayscale (`bool`, *optional*, defaults to be `False`):
            Whether to convert the images to grayscale format.
        fill_color (`str` or `float` or `tuple[float, ...]`, *optional*, defaults to `0`):
            An optional fill color when `resize_mode` is set to `"fill"`. This will fill the empty space with that
            color instead of filling with data from the image. Any valid `color` argument to `PIL.Image.new` is valid;
            if `None`, will default to filling with data from `image`.
    """

    @register_to_config
    def __init__(
        self,
        do_resize: bool = True,
        vae_scale_factor: int = 8,
        vae_latent_channels: int = 16,
        spatial_patch_size: tuple[int, int] = (2, 2),
        resample: str = "lanczos",
        reducing_gap: int | None = None,
        do_normalize: bool = True,
        do_binarize: bool = False,
        do_convert_rgb: bool = False,
        do_convert_grayscale: bool = False,
        fill_color: str | float | tuple[float, ...] | None = 0,
    ):
        super().__init__()
        if do_convert_rgb and do_convert_grayscale:
            # Single message string: the adjacent literals concatenate so ValueError
            # receives one argument (previously a stray comma split it into two).
            raise ValueError(
                "`do_convert_rgb` and `do_convert_grayscale` can not both be set to `True`."
                " If you intended to convert the image into RGB format, please set `do_convert_grayscale = False`."
                " If you intended to convert the image into grayscale format, please set `do_convert_rgb = False`."
            )

    def _resize_and_fill(
        self,
        image: PIL.Image.Image,
        width: int,
        height: int,
    ) -> PIL.Image.Image:
        r"""
        Resize the image to fit within the specified width and height, maintaining the aspect ratio, and then center
        the image within the dimensions, filling empty with data from image.

        Args:
            image (`PIL.Image.Image`):
                The image to resize and fill.
            width (`int`):
                The width to resize the image to.
            height (`int`):
                The height to resize the image to.

        Returns:
            `PIL.Image.Image`: The resized and filled image.
        """
        ratio = width / height
        src_ratio = image.width / image.height

        # `fill_color is None` means "extend the image's own edge data into the
        # empty bands"; any other value is used as a solid PIL fill color.
        fill_with_image_data = self.config.fill_color is None
        fill_color = self.config.fill_color or 0

        # Scale so the image fits entirely inside (width, height), preserving aspect ratio.
        src_w = width if ratio < src_ratio else image.width * height // image.height
        src_h = height if ratio >= src_ratio else image.height * width // image.width

        resized = image.resize((src_w, src_h), resample=PIL_INTERPOLATION[self.config.resample])
        res = PIL.Image.new("RGB", (width, height), color=fill_color)
        res.paste(resized, box=(width // 2 - src_w // 2, height // 2 - src_h // 2))

        if fill_with_image_data:
            if ratio < src_ratio:
                # Letterbox top/bottom: stretch the first/last pixel rows into the bands.
                fill_height = height // 2 - src_h // 2
                if fill_height > 0:
                    res.paste(resized.resize((width, fill_height), box=(0, 0, width, 0)), box=(0, 0))
                    res.paste(
                        resized.resize((width, fill_height), box=(0, resized.height, width, resized.height)),
                        box=(0, fill_height + src_h),
                    )
            elif ratio > src_ratio:
                # Pillarbox left/right: stretch the first/last pixel columns into the bands.
                fill_width = width // 2 - src_w // 2
                if fill_width > 0:
                    res.paste(resized.resize((fill_width, height), box=(0, 0, 0, height)), box=(0, 0))
                    res.paste(
                        resized.resize((fill_width, height), box=(resized.width, 0, resized.width, height)),
                        box=(fill_width + src_w, 0),
                    )

        return res

    def get_default_height_width(
        self,
        image: PIL.Image.Image | np.ndarray | torch.Tensor,
        height: int | None = None,
        width: int | None = None,
    ) -> tuple[int, int]:
        r"""
        Returns the height and width of the image, downscaled to the next integer multiple of `vae_scale_factor`.

        Args:
            image (`PIL.Image.Image | np.ndarray | torch.Tensor`):
                The image input, which can be a PIL image, NumPy array, or PyTorch tensor. If it is a NumPy array, it
                should have shape `[batch, height, width]` or `[batch, height, width, channels]`. If it is a PyTorch
                tensor, it should have shape `[batch, channels, height, width]`.
            height (`int | None`, *optional*, defaults to `None`):
                The height of the preprocessed image. If `None`, the height of the `image` input will be used.
            width (`int | None`, *optional*, defaults to `None`):
                The width of the preprocessed image. If `None`, the width of the `image` input will be used.

        Returns:
            `tuple[int, int]`:
                A tuple containing the height and width, both resized to the nearest integer multiple of
                `vae_scale_factor * spatial_patch_size`.
        """
        if height is None:
            if isinstance(image, PIL.Image.Image):
                height = image.height
            elif isinstance(image, torch.Tensor):
                height = image.shape[2]
            else:
                height = image.shape[1]

        if width is None:
            if isinstance(image, PIL.Image.Image):
                width = image.width
            elif isinstance(image, torch.Tensor):
                width = image.shape[3]
            else:
                width = image.shape[2]

        max_area = width * height
        aspect_ratio = height / width
        mod_value_h = self.config.vae_scale_factor * self.config.spatial_patch_size[0]
        mod_value_w = self.config.vae_scale_factor * self.config.spatial_patch_size[1]

        # Keep (approximately) the original pixel area while snapping each side down
        # to a multiple of `vae_scale_factor * spatial_patch_size`.
        height = round(np.sqrt(max_area * aspect_ratio)) // mod_value_h * mod_value_h
        width = round(np.sqrt(max_area / aspect_ratio)) // mod_value_w * mod_value_w
        return height, width
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/pipelines/wan/image_processor.py", "license": "Apache License 2.0", "lines": 161, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:src/diffusers/pipelines/wan/pipeline_wan_animate.py
# Copyright 2025 The Wan Team and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import html from copy import deepcopy from typing import Any, Callable import PIL import regex as re import torch import torch.nn.functional as F from transformers import AutoTokenizer, CLIPImageProcessor, CLIPVisionModel, UMT5EncoderModel from ...callbacks import MultiPipelineCallbacks, PipelineCallback from ...image_processor import PipelineImageInput from ...loaders import WanLoraLoaderMixin from ...models import AutoencoderKLWan, WanAnimateTransformer3DModel from ...schedulers import UniPCMultistepScheduler from ...utils import is_ftfy_available, is_torch_xla_available, logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ...video_processor import VideoProcessor from ..pipeline_utils import DiffusionPipeline from .image_processor import WanAnimateImageProcessor from .pipeline_output import WanPipelineOutput if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) # pylint: disable=invalid-name if is_ftfy_available(): import ftfy EXAMPLE_DOC_STRING = """ Examples: ```python >>> import torch >>> import numpy as np >>> from diffusers import WanAnimatePipeline >>> from diffusers.utils import export_to_video, load_image, load_video >>> model_id = "Wan-AI/Wan2.2-Animate-14B-Diffusers" >>> pipe = 
WanAnimatePipeline.from_pretrained(model_id, torch_dtype=torch.bfloat16) >>> # Optionally upcast the Wan VAE to FP32 >>> pipe.vae.to(torch.float32) >>> pipe.to("cuda") >>> # Load the reference character image >>> image = load_image( ... "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/astronaut.jpg" ... ) >>> # Load pose and face videos (preprocessed from reference video) >>> # Note: Videos should be preprocessed to extract pose keypoints and face features >>> # Refer to the Wan-Animate preprocessing documentation for details >>> pose_video = load_video("path/to/pose_video.mp4") >>> face_video = load_video("path/to/face_video.mp4") >>> # CFG is generally not used for Wan Animate >>> prompt = ( ... "An astronaut hatching from an egg, on the surface of the moon, the darkness and depth of space realised in " ... "the background. High quality, ultrarealistic detail and breath-taking movie-like camera shot." ... ) >>> # Animation mode: Animate the character with the motion from pose/face videos >>> output = pipe( ... image=image, ... pose_video=pose_video, ... face_video=face_video, ... prompt=prompt, ... height=height, ... width=width, ... segment_frame_length=77, # Frame length of each inference segment ... guidance_scale=1.0, ... num_inference_steps=20, ... mode="animate", ... ).frames[0] >>> export_to_video(output, "output_animation.mp4", fps=30) >>> # Replacement mode: Replace a character in the background video >>> # Requires additional background_video and mask_video inputs >>> background_video = load_video("path/to/background_video.mp4") >>> mask_video = load_video("path/to/mask_video.mp4") # Black areas preserved, white areas generated >>> output = pipe( ... image=image, ... pose_video=pose_video, ... face_video=face_video, ... background_video=background_video, ... mask_video=mask_video, ... prompt=prompt, ... height=height, ... width=width, ... segment_frame_length=77, # Frame length of each inference segment ... 
guidance_scale=1.0, ... num_inference_steps=20, ... mode="replace", ... ).frames[0] >>> export_to_video(output, "output_replacement.mp4", fps=30) ``` """ def basic_clean(text): text = ftfy.fix_text(text) text = html.unescape(html.unescape(text)) return text.strip() def whitespace_clean(text): text = re.sub(r"\s+", " ", text) text = text.strip() return text def prompt_clean(text): text = whitespace_clean(basic_clean(text)) return text # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents def retrieve_latents( encoder_output: torch.Tensor, generator: torch.Generator | None = None, sample_mode: str = "sample" ): if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": return encoder_output.latent_dist.sample(generator) elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": return encoder_output.latent_dist.mode() elif hasattr(encoder_output, "latents"): return encoder_output.latents else: raise AttributeError("Could not access latents of provided encoder_output") class WanAnimatePipeline(DiffusionPipeline, WanLoraLoaderMixin): r""" Pipeline for unified character animation and replacement using Wan-Animate. WanAnimatePipeline takes a character image, pose video, and face video as input, and generates a video in two modes: 1. **Animation mode**: The model generates a video of the character image that mimics the human motion in the input pose and face videos. The character is animated based on the provided motion controls, creating a new animated video of the character. 2. **Replacement mode**: The model replaces a character in a background video with the provided character image, using the pose and face videos for motion control. This mode requires additional `background_video` and `mask_video` inputs. The mask video should have black regions where the original content should be preserved and white regions where the new character should be generated. 
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). The pipeline also inherits the following loading methods: - [`~loaders.WanLoraLoaderMixin.load_lora_weights`] for loading LoRA weights Args: tokenizer ([`T5Tokenizer`]): Tokenizer from [T5](https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5Tokenizer), specifically the [google/umt5-xxl](https://huggingface.co/google/umt5-xxl) variant. text_encoder ([`T5EncoderModel`]): [T5](https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5EncoderModel), specifically the [google/umt5-xxl](https://huggingface.co/google/umt5-xxl) variant. image_encoder ([`CLIPVisionModel`]): [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPVisionModel), specifically the [clip-vit-huge-patch14](https://github.com/mlfoundations/open_clip/blob/main/docs/PRETRAINED.md#vit-h14-xlm-roberta-large) variant. transformer ([`WanAnimateTransformer3DModel`]): Conditional Transformer to denoise the input latents. scheduler ([`UniPCMultistepScheduler`]): A scheduler to be used in combination with `transformer` to denoise the encoded image latents. vae ([`AutoencoderKLWan`]): Variational Auto-Encoder (VAE) Model to encode and decode videos to and from latent representations. image_processor ([`CLIPImageProcessor`]): Image processor for preprocessing images before encoding. 
""" model_cpu_offload_seq = "text_encoder->image_encoder->transformer->vae" _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"] def __init__( self, tokenizer: AutoTokenizer, text_encoder: UMT5EncoderModel, vae: AutoencoderKLWan, scheduler: UniPCMultistepScheduler, image_processor: CLIPImageProcessor, image_encoder: CLIPVisionModel, transformer: WanAnimateTransformer3DModel, ): super().__init__() self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, image_encoder=image_encoder, transformer=transformer, scheduler=scheduler, image_processor=image_processor, ) self.vae_scale_factor_temporal = self.vae.config.scale_factor_temporal if getattr(self, "vae", None) else 4 self.vae_scale_factor_spatial = self.vae.config.scale_factor_spatial if getattr(self, "vae", None) else 8 self.video_processor = VideoProcessor(vae_scale_factor=self.vae_scale_factor_spatial) self.video_processor_for_mask = VideoProcessor( vae_scale_factor=self.vae_scale_factor_spatial, do_normalize=False, do_convert_grayscale=True ) # In case self.transformer is None (e.g. 
for some pipeline tests) spatial_patch_size = self.transformer.config.patch_size[-2:] if self.transformer is not None else (2, 2) self.vae_image_processor = WanAnimateImageProcessor( vae_scale_factor=self.vae_scale_factor_spatial, spatial_patch_size=spatial_patch_size, resample="bilinear", fill_color=0, ) self.image_processor = image_processor def _get_t5_prompt_embeds( self, prompt: str | list[str] = None, num_videos_per_prompt: int = 1, max_sequence_length: int = 512, device: torch.device | None = None, dtype: torch.dtype | None = None, ): device = device or self._execution_device dtype = dtype or self.text_encoder.dtype prompt = [prompt] if isinstance(prompt, str) else prompt prompt = [prompt_clean(u) for u in prompt] batch_size = len(prompt) text_inputs = self.tokenizer( prompt, padding="max_length", max_length=max_sequence_length, truncation=True, add_special_tokens=True, return_attention_mask=True, return_tensors="pt", ) text_input_ids, mask = text_inputs.input_ids, text_inputs.attention_mask seq_lens = mask.gt(0).sum(dim=1).long() prompt_embeds = self.text_encoder(text_input_ids.to(device), mask.to(device)).last_hidden_state prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) prompt_embeds = [u[:v] for u, v in zip(prompt_embeds, seq_lens)] prompt_embeds = torch.stack( [torch.cat([u, u.new_zeros(max_sequence_length - u.size(0), u.size(1))]) for u in prompt_embeds], dim=0 ) # duplicate text embeddings for each generation per prompt, using mps friendly method _, seq_len, _ = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_videos_per_prompt, 1) prompt_embeds = prompt_embeds.view(batch_size * num_videos_per_prompt, seq_len, -1) return prompt_embeds # Copied from diffusers.pipelines.wan.pipeline_wan_i2v.WanImageToVideoPipeline.encode_image def encode_image( self, image: PipelineImageInput, device: torch.device | None = None, ): device = device or self._execution_device image = self.image_processor(images=image, 
return_tensors="pt").to(device) image_embeds = self.image_encoder(**image, output_hidden_states=True) return image_embeds.hidden_states[-2] # Copied from diffusers.pipelines.wan.pipeline_wan.WanPipeline.encode_prompt def encode_prompt( self, prompt: str | list[str], negative_prompt: str | list[str] | None = None, do_classifier_free_guidance: bool = True, num_videos_per_prompt: int = 1, prompt_embeds: torch.Tensor | None = None, negative_prompt_embeds: torch.Tensor | None = None, max_sequence_length: int = 226, device: torch.device | None = None, dtype: torch.dtype | None = None, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `list[str]`, *optional*): prompt to be encoded negative_prompt (`str` or `list[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). do_classifier_free_guidance (`bool`, *optional*, defaults to `True`): Whether to use classifier free guidance or not. num_videos_per_prompt (`int`, *optional*, defaults to 1): Number of videos that should be generated per prompt. torch device to place the resulting embeddings on prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. 
device: (`torch.device`, *optional*): torch device dtype: (`torch.dtype`, *optional*): torch dtype """ device = device or self._execution_device prompt = [prompt] if isinstance(prompt, str) else prompt if prompt is not None: batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: prompt_embeds = self._get_t5_prompt_embeds( prompt=prompt, num_videos_per_prompt=num_videos_per_prompt, max_sequence_length=max_sequence_length, device=device, dtype=dtype, ) if do_classifier_free_guidance and negative_prompt_embeds is None: negative_prompt = negative_prompt or "" negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt if prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) negative_prompt_embeds = self._get_t5_prompt_embeds( prompt=negative_prompt, num_videos_per_prompt=num_videos_per_prompt, max_sequence_length=max_sequence_length, device=device, dtype=dtype, ) return prompt_embeds, negative_prompt_embeds def check_inputs( self, prompt, negative_prompt, image, pose_video, face_video, background_video, mask_video, height, width, prompt_embeds=None, negative_prompt_embeds=None, image_embeds=None, callback_on_step_end_tensor_inputs=None, mode=None, prev_segment_conditioning_frames=None, ): if image is not None and image_embeds is not None: raise ValueError( f"Cannot forward both `image`: {image} and `image_embeds`: {image_embeds}. Please make sure to" " only forward one of the two." 
) if image is None and image_embeds is None: raise ValueError( "Provide either `image` or `prompt_embeds`. Cannot leave both `image` and `image_embeds` undefined." ) if image is not None and not isinstance(image, torch.Tensor) and not isinstance(image, PIL.Image.Image): raise ValueError(f"`image` has to be of type `torch.Tensor` or `PIL.Image.Image` but is {type(image)}") if pose_video is None: raise ValueError("Provide `pose_video`. Cannot leave `pose_video` undefined.") if face_video is None: raise ValueError("Provide `face_video`. Cannot leave `face_video` undefined.") if not isinstance(pose_video, list) or not isinstance(face_video, list): raise ValueError("`pose_video` and `face_video` must be lists of PIL images.") if len(pose_video) == 0 or len(face_video) == 0: raise ValueError("`pose_video` and `face_video` must contain at least one frame.") if mode == "replace" and (background_video is None or mask_video is None): raise ValueError( "Provide `background_video` and `mask_video`. Cannot leave both `background_video` and `mask_video`" " undefined when mode is `replace`." ) if mode == "replace" and (not isinstance(background_video, list) or not isinstance(mask_video, list)): raise ValueError("`background_video` and `mask_video` must be lists of PIL images when mode is `replace`.") if height % 16 != 0 or width % 16 != 0: raise ValueError(f"`height` and `width` have to be divisible by 16 but are {height} and {width}.") if callback_on_step_end_tensor_inputs is not None and not all( k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs ): raise ValueError( f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found" f" {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" ) if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. 
Please make sure to" " only forward one of the two." ) elif negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." ) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") elif negative_prompt is not None and ( not isinstance(negative_prompt, str) and not isinstance(negative_prompt, list) ): raise ValueError(f"`negative_prompt` has to be of type `str` or `list` but is {type(negative_prompt)}") if mode is not None and (not isinstance(mode, str) or mode not in ("animate", "replace")): raise ValueError( f"`mode` has to be of type `str` and in ('animate', 'replace') but its type is {type(mode)} and value is {mode}" ) if prev_segment_conditioning_frames is not None and ( not isinstance(prev_segment_conditioning_frames, int) or prev_segment_conditioning_frames not in (1, 5) ): raise ValueError( f"`prev_segment_conditioning_frames` has to be of type `int` and 1 or 5 but its type is" f" {type(prev_segment_conditioning_frames)} and value is {prev_segment_conditioning_frames}" ) def get_i2v_mask( self, batch_size: int, latent_t: int, latent_h: int, latent_w: int, mask_len: int = 1, mask_pixel_values: torch.Tensor | None = None, dtype: torch.dtype | None = None, device: str | torch.device = "cuda", ) -> torch.Tensor: # mask_pixel_values shape (if supplied): [B, C = 1, T, latent_h, latent_w] if mask_pixel_values is None: mask_lat_size = torch.zeros( batch_size, 1, (latent_t - 1) * 4 + 1, latent_h, latent_w, dtype=dtype, device=device ) else: mask_lat_size = 
mask_pixel_values.clone().to(device=device, dtype=dtype)
        # Mark the first `mask_len` frames as conditioning frames.
        mask_lat_size[:, :, :mask_len] = 1
        first_frame_mask = mask_lat_size[:, :, 0:1]
        # Repeat first frame mask self.vae_scale_factor_temporal (= 4) times in the frame dimension
        first_frame_mask = torch.repeat_interleave(first_frame_mask, dim=2, repeats=self.vae_scale_factor_temporal)
        mask_lat_size = torch.concat([first_frame_mask, mask_lat_size[:, :, 1:]], dim=2)
        # Fold the temporal repeat factor into the channel dimension:
        # [B, C = 1, 4 * T_lat, H_lat, W_lat] --> [B, C = 4, T_lat, H_lat, W_lat]
        mask_lat_size = mask_lat_size.view(
            batch_size, -1, self.vae_scale_factor_temporal, latent_h, latent_w
        ).transpose(1, 2)
        return mask_lat_size

    def prepare_reference_image_latents(
        self,
        image: torch.Tensor,
        batch_size: int = 1,
        sample_mode: str = "argmax",
        generator: torch.Generator | list[torch.Generator] | None = None,
        dtype: torch.dtype | None = None,
        device: torch.device | None = None,
    ) -> torch.Tensor:
        """VAE-encode the reference (character) image and prepend its latent-space I2V mask along channels."""
        # image shape: (B, C, H, W) or (B, C, T, H, W)
        dtype = dtype or self.vae.dtype
        if image.ndim == 4:
            # Add a singleton frame dimension after the channels dimension
            image = image.unsqueeze(2)
        _, _, _, height, width = image.shape
        latent_height = height // self.vae_scale_factor_spatial
        latent_width = width // self.vae_scale_factor_spatial

        # Encode image to latents using VAE
        image = image.to(device=device, dtype=dtype)
        if isinstance(generator, list):
            # Like in prepare_latents, assume len(generator) == batch_size
            ref_image_latents = [
                retrieve_latents(self.vae.encode(image), generator=g, sample_mode=sample_mode) for g in generator
            ]
            ref_image_latents = torch.cat(ref_image_latents)
        else:
            ref_image_latents = retrieve_latents(self.vae.encode(image), generator, sample_mode)

        # Standardize latents in preparation for Wan VAE encode
        latents_mean = (
            torch.tensor(self.vae.config.latents_mean)
            .view(1, self.vae.config.z_dim, 1, 1, 1)
            .to(ref_image_latents.device, ref_image_latents.dtype)
        )
        latents_recip_std = 1.0 / torch.tensor(self.vae.config.latents_std).view(1, self.vae.config.z_dim, 1, 1, 1).to(
            ref_image_latents.device, ref_image_latents.dtype
        )
        ref_image_latents = (ref_image_latents - latents_mean) * latents_recip_std

        # Handle the case where we supply one image and one generator, but batch_size > 1 (e.g. generating multiple
        # videos per prompt)
        if ref_image_latents.shape[0] == 1 and batch_size > 1:
            ref_image_latents = ref_image_latents.expand(batch_size, -1, -1, -1, -1)

        # Prepare I2V mask in latent space and prepend to the reference image latents along channel dim
        reference_image_mask = self.get_i2v_mask(batch_size, 1, latent_height, latent_width, 1, None, dtype, device)
        reference_image_latents = torch.cat([reference_image_mask, ref_image_latents], dim=1)
        return reference_image_latents

    def prepare_prev_segment_cond_latents(
        self,
        prev_segment_cond_video: torch.Tensor | None = None,
        background_video: torch.Tensor | None = None,
        mask_video: torch.Tensor | None = None,
        batch_size: int = 1,
        segment_frame_length: int = 77,
        start_frame: int = 0,
        height: int = 720,
        width: int = 1280,
        prev_segment_cond_frames: int = 1,
        task: str = "animate",
        interpolation_mode: str = "bicubic",
        sample_mode: str = "argmax",
        generator: torch.Generator | list[torch.Generator] | None = None,
        dtype: torch.dtype | None = None,
        device: torch.device | None = None,
    ) -> torch.Tensor:
        """Encode the previous segment's conditioning frames (plus background/mask in "replace" mode) to latents."""
        # prev_segment_cond_video shape: (B, C, T, H, W) in pixel space if supplied
        # background_video shape: (B, C, T, H, W) (same as prev_segment_cond_video shape)
        # mask_video shape: (B, 1, T, H, W) (same as prev_segment_cond_video, but with only 1 channel)
        dtype = dtype or self.vae.dtype
        if prev_segment_cond_video is None:
            # First segment: no previous frames exist yet, so condition on background ("replace") or zeros ("animate").
            if task == "replace":
                prev_segment_cond_video = background_video[:, :, :prev_segment_cond_frames].to(dtype)
            else:
                cond_frames_shape = (batch_size, 3, prev_segment_cond_frames, height, width)  # In pixel space
                prev_segment_cond_video = torch.zeros(cond_frames_shape, dtype=dtype, device=device)

        data_batch_size, channels, _, segment_height, segment_width = prev_segment_cond_video.shape
num_latent_frames = (segment_frame_length - 1) // self.vae_scale_factor_temporal + 1 latent_height = height // self.vae_scale_factor_spatial latent_width = width // self.vae_scale_factor_spatial if segment_height != height or segment_width != width: print( f"Interpolating prev segment cond video from ({segment_width}, {segment_height}) to ({width}, {height})" ) # Perform a 4D (spatial) rather than a 5D (spatiotemporal) reshape, following the original code prev_segment_cond_video = prev_segment_cond_video.transpose(1, 2).flatten(0, 1) # [B * T, C, H, W] prev_segment_cond_video = F.interpolate( prev_segment_cond_video, size=(height, width), mode=interpolation_mode ) prev_segment_cond_video = prev_segment_cond_video.unflatten(0, (batch_size, -1)).transpose(1, 2) # Fill the remaining part of the cond video segment with zeros (if animating) or the background video (if # replacing). if task == "replace": remaining_segment = background_video[:, :, prev_segment_cond_frames:].to(dtype) else: remaining_segment_frames = segment_frame_length - prev_segment_cond_frames remaining_segment = torch.zeros( batch_size, channels, remaining_segment_frames, height, width, dtype=dtype, device=device ) # Prepend the conditioning frames from the previous segment to the remaining segment video in the frame dim prev_segment_cond_video = prev_segment_cond_video.to(dtype=dtype) full_segment_cond_video = torch.cat([prev_segment_cond_video, remaining_segment], dim=2) if isinstance(generator, list): if data_batch_size == len(generator): prev_segment_cond_latents = [ retrieve_latents(self.vae.encode(full_segment_cond_video[i].unsqueeze(0)), g, sample_mode) for i, g in enumerate(generator) ] elif data_batch_size == 1: # Like prepare_latents, assume len(generator) == batch_size prev_segment_cond_latents = [ retrieve_latents(self.vae.encode(full_segment_cond_video), g, sample_mode) for g in generator ] else: raise ValueError( f"The batch size of the prev segment video should be either 
{len(generator)} or 1 but is" f" {data_batch_size}" ) prev_segment_cond_latents = torch.cat(prev_segment_cond_latents) else: prev_segment_cond_latents = retrieve_latents( self.vae.encode(full_segment_cond_video), generator, sample_mode ) # Standardize latents in preparation for Wan VAE encode latents_mean = ( torch.tensor(self.vae.config.latents_mean) .view(1, self.vae.config.z_dim, 1, 1, 1) .to(prev_segment_cond_latents.device, prev_segment_cond_latents.dtype) ) latents_recip_std = 1.0 / torch.tensor(self.vae.config.latents_std).view(1, self.vae.config.z_dim, 1, 1, 1).to( prev_segment_cond_latents.device, prev_segment_cond_latents.dtype ) prev_segment_cond_latents = (prev_segment_cond_latents - latents_mean) * latents_recip_std # Prepare I2V mask if task == "replace": mask_video = 1 - mask_video mask_video = mask_video.permute(0, 2, 1, 3, 4) mask_video = mask_video.flatten(0, 1) mask_video = F.interpolate(mask_video, size=(latent_height, latent_width), mode="nearest") mask_pixel_values = mask_video.unflatten(0, (batch_size, -1)) mask_pixel_values = mask_pixel_values.permute(0, 2, 1, 3, 4) # output shape: [B, C = 1, T, H_lat, W_lat] else: mask_pixel_values = None prev_segment_cond_mask = self.get_i2v_mask( batch_size, num_latent_frames, latent_height, latent_width, mask_len=prev_segment_cond_frames if start_frame > 0 else 0, mask_pixel_values=mask_pixel_values, dtype=dtype, device=device, ) # Prepend cond I2V mask to prev segment cond latents along channel dimension prev_segment_cond_latents = torch.cat([prev_segment_cond_mask, prev_segment_cond_latents], dim=1) return prev_segment_cond_latents def prepare_pose_latents( self, pose_video: torch.Tensor, batch_size: int = 1, sample_mode: int = "argmax", generator: torch.Generator | list[torch.Generator] | None = None, dtype: torch.dtype | None = None, device: torch.device | None = None, ) -> torch.Tensor: # pose_video shape: (B, C, T, H, W) pose_video = pose_video.to(device=device, dtype=dtype if dtype is not None 
else self.vae.dtype) if isinstance(generator, list): pose_latents = [ retrieve_latents(self.vae.encode(pose_video), generator=g, sample_mode=sample_mode) for g in generator ] pose_latents = torch.cat(pose_latents) else: pose_latents = retrieve_latents(self.vae.encode(pose_video), generator, sample_mode) # Standardize latents in preparation for Wan VAE encode latents_mean = ( torch.tensor(self.vae.config.latents_mean) .view(1, self.vae.config.z_dim, 1, 1, 1) .to(pose_latents.device, pose_latents.dtype) ) latents_recip_std = 1.0 / torch.tensor(self.vae.config.latents_std).view(1, self.vae.config.z_dim, 1, 1, 1).to( pose_latents.device, pose_latents.dtype ) pose_latents = (pose_latents - latents_mean) * latents_recip_std if pose_latents.shape[0] == 1 and batch_size > 1: pose_latents = pose_latents.expand(batch_size, -1, -1, -1, -1) return pose_latents def prepare_latents( self, batch_size: int, num_channels_latents: int = 16, height: int = 720, width: int = 1280, num_frames: int = 77, dtype: torch.dtype | None = None, device: torch.device | None = None, generator: torch.Generator | list[torch.Generator] | None = None, latents: torch.Tensor | None = None, ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: num_latent_frames = (num_frames - 1) // self.vae_scale_factor_temporal + 1 latent_height = height // self.vae_scale_factor_spatial latent_width = width // self.vae_scale_factor_spatial shape = (batch_size, num_channels_latents, num_latent_frames + 1, latent_height, latent_width) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
) if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device=device, dtype=dtype) return latents def pad_video_frames(self, frames: list[Any], num_target_frames: int) -> list[Any]: """ Pads an array-like video `frames` to `num_target_frames` using a "reflect"-like strategy. The frame dimension is assumed to be the first dimension. In the 1D case, we can visualize this strategy as follows: pad_video_frames([1, 2, 3, 4, 5], 10) -> [1, 2, 3, 4, 5, 4, 3, 2, 1, 2] """ idx = 0 flip = False target_frames = [] while len(target_frames) < num_target_frames: target_frames.append(deepcopy(frames[idx])) if flip: idx -= 1 else: idx += 1 if idx == 0 or idx == len(frames) - 1: flip = not flip return target_frames @property def guidance_scale(self): return self._guidance_scale @property def do_classifier_free_guidance(self): return self._guidance_scale > 1 @property def num_timesteps(self): return self._num_timesteps @property def current_timestep(self): return self._current_timestep @property def interrupt(self): return self._interrupt @property def attention_kwargs(self): return self._attention_kwargs @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, image: PipelineImageInput, pose_video: list[PIL.Image.Image], face_video: list[PIL.Image.Image], background_video: list[PIL.Image.Image] | None = None, mask_video: list[PIL.Image.Image] | None = None, prompt: str | list[str] = None, negative_prompt: str | list[str] = None, height: int = 720, width: int = 1280, segment_frame_length: int = 77, num_inference_steps: int = 20, mode: str = "animate", prev_segment_conditioning_frames: int = 1, motion_encode_batch_size: int | None = None, guidance_scale: float = 1.0, num_videos_per_prompt: int | None = 1, generator: torch.Generator | list[torch.Generator] | None = None, latents: torch.Tensor | None = None, prompt_embeds: torch.Tensor | None = None, negative_prompt_embeds: torch.Tensor 
        | None = None,
        image_embeds: torch.Tensor | None = None,
        output_type: str | None = "np",
        return_dict: bool = True,
        attention_kwargs: dict[str, Any] | None = None,
        callback_on_step_end: Callable[[int, int, None], PipelineCallback | MultiPipelineCallbacks] | None = None,
        callback_on_step_end_tensor_inputs: list[str] = ["latents"],
        max_sequence_length: int = 512,
    ):
        r"""
        The call function to the pipeline for generation.

        Args:
            image (`PipelineImageInput`):
                The input character image to condition the generation on. Must be an image, a list of images or a
                `torch.Tensor`.
            pose_video (`list[PIL.Image.Image]`):
                The input pose video to condition the generation on. Must be a list of PIL images.
            face_video (`list[PIL.Image.Image]`):
                The input face video to condition the generation on. Must be a list of PIL images.
            background_video (`list[PIL.Image.Image]`, *optional*):
                When mode is `"replace"`, the input background video to condition the generation on. Must be a list of
                PIL images.
            mask_video (`list[PIL.Image.Image]`, *optional*):
                When mode is `"replace"`, the input mask video to condition the generation on. Must be a list of PIL
                images.
            prompt (`str` or `list[str]`, *optional*):
                The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`.
                instead.
            negative_prompt (`str` or `list[str]`, *optional*):
                The prompt or prompts not to guide the image generation. If not defined, one has to pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
                less than `1`).
            mode (`str`, defaults to `"animate"`):
                The mode of the generation. Choose between `"animate"` and `"replace"`.
            prev_segment_conditioning_frames (`int`, defaults to `1`):
                The number of frames from the previous video segment to be used for temporal guidance. Recommended to
                be 1 or 5. In general, should be 4N + 1, where N is a non-negative integer.
            motion_encode_batch_size (`int`, *optional*):
                The batch size for batched encoding of the face video via the motion encoder. This allows trading off
                inference speed for lower memory usage by setting a smaller batch size. Will default to
                `self.transformer.config.motion_encoder_batch_size` if not set.
            height (`int`, defaults to `720`):
                The height of the generated video.
            width (`int`, defaults to `1280`):
                The width of the generated video.
            segment_frame_length (`int`, defaults to `77`):
                The number of frames in each generated video segment. The total frames of video generated will be
                equal to the number of frames in `pose_video`; we will generate the video in segments until we have
                hit this length. In general, should be 4N + 1, where N is a non-negative integer.
            num_inference_steps (`int`, defaults to `20`):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            guidance_scale (`float`, defaults to `1.0`):
                Guidance scale as defined in [Classifier-Free Diffusion
                Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2.
                of [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting
                `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to
                the text `prompt`, usually at the expense of lower image quality. By default, CFG is not used in Wan
                Animate inference.
            num_videos_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            generator (`torch.Generator` or `list[torch.Generator]`, *optional*):
                A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
                generation deterministic.
            latents (`torch.Tensor`, *optional*):
                Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a
                latents tensor is generated by sampling using the supplied random `generator`.
            prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
                provided, text embeddings are generated from the `prompt` input argument.
            negative_prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
                provided, text embeddings are generated from the `negative_prompt` input argument.
            image_embeds (`torch.Tensor`, *optional*):
                Pre-generated image embeddings. Can be used to easily tweak image inputs (weighting). If not provided,
                image embeddings are generated from the `image` input argument.
            output_type (`str`, *optional*, defaults to `"np"`):
                The output format of the generated image. Choose between `PIL.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`WanPipelineOutput`] instead of a plain tuple.
            attention_kwargs (`dict`, *optional*):
                A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
                `self.processor` in
                [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
            callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*):
                A function or a subclass of `PipelineCallback` or `MultiPipelineCallbacks` that is called at the end of
                each denoising step during the inference. with the following arguments: `callback_on_step_end(self:
                DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a
                list of all tensors as specified by `callback_on_step_end_tensor_inputs`.
            callback_on_step_end_tensor_inputs (`List`, *optional*):
                The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
                will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
                `._callback_tensor_inputs` attribute of your pipeline class.
            max_sequence_length (`int`, defaults to `512`):
                The maximum sequence length of the text encoder. If the prompt is longer than this, it will be
                truncated. If the prompt is shorter, it will be padded to this length.

        Examples:

        Returns:
            [`~WanPipelineOutput`] or `tuple`:
                If `return_dict` is `True`, [`WanPipelineOutput`] is returned, otherwise a `tuple` is returned where
                the first element is a list with the generated videos.
        """
        if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)):
            callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs

        # 1. Check inputs. Raise error if not correct
        self.check_inputs(
            prompt,
            negative_prompt,
            image,
            pose_video,
            face_video,
            background_video,
            mask_video,
            height,
            width,
            prompt_embeds,
            negative_prompt_embeds,
            image_embeds,
            callback_on_step_end_tensor_inputs,
            mode,
            prev_segment_conditioning_frames,
        )

        if segment_frame_length % self.vae_scale_factor_temporal != 1:
            logger.warning(
                f"`segment_frame_length - 1` has to be divisible by {self.vae_scale_factor_temporal}. Rounding to the"
                f" nearest number."
            )
            segment_frame_length = (
                segment_frame_length // self.vae_scale_factor_temporal * self.vae_scale_factor_temporal + 1
            )
            segment_frame_length = max(segment_frame_length, 1)

        self._guidance_scale = guidance_scale
        self._attention_kwargs = attention_kwargs
        self._current_timestep = None
        self._interrupt = False

        device = self._execution_device

        # 2. Define call parameters
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        # As we generate in segments of `segment_frame_length`, set the target frame length to be the least multiple
        # of the effective segment length greater than or equal to the length of `pose_video`.
        cond_video_frames = len(pose_video)
        effective_segment_length = segment_frame_length - prev_segment_conditioning_frames
        last_segment_frames = (cond_video_frames - prev_segment_conditioning_frames) % effective_segment_length
        if last_segment_frames == 0:
            num_padding_frames = 0
        else:
            num_padding_frames = effective_segment_length - last_segment_frames
        num_target_frames = cond_video_frames + num_padding_frames
        num_segments = num_target_frames // effective_segment_length

        # 3. Encode input prompt
        prompt_embeds, negative_prompt_embeds = self.encode_prompt(
            prompt=prompt,
            negative_prompt=negative_prompt,
            do_classifier_free_guidance=self.do_classifier_free_guidance,
            num_videos_per_prompt=num_videos_per_prompt,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            max_sequence_length=max_sequence_length,
            device=device,
        )

        transformer_dtype = self.transformer.dtype
        prompt_embeds = prompt_embeds.to(transformer_dtype)
        if negative_prompt_embeds is not None:
            negative_prompt_embeds = negative_prompt_embeds.to(transformer_dtype)

        # 4. Preprocess and encode the reference (character) image
        image_height, image_width = self.video_processor.get_default_height_width(image)
        if image_height != height or image_width != width:
            logger.warning(f"Reshaping reference image from ({image_width}, {image_height}) to ({width}, {height})")
        image_pixels = self.vae_image_processor.preprocess(image, height=height, width=width, resize_mode="fill").to(
            device, dtype=torch.float32
        )

        # Get CLIP features from the reference image
        if image_embeds is None:
            image_embeds = self.encode_image(image, device)
        image_embeds = image_embeds.repeat(batch_size * num_videos_per_prompt, 1, 1)
        image_embeds = image_embeds.to(transformer_dtype)

        # 5. Encode conditioning videos (pose, face)
        pose_video = self.pad_video_frames(pose_video, num_target_frames)
        face_video = self.pad_video_frames(face_video, num_target_frames)

        # TODO: also support np.ndarray input (e.g. from decord like the original implementation?)
        pose_video_width, pose_video_height = pose_video[0].size
        if pose_video_height != height or pose_video_width != width:
            logger.warning(
                f"Reshaping pose video from ({pose_video_width}, {pose_video_height}) to ({width}, {height})"
            )
        pose_video = self.video_processor.preprocess_video(pose_video, height=height, width=width).to(
            device, dtype=torch.float32
        )

        face_video_width, face_video_height = face_video[0].size
        expected_face_size = self.transformer.config.motion_encoder_size
        if face_video_width != expected_face_size or face_video_height != expected_face_size:
            logger.warning(
                f"Reshaping face video from ({face_video_width}, {face_video_height}) to ({expected_face_size},"
                f" {expected_face_size})"
            )
        face_video = self.video_processor.preprocess_video(
            face_video, height=expected_face_size, width=expected_face_size
        ).to(device, dtype=torch.float32)

        if mode == "replace":
            background_video = self.pad_video_frames(background_video, num_target_frames)
            mask_video = self.pad_video_frames(mask_video, num_target_frames)
            background_video = self.video_processor.preprocess_video(background_video, height=height, width=width).to(
                device, dtype=torch.float32
            )
            mask_video = self.video_processor_for_mask.preprocess_video(mask_video, height=height, width=width).to(
                device, dtype=torch.float32
            )

        # 6. Prepare timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        # 7. Prepare latent variables which stay constant for all inference segments
        num_channels_latents = self.vae.config.z_dim

        # Get VAE-encoded latents of the reference (character) image
        reference_image_latents = self.prepare_reference_image_latents(
            image_pixels, batch_size * num_videos_per_prompt, generator=generator, device=device
        )

        # 8. Loop over video inference segments
        start = 0
        end = segment_frame_length  # Data space frames, not latent frames
        all_out_frames = []
        out_frames = None
        for _ in range(num_segments):
            assert start + prev_segment_conditioning_frames < cond_video_frames

            # Sample noisy latents from prior for the current inference segment
            latents = self.prepare_latents(
                batch_size * num_videos_per_prompt,
                num_channels_latents=num_channels_latents,
                height=height,
                width=width,
                num_frames=segment_frame_length,
                dtype=torch.float32,
                device=device,
                generator=generator,
                latents=latents if start == 0 else None,  # Only use pre-calculated latents for first segment
            )

            pose_video_segment = pose_video[:, :, start:end]
            face_video_segment = face_video[:, :, start:end]
            face_video_segment = face_video_segment.expand(batch_size * num_videos_per_prompt, -1, -1, -1, -1)
            face_video_segment = face_video_segment.to(dtype=transformer_dtype)

            # From the second segment on, condition on the tail frames of the previously decoded segment.
            if start > 0:
                prev_segment_cond_video = out_frames[:, :, -prev_segment_conditioning_frames:].clone().detach()
            else:
                prev_segment_cond_video = None

            if mode == "replace":
                background_video_segment = background_video[:, :, start:end]
                mask_video_segment = mask_video[:, :, start:end]
                background_video_segment = background_video_segment.expand(
                    batch_size * num_videos_per_prompt, -1, -1, -1, -1
                )
                mask_video_segment = mask_video_segment.expand(batch_size * num_videos_per_prompt, -1, -1, -1, -1)
            else:
                background_video_segment = None
                mask_video_segment = None

            pose_latents = self.prepare_pose_latents(
                pose_video_segment, batch_size * num_videos_per_prompt, generator=generator, device=device
            )
            pose_latents = pose_latents.to(dtype=transformer_dtype)

            prev_segment_cond_latents = self.prepare_prev_segment_cond_latents(
                prev_segment_cond_video,
                background_video=background_video_segment,
                mask_video=mask_video_segment,
                batch_size=batch_size * num_videos_per_prompt,
                segment_frame_length=segment_frame_length,
                start_frame=start,
                height=height,
                width=width,
                prev_segment_cond_frames=prev_segment_conditioning_frames,
                task=mode,
                generator=generator,
                device=device,
            )

            # Concatenate the reference latents in the frame dimension
            reference_latents = torch.cat([reference_image_latents, prev_segment_cond_latents], dim=2)

            # 8.1 Denoising loop
            num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
            self._num_timesteps = len(timesteps)

            with self.progress_bar(total=num_inference_steps) as progress_bar:
                for i, t in enumerate(timesteps):
                    if self.interrupt:
                        continue

                    self._current_timestep = t

                    # Concatenate the reference image + prev segment conditioning in the channel dim
                    latent_model_input = torch.cat([latents, reference_latents], dim=1).to(transformer_dtype)
                    timestep = t.expand(latents.shape[0])

                    with self.transformer.cache_context("cond"):
                        noise_pred = self.transformer(
                            hidden_states=latent_model_input,
                            timestep=timestep,
                            encoder_hidden_states=prompt_embeds,
                            encoder_hidden_states_image=image_embeds,
                            pose_hidden_states=pose_latents,
                            face_pixel_values=face_video_segment,
                            motion_encode_batch_size=motion_encode_batch_size,
                            attention_kwargs=attention_kwargs,
                            return_dict=False,
                        )[0]

                    if self.do_classifier_free_guidance:
                        # Blank out face for unconditional guidance (set all pixels to -1)
                        face_pixel_values_uncond = face_video_segment * 0 - 1
                        with self.transformer.cache_context("uncond"):
                            noise_uncond = self.transformer(
                                hidden_states=latent_model_input,
                                timestep=timestep,
                                encoder_hidden_states=negative_prompt_embeds,
                                encoder_hidden_states_image=image_embeds,
                                pose_hidden_states=pose_latents,
                                face_pixel_values=face_pixel_values_uncond,
                                motion_encode_batch_size=motion_encode_batch_size,
                                attention_kwargs=attention_kwargs,
                                return_dict=False,
                            )[0]
                        noise_pred = noise_uncond + guidance_scale * (noise_pred - noise_uncond)

                    # compute the previous noisy sample x_t -> x_t-1
                    latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0]

                    if callback_on_step_end is not None:
                        callback_kwargs = {}
                        for k in callback_on_step_end_tensor_inputs:
                            callback_kwargs[k] = locals()[k]
                        callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)

                        latents = callback_outputs.pop("latents", latents)
                        prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
                        negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)

                    # call the callback, if provided
                    if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                        progress_bar.update()

                    if XLA_AVAILABLE:
                        xm.mark_step()

            latents = latents.to(self.vae.dtype)
            # Destandardize latents in preparation for Wan VAE decoding
            latents_mean = (
                torch.tensor(self.vae.config.latents_mean)
                .view(1, self.vae.config.z_dim, 1, 1, 1)
                .to(latents.device, latents.dtype)
            )
            latents_recip_std = 1.0 / torch.tensor(self.vae.config.latents_std).view(
                1, self.vae.config.z_dim, 1, 1, 1
            ).to(latents.device, latents.dtype)
            latents = latents / latents_recip_std + latents_mean

            # Skip the first latent frame (used for conditioning)
            out_frames = self.vae.decode(latents[:, :, 1:], return_dict=False)[0]
            if start > 0:
                # Drop the overlapping conditioning frames so segments concatenate without duplication.
                out_frames = out_frames[:, :, prev_segment_conditioning_frames:]
            all_out_frames.append(out_frames)

            start += effective_segment_length
            end += effective_segment_length

            # Reset scheduler timesteps / state for next denoising
loop self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps self._current_timestep = None assert start + prev_segment_conditioning_frames >= cond_video_frames if not output_type == "latent": video = torch.cat(all_out_frames, dim=2)[:, :, :cond_video_frames] video = self.video_processor.postprocess_video(video, output_type=output_type) else: video = latents # Offload all models self.maybe_free_model_hooks() if not return_dict: return (video,) return WanPipelineOutput(frames=video)
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/pipelines/wan/pipeline_wan_animate.py", "license": "Apache License 2.0", "lines": 1064, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:tests/models/transformers/test_models_transformer_wan_animate.py
# Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import pytest import torch from diffusers import WanAnimateTransformer3DModel from diffusers.utils.torch_utils import randn_tensor from ...testing_utils import enable_full_determinism, torch_device from ..testing_utils import ( AttentionTesterMixin, BaseModelTesterConfig, BitsAndBytesTesterMixin, GGUFCompileTesterMixin, GGUFTesterMixin, MemoryTesterMixin, ModelTesterMixin, TorchAoTesterMixin, TorchCompileTesterMixin, TrainingTesterMixin, ) enable_full_determinism() class WanAnimateTransformer3DTesterConfig(BaseModelTesterConfig): @property def model_class(self): return WanAnimateTransformer3DModel @property def pretrained_model_name_or_path(self): return "hf-internal-testing/tiny-wan-animate-transformer" @property def output_shape(self) -> tuple[int, ...]: # Output has fewer channels than input (4 vs 12) return (4, 21, 16, 16) @property def input_shape(self) -> tuple[int, ...]: return (12, 21, 16, 16) @property def main_input_name(self) -> str: return "hidden_states" @property def generator(self): return torch.Generator("cpu").manual_seed(0) def get_init_dict(self) -> dict[str, int | list[int] | tuple | str | bool | float | dict]: # Use custom channel sizes since the default Wan Animate channel sizes will cause the motion encoder to # contain the vast majority of the parameters in the test model channel_sizes = {"4": 16, "8": 16, "16": 16} return { "patch_size": (1, 2, 2), "num_attention_heads": 2, 
"attention_head_dim": 12, "in_channels": 12, # 2 * C + 4 = 2 * 4 + 4 = 12 "latent_channels": 4, "out_channels": 4, "text_dim": 16, "freq_dim": 256, "ffn_dim": 32, "num_layers": 2, "cross_attn_norm": True, "qk_norm": "rms_norm_across_heads", "image_dim": 16, "rope_max_seq_len": 32, "motion_encoder_channel_sizes": channel_sizes, # Start of Wan Animate-specific config "motion_encoder_size": 16, # Ensures that there will be 2 motion encoder resblocks "motion_style_dim": 8, "motion_dim": 4, "motion_encoder_dim": 16, "face_encoder_hidden_dim": 16, "face_encoder_num_heads": 2, "inject_face_latents_blocks": 2, } def get_dummy_inputs(self) -> dict[str, torch.Tensor]: batch_size = 1 num_channels = 4 num_frames = 20 # To make the shapes work out; for complicated reasons we want 21 to divide num_frames + 1 height = 16 width = 16 text_encoder_embedding_dim = 16 sequence_length = 12 clip_seq_len = 12 clip_dim = 16 inference_segment_length = 77 # The inference segment length in the full Wan2.2-Animate-14B model face_height = 16 # Should be square and match `motion_encoder_size` face_width = 16 return { "hidden_states": randn_tensor( (batch_size, 2 * num_channels + 4, num_frames + 1, height, width), generator=self.generator, device=torch_device, ), "timestep": torch.randint(0, 1000, size=(batch_size,), generator=self.generator).to(torch_device), "encoder_hidden_states": randn_tensor( (batch_size, sequence_length, text_encoder_embedding_dim), generator=self.generator, device=torch_device, ), "encoder_hidden_states_image": randn_tensor( (batch_size, clip_seq_len, clip_dim), generator=self.generator, device=torch_device, ), "pose_hidden_states": randn_tensor( (batch_size, num_channels, num_frames, height, width), generator=self.generator, device=torch_device, ), "face_pixel_values": randn_tensor( (batch_size, 3, inference_segment_length, face_height, face_width), generator=self.generator, device=torch_device, ), } class TestWanAnimateTransformer3D(WanAnimateTransformer3DTesterConfig, 
ModelTesterMixin): """Core model tests for Wan Animate Transformer 3D.""" def test_output(self): # Override test_output because the transformer output is expected to have less channels # than the main transformer input. expected_output_shape = (1, 4, 21, 16, 16) super().test_output(expected_output_shape=expected_output_shape) @pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16], ids=["fp16", "bf16"]) def test_from_save_pretrained_dtype_inference(self, tmp_path, dtype): # Skip: fp16/bf16 require very high atol (~1e-2) to pass, providing little signal. # Dtype preservation is already tested by test_from_save_pretrained_dtype and test_keep_in_fp32_modules. pytest.skip("Tolerance requirements too high for meaningful test") class TestWanAnimateTransformer3DMemory(WanAnimateTransformer3DTesterConfig, MemoryTesterMixin): """Memory optimization tests for Wan Animate Transformer 3D.""" class TestWanAnimateTransformer3DTraining(WanAnimateTransformer3DTesterConfig, TrainingTesterMixin): """Training tests for Wan Animate Transformer 3D.""" def test_gradient_checkpointing_is_applied(self): expected_set = {"WanAnimateTransformer3DModel"} super().test_gradient_checkpointing_is_applied(expected_set=expected_set) class TestWanAnimateTransformer3DAttention(WanAnimateTransformer3DTesterConfig, AttentionTesterMixin): """Attention processor tests for Wan Animate Transformer 3D.""" class TestWanAnimateTransformer3DCompile(WanAnimateTransformer3DTesterConfig, TorchCompileTesterMixin): """Torch compile tests for Wan Animate Transformer 3D.""" def test_torch_compile_recompilation_and_graph_break(self): # Skip: F.pad with mode="replicate" in WanAnimateFaceEncoder triggers importlib.import_module # internally, which dynamo doesn't support tracing through. 
pytest.skip("F.pad with replicate mode triggers unsupported import in torch.compile") class TestWanAnimateTransformer3DBitsAndBytes(WanAnimateTransformer3DTesterConfig, BitsAndBytesTesterMixin): """BitsAndBytes quantization tests for Wan Animate Transformer 3D.""" @property def torch_dtype(self): return torch.float16 def get_dummy_inputs(self): """Override to provide inputs matching the tiny Wan Animate model dimensions.""" return { "hidden_states": randn_tensor( (1, 36, 21, 64, 64), generator=self.generator, device=torch_device, dtype=self.torch_dtype ), "encoder_hidden_states": randn_tensor( (1, 512, 4096), generator=self.generator, device=torch_device, dtype=self.torch_dtype ), "encoder_hidden_states_image": randn_tensor( (1, 257, 1280), generator=self.generator, device=torch_device, dtype=self.torch_dtype ), "pose_hidden_states": randn_tensor( (1, 16, 20, 64, 64), generator=self.generator, device=torch_device, dtype=self.torch_dtype ), "face_pixel_values": randn_tensor( (1, 3, 77, 512, 512), generator=self.generator, device=torch_device, dtype=self.torch_dtype ), "timestep": torch.tensor([1.0]).to(torch_device, self.torch_dtype), } class TestWanAnimateTransformer3DTorchAo(WanAnimateTransformer3DTesterConfig, TorchAoTesterMixin): """TorchAO quantization tests for Wan Animate Transformer 3D.""" @property def torch_dtype(self): return torch.bfloat16 def get_dummy_inputs(self): """Override to provide inputs matching the tiny Wan Animate model dimensions.""" return { "hidden_states": randn_tensor( (1, 36, 21, 64, 64), generator=self.generator, device=torch_device, dtype=self.torch_dtype ), "encoder_hidden_states": randn_tensor( (1, 512, 4096), generator=self.generator, device=torch_device, dtype=self.torch_dtype ), "encoder_hidden_states_image": randn_tensor( (1, 257, 1280), generator=self.generator, device=torch_device, dtype=self.torch_dtype ), "pose_hidden_states": randn_tensor( (1, 16, 20, 64, 64), generator=self.generator, device=torch_device, 
dtype=self.torch_dtype ), "face_pixel_values": randn_tensor( (1, 3, 77, 512, 512), generator=self.generator, device=torch_device, dtype=self.torch_dtype ), "timestep": torch.tensor([1.0]).to(torch_device, self.torch_dtype), } class TestWanAnimateTransformer3DGGUF(WanAnimateTransformer3DTesterConfig, GGUFTesterMixin): """GGUF quantization tests for Wan Animate Transformer 3D.""" @property def gguf_filename(self): return "https://huggingface.co/QuantStack/Wan2.2-Animate-14B-GGUF/blob/main/Wan2.2-Animate-14B-Q2_K.gguf" @property def torch_dtype(self): return torch.bfloat16 def get_dummy_inputs(self): """Override to provide inputs matching the real Wan Animate model dimensions. Wan 2.2 Animate: in_channels=36 (2*16+4), text_dim=4096, image_dim=1280 """ return { "hidden_states": randn_tensor( (1, 36, 21, 64, 64), generator=self.generator, device=torch_device, dtype=self.torch_dtype ), "encoder_hidden_states": randn_tensor( (1, 512, 4096), generator=self.generator, device=torch_device, dtype=self.torch_dtype ), "encoder_hidden_states_image": randn_tensor( (1, 257, 1280), generator=self.generator, device=torch_device, dtype=self.torch_dtype ), "pose_hidden_states": randn_tensor( (1, 16, 20, 64, 64), generator=self.generator, device=torch_device, dtype=self.torch_dtype ), "face_pixel_values": randn_tensor( (1, 3, 77, 512, 512), generator=self.generator, device=torch_device, dtype=self.torch_dtype ), "timestep": torch.tensor([1.0]).to(torch_device, self.torch_dtype), } class TestWanAnimateTransformer3DGGUFCompile(WanAnimateTransformer3DTesterConfig, GGUFCompileTesterMixin): """GGUF + compile tests for Wan Animate Transformer 3D.""" @property def gguf_filename(self): return "https://huggingface.co/QuantStack/Wan2.2-Animate-14B-GGUF/blob/main/Wan2.2-Animate-14B-Q2_K.gguf" @property def torch_dtype(self): return torch.bfloat16 def get_dummy_inputs(self): """Override to provide inputs matching the real Wan Animate model dimensions. 
Wan 2.2 Animate: in_channels=36 (2*16+4), text_dim=4096, image_dim=1280 """ return { "hidden_states": randn_tensor( (1, 36, 21, 64, 64), generator=self.generator, device=torch_device, dtype=self.torch_dtype ), "encoder_hidden_states": randn_tensor( (1, 512, 4096), generator=self.generator, device=torch_device, dtype=self.torch_dtype ), "encoder_hidden_states_image": randn_tensor( (1, 257, 1280), generator=self.generator, device=torch_device, dtype=self.torch_dtype ), "pose_hidden_states": randn_tensor( (1, 16, 20, 64, 64), generator=self.generator, device=torch_device, dtype=self.torch_dtype ), "face_pixel_values": randn_tensor( (1, 3, 77, 512, 512), generator=self.generator, device=torch_device, dtype=self.torch_dtype ), "timestep": torch.tensor([1.0]).to(torch_device, self.torch_dtype), }
{ "repo_id": "huggingface/diffusers", "file_path": "tests/models/transformers/test_models_transformer_wan_animate.py", "license": "Apache License 2.0", "lines": 257, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/diffusers:tests/pipelines/wan/test_wan_animate.py
# Copyright 2025 The HuggingFace Team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest import numpy as np import torch from PIL import Image from transformers import ( AutoConfig, AutoTokenizer, CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModelWithProjection, T5EncoderModel, ) from diffusers import ( AutoencoderKLWan, FlowMatchEulerDiscreteScheduler, WanAnimatePipeline, WanAnimateTransformer3DModel, ) from ...testing_utils import ( backend_empty_cache, enable_full_determinism, require_torch_accelerator, slow, torch_device, ) from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class WanAnimatePipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = WanAnimatePipeline params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs"} batch_params = TEXT_TO_IMAGE_BATCH_PARAMS image_params = TEXT_TO_IMAGE_IMAGE_PARAMS image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS required_optional_params = frozenset( [ "num_inference_steps", "generator", "latents", "return_dict", "callback_on_step_end", "callback_on_step_end_tensor_inputs", ] ) test_xformers_attention = False supports_dduf = False def get_dummy_components(self): torch.manual_seed(0) vae = AutoencoderKLWan( base_dim=3, z_dim=16, dim_mult=[1, 1, 1, 1], num_res_blocks=1, temperal_downsample=[False, True, True], ) torch.manual_seed(0) scheduler = 
FlowMatchEulerDiscreteScheduler(shift=7.0) config = AutoConfig.from_pretrained("hf-internal-testing/tiny-random-t5") text_encoder = T5EncoderModel(config) tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") torch.manual_seed(0) channel_sizes = {"4": 16, "8": 16, "16": 16} transformer = WanAnimateTransformer3DModel( patch_size=(1, 2, 2), num_attention_heads=2, attention_head_dim=12, in_channels=36, latent_channels=16, out_channels=16, text_dim=32, freq_dim=256, ffn_dim=32, num_layers=2, cross_attn_norm=True, qk_norm="rms_norm_across_heads", image_dim=4, rope_max_seq_len=32, motion_encoder_channel_sizes=channel_sizes, motion_encoder_size=16, motion_style_dim=8, motion_dim=4, motion_encoder_dim=16, face_encoder_hidden_dim=16, face_encoder_num_heads=2, inject_face_latents_blocks=2, ) torch.manual_seed(0) image_encoder_config = CLIPVisionConfig( hidden_size=4, projection_dim=4, num_hidden_layers=2, num_attention_heads=2, image_size=4, intermediate_size=16, patch_size=1, ) image_encoder = CLIPVisionModelWithProjection(image_encoder_config) torch.manual_seed(0) image_processor = CLIPImageProcessor(crop_size=4, size=4) components = { "transformer": transformer, "vae": vae, "scheduler": scheduler, "text_encoder": text_encoder, "tokenizer": tokenizer, "image_encoder": image_encoder, "image_processor": image_processor, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) num_frames = 17 height = 16 width = 16 face_height = 16 face_width = 16 image = Image.new("RGB", (height, width)) pose_video = [Image.new("RGB", (height, width))] * num_frames face_video = [Image.new("RGB", (face_height, face_width))] * num_frames inputs = { "image": image, "pose_video": pose_video, "face_video": face_video, "prompt": "dance monkey", "negative_prompt": "negative", "height": height, "width": width, 
"segment_frame_length": 77, # TODO: can we set this to num_frames? "num_inference_steps": 2, "mode": "animate", "prev_segment_conditioning_frames": 1, "generator": generator, "guidance_scale": 1.0, "output_type": "pt", "max_sequence_length": 16, } return inputs def test_inference(self): """Test basic inference in animation mode.""" device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) video = pipe(**inputs).frames[0] self.assertEqual(video.shape, (17, 3, 16, 16)) expected_video = torch.randn(17, 3, 16, 16) max_diff = np.abs(video - expected_video).max() self.assertLessEqual(max_diff, 1e10) def test_inference_replacement(self): """Test the pipeline in replacement mode with background and mask videos.""" device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) inputs["mode"] = "replace" num_frames = 17 height = 16 width = 16 inputs["background_video"] = [Image.new("RGB", (height, width))] * num_frames inputs["mask_video"] = [Image.new("L", (height, width))] * num_frames video = pipe(**inputs).frames[0] self.assertEqual(video.shape, (17, 3, 16, 16)) @unittest.skip("Test not supported") def test_attention_slicing_forward_pass(self): pass @unittest.skip( "Setting the Wan Animate latents to zero at the last denoising step does not guarantee that the output will be" " zero. I believe this is because the latents are further processed in the outer loop where we loop over" " inference segments." ) def test_callback_inputs(self): pass @slow @require_torch_accelerator class WanAnimatePipelineIntegrationTests(unittest.TestCase): prompt = "A painting of a squirrel eating a burger." 
def setUp(self): super().setUp() gc.collect() backend_empty_cache(torch_device) def tearDown(self): super().tearDown() gc.collect() backend_empty_cache(torch_device) @unittest.skip("TODO: test needs to be implemented") def test_wan_animate(self): pass
{ "repo_id": "huggingface/diffusers", "file_path": "tests/pipelines/wan/test_wan_animate.py", "license": "Apache License 2.0", "lines": 208, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/diffusers:tests/modular_pipelines/qwen/test_modular_pipeline_qwenimage.py
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import PIL import pytest from diffusers.modular_pipelines import ( QwenImageAutoBlocks, QwenImageEditAutoBlocks, QwenImageEditModularPipeline, QwenImageEditPlusAutoBlocks, QwenImageEditPlusModularPipeline, QwenImageModularPipeline, ) from ...testing_utils import torch_device from ..test_modular_pipelines_common import ModularGuiderTesterMixin, ModularPipelineTesterMixin QWEN_IMAGE_TEXT2IMAGE_WORKFLOWS = { "text2image": [ ("text_encoder", "QwenImageTextEncoderStep"), ("denoise.input", "QwenImageTextInputsStep"), ("denoise.prepare_latents", "QwenImagePrepareLatentsStep"), ("denoise.set_timesteps", "QwenImageSetTimestepsStep"), ("denoise.prepare_rope_inputs", "QwenImageRoPEInputsStep"), ("denoise.denoise", "QwenImageDenoiseStep"), ("denoise.after_denoise", "QwenImageAfterDenoiseStep"), ("decode.decode", "QwenImageDecoderStep"), ("decode.postprocess", "QwenImageProcessImagesOutputStep"), ], "image2image": [ ("text_encoder", "QwenImageTextEncoderStep"), ("vae_encoder.preprocess", "QwenImageProcessImagesInputStep"), ("vae_encoder.encode", "QwenImageVaeEncoderStep"), ("denoise.input.text_inputs", "QwenImageTextInputsStep"), ("denoise.input.additional_inputs", "QwenImageAdditionalInputsStep"), ("denoise.prepare_latents", "QwenImagePrepareLatentsStep"), ("denoise.set_timesteps", "QwenImageSetTimestepsWithStrengthStep"), ("denoise.prepare_img2img_latents", 
"QwenImagePrepareLatentsWithStrengthStep"), ("denoise.prepare_rope_inputs", "QwenImageRoPEInputsStep"), ("denoise.denoise", "QwenImageDenoiseStep"), ("denoise.after_denoise", "QwenImageAfterDenoiseStep"), ("decode.decode", "QwenImageDecoderStep"), ("decode.postprocess", "QwenImageProcessImagesOutputStep"), ], "inpainting": [ ("text_encoder", "QwenImageTextEncoderStep"), ("vae_encoder.preprocess", "QwenImageInpaintProcessImagesInputStep"), ("vae_encoder.encode", "QwenImageVaeEncoderStep"), ("denoise.input.text_inputs", "QwenImageTextInputsStep"), ("denoise.input.additional_inputs", "QwenImageAdditionalInputsStep"), ("denoise.prepare_latents", "QwenImagePrepareLatentsStep"), ("denoise.set_timesteps", "QwenImageSetTimestepsWithStrengthStep"), ("denoise.prepare_inpaint_latents.add_noise_to_latents", "QwenImagePrepareLatentsWithStrengthStep"), ("denoise.prepare_inpaint_latents.create_mask_latents", "QwenImageCreateMaskLatentsStep"), ("denoise.prepare_rope_inputs", "QwenImageRoPEInputsStep"), ("denoise.denoise", "QwenImageInpaintDenoiseStep"), ("denoise.after_denoise", "QwenImageAfterDenoiseStep"), ("decode.decode", "QwenImageDecoderStep"), ("decode.postprocess", "QwenImageInpaintProcessImagesOutputStep"), ], "controlnet_text2image": [ ("text_encoder", "QwenImageTextEncoderStep"), ("controlnet_vae_encoder", "QwenImageControlNetVaeEncoderStep"), ("denoise.input", "QwenImageTextInputsStep"), ("denoise.controlnet_input", "QwenImageControlNetInputsStep"), ("denoise.prepare_latents", "QwenImagePrepareLatentsStep"), ("denoise.set_timesteps", "QwenImageSetTimestepsStep"), ("denoise.prepare_rope_inputs", "QwenImageRoPEInputsStep"), ("denoise.controlnet_before_denoise", "QwenImageControlNetBeforeDenoiserStep"), ("denoise.controlnet_denoise", "QwenImageControlNetDenoiseStep"), ("denoise.after_denoise", "QwenImageAfterDenoiseStep"), ("decode.decode", "QwenImageDecoderStep"), ("decode.postprocess", "QwenImageProcessImagesOutputStep"), ], "controlnet_image2image": [ ("text_encoder", 
"QwenImageTextEncoderStep"), ("vae_encoder.preprocess", "QwenImageProcessImagesInputStep"), ("vae_encoder.encode", "QwenImageVaeEncoderStep"), ("controlnet_vae_encoder", "QwenImageControlNetVaeEncoderStep"), ("denoise.input.text_inputs", "QwenImageTextInputsStep"), ("denoise.input.additional_inputs", "QwenImageAdditionalInputsStep"), ("denoise.controlnet_input", "QwenImageControlNetInputsStep"), ("denoise.prepare_latents", "QwenImagePrepareLatentsStep"), ("denoise.set_timesteps", "QwenImageSetTimestepsWithStrengthStep"), ("denoise.prepare_img2img_latents", "QwenImagePrepareLatentsWithStrengthStep"), ("denoise.prepare_rope_inputs", "QwenImageRoPEInputsStep"), ("denoise.controlnet_before_denoise", "QwenImageControlNetBeforeDenoiserStep"), ("denoise.controlnet_denoise", "QwenImageControlNetDenoiseStep"), ("denoise.after_denoise", "QwenImageAfterDenoiseStep"), ("decode.decode", "QwenImageDecoderStep"), ("decode.postprocess", "QwenImageProcessImagesOutputStep"), ], "controlnet_inpainting": [ ("text_encoder", "QwenImageTextEncoderStep"), ("vae_encoder.preprocess", "QwenImageInpaintProcessImagesInputStep"), ("vae_encoder.encode", "QwenImageVaeEncoderStep"), ("controlnet_vae_encoder", "QwenImageControlNetVaeEncoderStep"), ("denoise.input.text_inputs", "QwenImageTextInputsStep"), ("denoise.input.additional_inputs", "QwenImageAdditionalInputsStep"), ("denoise.controlnet_input", "QwenImageControlNetInputsStep"), ("denoise.prepare_latents", "QwenImagePrepareLatentsStep"), ("denoise.set_timesteps", "QwenImageSetTimestepsWithStrengthStep"), ("denoise.prepare_inpaint_latents.add_noise_to_latents", "QwenImagePrepareLatentsWithStrengthStep"), ("denoise.prepare_inpaint_latents.create_mask_latents", "QwenImageCreateMaskLatentsStep"), ("denoise.prepare_rope_inputs", "QwenImageRoPEInputsStep"), ("denoise.controlnet_before_denoise", "QwenImageControlNetBeforeDenoiserStep"), ("denoise.controlnet_denoise", "QwenImageInpaintControlNetDenoiseStep"), ("denoise.after_denoise", 
"QwenImageAfterDenoiseStep"), ("decode.decode", "QwenImageDecoderStep"), ("decode.postprocess", "QwenImageInpaintProcessImagesOutputStep"), ], } class TestQwenImageModularPipelineFast(ModularPipelineTesterMixin, ModularGuiderTesterMixin): pipeline_class = QwenImageModularPipeline pipeline_blocks_class = QwenImageAutoBlocks pretrained_model_name_or_path = "hf-internal-testing/tiny-qwenimage-modular" params = frozenset(["prompt", "height", "width", "negative_prompt", "attention_kwargs", "image", "mask_image"]) batch_params = frozenset(["prompt", "negative_prompt", "image", "mask_image"]) expected_workflow_blocks = QWEN_IMAGE_TEXT2IMAGE_WORKFLOWS def get_dummy_inputs(self): generator = self.get_generator() inputs = { "prompt": "dance monkey", "negative_prompt": "bad quality", "generator": generator, "num_inference_steps": 2, "height": 32, "width": 32, "max_sequence_length": 16, "output_type": "pt", } return inputs def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=5e-4) QWEN_IMAGE_EDIT_WORKFLOWS = { "image_conditioned": [ ("text_encoder.resize", "QwenImageEditResizeStep"), ("text_encoder.encode", "QwenImageEditTextEncoderStep"), ("vae_encoder.resize", "QwenImageEditResizeStep"), ("vae_encoder.preprocess", "QwenImageEditProcessImagesInputStep"), ("vae_encoder.encode", "QwenImageVaeEncoderStep"), ("denoise.input.text_inputs", "QwenImageTextInputsStep"), ("denoise.input.additional_inputs", "QwenImageAdditionalInputsStep"), ("denoise.prepare_latents", "QwenImagePrepareLatentsStep"), ("denoise.set_timesteps", "QwenImageSetTimestepsStep"), ("denoise.prepare_rope_inputs", "QwenImageEditRoPEInputsStep"), ("denoise.denoise", "QwenImageEditDenoiseStep"), ("denoise.after_denoise", "QwenImageAfterDenoiseStep"), ("decode.decode", "QwenImageDecoderStep"), ("decode.postprocess", "QwenImageProcessImagesOutputStep"), ], "image_conditioned_inpainting": [ ("text_encoder.resize", "QwenImageEditResizeStep"), 
("text_encoder.encode", "QwenImageEditTextEncoderStep"), ("vae_encoder.resize", "QwenImageEditResizeStep"), ("vae_encoder.preprocess", "QwenImageEditInpaintProcessImagesInputStep"), ("vae_encoder.encode", "QwenImageVaeEncoderStep"), ("denoise.input.text_inputs", "QwenImageTextInputsStep"), ("denoise.input.additional_inputs", "QwenImageAdditionalInputsStep"), ("denoise.prepare_latents", "QwenImagePrepareLatentsStep"), ("denoise.set_timesteps", "QwenImageSetTimestepsWithStrengthStep"), ("denoise.prepare_inpaint_latents.add_noise_to_latents", "QwenImagePrepareLatentsWithStrengthStep"), ("denoise.prepare_inpaint_latents.create_mask_latents", "QwenImageCreateMaskLatentsStep"), ("denoise.prepare_rope_inputs", "QwenImageEditRoPEInputsStep"), ("denoise.denoise", "QwenImageEditInpaintDenoiseStep"), ("denoise.after_denoise", "QwenImageAfterDenoiseStep"), ("decode.decode", "QwenImageDecoderStep"), ("decode.postprocess", "QwenImageInpaintProcessImagesOutputStep"), ], } class TestQwenImageEditModularPipelineFast(ModularPipelineTesterMixin, ModularGuiderTesterMixin): pipeline_class = QwenImageEditModularPipeline pipeline_blocks_class = QwenImageEditAutoBlocks pretrained_model_name_or_path = "hf-internal-testing/tiny-qwenimage-edit-modular" params = frozenset(["prompt", "height", "width", "negative_prompt", "attention_kwargs", "image", "mask_image"]) batch_params = frozenset(["prompt", "negative_prompt", "image", "mask_image"]) expected_workflow_blocks = QWEN_IMAGE_EDIT_WORKFLOWS def get_dummy_inputs(self): generator = self.get_generator() inputs = { "prompt": "dance monkey", "negative_prompt": "bad quality", "generator": generator, "num_inference_steps": 2, "height": 32, "width": 32, "output_type": "pt", } inputs["image"] = PIL.Image.new("RGB", (32, 32), 0) return inputs def test_guider_cfg(self): super().test_guider_cfg(7e-5) class TestQwenImageEditPlusModularPipelineFast(ModularPipelineTesterMixin, ModularGuiderTesterMixin): pipeline_class = QwenImageEditPlusModularPipeline 
pipeline_blocks_class = QwenImageEditPlusAutoBlocks pretrained_model_name_or_path = "hf-internal-testing/tiny-qwenimage-edit-plus-modular" # No `mask_image` yet. params = frozenset(["prompt", "height", "width", "negative_prompt", "attention_kwargs", "image"]) batch_params = frozenset(["prompt", "negative_prompt", "image"]) def get_dummy_inputs(self): generator = self.get_generator() inputs = { "prompt": "dance monkey", "negative_prompt": "bad quality", "generator": generator, "num_inference_steps": 2, "height": 32, "width": 32, "output_type": "pt", } inputs["image"] = PIL.Image.new("RGB", (32, 32), 0) return inputs def test_multi_images_as_input(self): inputs = self.get_dummy_inputs() image = inputs.pop("image") inputs["image"] = [image, image] pipe = self.get_pipeline().to(torch_device) _ = pipe( **inputs, ) @pytest.mark.xfail(condition=True, reason="Batch of multiple images needs to be revisited", strict=True) def test_num_images_per_prompt(self): super().test_num_images_per_prompt() @pytest.mark.xfail(condition=True, reason="Batch of multiple images needs to be revisited", strict=True) def test_inference_batch_consistent(): super().test_inference_batch_consistent() @pytest.mark.xfail(condition=True, reason="Batch of multiple images needs to be revisited", strict=True) def test_inference_batch_single_identical(): super().test_inference_batch_single_identical() def test_guider_cfg(self): super().test_guider_cfg(1e-6)
{ "repo_id": "huggingface/diffusers", "file_path": "tests/modular_pipelines/qwen/test_modular_pipeline_qwenimage.py", "license": "Apache License 2.0", "lines": 240, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/diffusers:src/diffusers/models/transformers/transformer_chronoedit.py
# Copyright 2025 The ChronoEdit Team and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import math
from typing import Any

import torch
import torch.nn as nn
import torch.nn.functional as F

from ...configuration_utils import ConfigMixin, register_to_config
from ...loaders import FromOriginalModelMixin, PeftAdapterMixin
from ...utils import apply_lora_scale, deprecate, logging
from ...utils.torch_utils import maybe_allow_in_graph
from .._modeling_parallel import ContextParallelInput, ContextParallelOutput
from ..attention import AttentionMixin, AttentionModuleMixin, FeedForward
from ..attention_dispatch import dispatch_attention_fn
from ..cache_utils import CacheMixin
from ..embeddings import PixArtAlphaTextProjection, TimestepEmbedding, Timesteps, get_1d_rotary_pos_embed
from ..modeling_outputs import Transformer2DModelOutput
from ..modeling_utils import ModelMixin
from ..normalization import FP32LayerNorm


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


# Copied from diffusers.models.transformers.transformer_wan._get_qkv_projections
def _get_qkv_projections(attn: "WanAttention", hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor):
    """Compute Q/K/V projections, using the fused linear(s) when `attn.fuse_projections()` has been called."""
    # encoder_hidden_states is only passed for cross-attention
    if encoder_hidden_states is None:
        encoder_hidden_states = hidden_states

    if attn.fused_projections:
        if not attn.is_cross_attention:
            # In self-attention layers, we can fuse the entire QKV projection into a single linear
            query, key, value = attn.to_qkv(hidden_states).chunk(3, dim=-1)
        else:
            # In cross-attention layers, we can only fuse the KV projections into a single linear
            # (Q is computed from hidden_states, K/V from encoder_hidden_states).
            query = attn.to_q(hidden_states)
            key, value = attn.to_kv(encoder_hidden_states).chunk(2, dim=-1)
    else:
        query = attn.to_q(hidden_states)
        key = attn.to_k(encoder_hidden_states)
        value = attn.to_v(encoder_hidden_states)
    return query, key, value


# Copied from diffusers.models.transformers.transformer_wan._get_added_kv_projections
def _get_added_kv_projections(attn: "WanAttention", encoder_hidden_states_img: torch.Tensor):
    """Compute the extra K/V projections for the image-conditioning stream (I2V), fused when available."""
    if attn.fused_projections:
        key_img, value_img = attn.to_added_kv(encoder_hidden_states_img).chunk(2, dim=-1)
    else:
        key_img = attn.add_k_proj(encoder_hidden_states_img)
        value_img = attn.add_v_proj(encoder_hidden_states_img)
    return key_img, value_img


# modified from diffusers.models.transformers.transformer_wan.WanAttnProcessor
class WanAttnProcessor:
    """
    Default attention processor for `WanAttention`.

    Handles self-attention (with rotary embeddings) and cross-attention; when `attn.add_k_proj` is set it
    additionally attends over an image-conditioning token stream (I2V) and sums both attention outputs.
    """

    # Class-level knobs set externally (e.g. by the attention dispatcher / parallelism setup).
    _attention_backend = None
    _parallel_config = None

    def __init__(self):
        if not hasattr(F, "scaled_dot_product_attention"):
            raise ImportError(
                "WanAttnProcessor requires PyTorch 2.0. To use it, please upgrade PyTorch to version 2.0 or higher."
            )

    def __call__(
        self,
        attn: "WanAttention",
        hidden_states: torch.Tensor,
        encoder_hidden_states: torch.Tensor | None = None,
        attention_mask: torch.Tensor | None = None,
        rotary_emb: tuple[torch.Tensor, torch.Tensor] | None = None,
    ) -> torch.Tensor:
        encoder_hidden_states_img = None
        if attn.add_k_proj is not None:
            # 512 is the context length of the text encoder, hardcoded for now
            # Leading tokens are image-conditioning tokens; the trailing 512 are text tokens.
            image_context_length = encoder_hidden_states.shape[1] - 512
            encoder_hidden_states_img = encoder_hidden_states[:, :image_context_length]
            encoder_hidden_states = encoder_hidden_states[:, image_context_length:]

        query, key, value = _get_qkv_projections(attn, hidden_states, encoder_hidden_states)

        query = attn.norm_q(query)
        key = attn.norm_k(key)

        # [B, S, inner_dim] -> [B, S, heads, head_dim]
        query = query.unflatten(2, (attn.heads, -1))
        key = key.unflatten(2, (attn.heads, -1))
        value = value.unflatten(2, (attn.heads, -1))

        if rotary_emb is not None:

            def apply_rotary_emb(
                hidden_states: torch.Tensor,
                freqs_cos: torch.Tensor,
                freqs_sin: torch.Tensor,
            ):
                # Interleaved (even/odd channel pair) rotary embedding; computed in the freqs dtype
                # then cast back to the input dtype.
                x1, x2 = hidden_states.unflatten(-1, (-1, 2)).unbind(-1)
                cos = freqs_cos[..., 0::2]
                sin = freqs_sin[..., 1::2]
                out = torch.empty_like(hidden_states)
                out[..., 0::2] = x1 * cos - x2 * sin
                out[..., 1::2] = x1 * sin + x2 * cos
                return out.type_as(hidden_states)

            query = apply_rotary_emb(query, *rotary_emb)
            key = apply_rotary_emb(key, *rotary_emb)

        # I2V task
        hidden_states_img = None
        if encoder_hidden_states_img is not None:
            key_img, value_img = _get_added_kv_projections(attn, encoder_hidden_states_img)
            key_img = attn.norm_added_k(key_img)

            key_img = key_img.unflatten(2, (attn.heads, -1))
            value_img = value_img.unflatten(2, (attn.heads, -1))

            hidden_states_img = dispatch_attention_fn(
                query,
                key_img,
                value_img,
                attn_mask=None,
                dropout_p=0.0,
                is_causal=False,
                backend=self._attention_backend,
                # Reference: https://github.com/huggingface/diffusers/pull/12660
                parallel_config=None,
            )
            hidden_states_img = hidden_states_img.flatten(2, 3)
            hidden_states_img = hidden_states_img.type_as(query)

        hidden_states = dispatch_attention_fn(
            query,
            key,
            value,
            attn_mask=attention_mask,
            dropout_p=0.0,
            is_causal=False,
            backend=self._attention_backend,
            # Reference: https://github.com/huggingface/diffusers/pull/12660
            # Context-parallel attention is only used for self-attention (encoder_hidden_states is None).
            parallel_config=(self._parallel_config if encoder_hidden_states is None else None),
        )
        hidden_states = hidden_states.flatten(2, 3)
        hidden_states = hidden_states.type_as(query)

        if hidden_states_img is not None:
            hidden_states = hidden_states + hidden_states_img

        # Output projection followed by dropout.
        hidden_states = attn.to_out[0](hidden_states)
        hidden_states = attn.to_out[1](hidden_states)
        return hidden_states


# Copied from diffusers.models.transformers.transformer_wan.WanAttnProcessor2_0
class WanAttnProcessor2_0:
    """Deprecated alias: constructing this class returns a `WanAttnProcessor` instance."""

    def __new__(cls, *args, **kwargs):
        deprecation_message = (
            "The WanAttnProcessor2_0 class is deprecated and will be removed in a future version. "
            "Please use WanAttnProcessor instead. "
        )
        deprecate("WanAttnProcessor2_0", "1.0.0", deprecation_message, standard_warn=False)
        return WanAttnProcessor(*args, **kwargs)


# Copied from diffusers.models.transformers.transformer_wan.WanAttention
class WanAttention(torch.nn.Module, AttentionModuleMixin):
    """
    Multi-head attention module used by the Wan/ChronoEdit transformer blocks.

    Supports self-attention (`cross_attention_dim_head=None`), cross-attention, and an optional extra K/V
    projection pair (`added_kv_proj_dim`) for image-conditioning tokens. Q/K are RMS-normalized before
    attention. QKV projections can be fused/unfused via `fuse_projections` / `unfuse_projections`.
    """

    _default_processor_cls = WanAttnProcessor
    _available_processors = [
        WanAttnProcessor,
    ]

    def __init__(
        self,
        dim: int,
        heads: int = 8,
        dim_head: int = 64,
        eps: float = 1e-5,
        dropout: float = 0.0,
        added_kv_proj_dim: int | None = None,
        cross_attention_dim_head: int | None = None,
        processor=None,
        is_cross_attention=None,
    ):
        super().__init__()
        self.inner_dim = dim_head * heads
        self.heads = heads
        self.added_kv_proj_dim = added_kv_proj_dim
        self.cross_attention_dim_head = cross_attention_dim_head
        # K/V width may differ from Q width when a cross-attention head dim is given.
        self.kv_inner_dim = self.inner_dim if cross_attention_dim_head is None else cross_attention_dim_head * heads

        self.to_q = torch.nn.Linear(dim, self.inner_dim, bias=True)
        self.to_k = torch.nn.Linear(dim, self.kv_inner_dim, bias=True)
        self.to_v = torch.nn.Linear(dim, self.kv_inner_dim, bias=True)
        self.to_out = torch.nn.ModuleList(
            [
                torch.nn.Linear(self.inner_dim, dim, bias=True),
                torch.nn.Dropout(dropout),
            ]
        )
        self.norm_q = torch.nn.RMSNorm(dim_head * heads, eps=eps, elementwise_affine=True)
        self.norm_k = torch.nn.RMSNorm(dim_head * heads, eps=eps, elementwise_affine=True)

        self.add_k_proj = self.add_v_proj = None
        if added_kv_proj_dim is not None:
            self.add_k_proj = torch.nn.Linear(added_kv_proj_dim, self.inner_dim, bias=True)
            self.add_v_proj = torch.nn.Linear(added_kv_proj_dim, self.inner_dim, bias=True)
            self.norm_added_k = torch.nn.RMSNorm(dim_head * heads, eps=eps)

        if is_cross_attention is not None:
            self.is_cross_attention = is_cross_attention
        else:
            # Infer from whether a separate cross-attention head dim was configured.
            self.is_cross_attention = cross_attention_dim_head is not None

        self.set_processor(processor)

    def fuse_projections(self):
        """Concatenate Q/K/V (or K/V) weights into single fused linears for faster inference."""
        if getattr(self, "fused_projections", False):
            return

        if not self.is_cross_attention:
            concatenated_weights = torch.cat([self.to_q.weight.data, self.to_k.weight.data, self.to_v.weight.data])
            concatenated_bias = torch.cat([self.to_q.bias.data, self.to_k.bias.data, self.to_v.bias.data])
            out_features, in_features = concatenated_weights.shape
            # Build on the meta device, then materialize via assign=True to avoid a double allocation.
            with torch.device("meta"):
                self.to_qkv = nn.Linear(in_features, out_features, bias=True)
            self.to_qkv.load_state_dict(
                {"weight": concatenated_weights, "bias": concatenated_bias}, strict=True, assign=True
            )
        else:
            concatenated_weights = torch.cat([self.to_k.weight.data, self.to_v.weight.data])
            concatenated_bias = torch.cat([self.to_k.bias.data, self.to_v.bias.data])
            out_features, in_features = concatenated_weights.shape
            with torch.device("meta"):
                self.to_kv = nn.Linear(in_features, out_features, bias=True)
            self.to_kv.load_state_dict(
                {"weight": concatenated_weights, "bias": concatenated_bias}, strict=True, assign=True
            )

        if self.added_kv_proj_dim is not None:
            concatenated_weights = torch.cat([self.add_k_proj.weight.data, self.add_v_proj.weight.data])
            concatenated_bias = torch.cat([self.add_k_proj.bias.data, self.add_v_proj.bias.data])
            out_features, in_features = concatenated_weights.shape
            with torch.device("meta"):
                self.to_added_kv = nn.Linear(in_features, out_features, bias=True)
            self.to_added_kv.load_state_dict(
                {"weight": concatenated_weights, "bias": concatenated_bias}, strict=True, assign=True
            )

        self.fused_projections = True

    @torch.no_grad()
    def unfuse_projections(self):
        """Drop the fused linears created by `fuse_projections` (the original per-projection linears remain)."""
        if not getattr(self, "fused_projections", False):
            return

        if hasattr(self, "to_qkv"):
            delattr(self, "to_qkv")
        if hasattr(self, "to_kv"):
            delattr(self, "to_kv")
        if hasattr(self, "to_added_kv"):
            delattr(self, "to_added_kv")

        self.fused_projections = False

    def forward(
        self,
        hidden_states: torch.Tensor,
        encoder_hidden_states: torch.Tensor | None = None,
        attention_mask: torch.Tensor | None = None,
        rotary_emb: tuple[torch.Tensor, torch.Tensor] | None = None,
        **kwargs,
    ) -> torch.Tensor:
        # Delegate all computation to the configured processor.
        return self.processor(self, hidden_states, encoder_hidden_states, attention_mask, rotary_emb, **kwargs)


# Copied from diffusers.models.transformers.transformer_wan.WanImageEmbedding
class WanImageEmbedding(torch.nn.Module):
    """Project CLIP image embeddings into the transformer width via norm -> FFN -> norm."""

    def __init__(self, in_features: int, out_features: int, pos_embed_seq_len=None):
        super().__init__()

        self.norm1 = FP32LayerNorm(in_features)
        self.ff = FeedForward(in_features, out_features, mult=1, activation_fn="gelu")
        self.norm2 = FP32LayerNorm(out_features)
        if pos_embed_seq_len is not None:
            # Learned additive positional embedding applied before the projection.
            self.pos_embed = nn.Parameter(torch.zeros(1, pos_embed_seq_len, in_features))
        else:
            self.pos_embed = None

    def forward(self, encoder_hidden_states_image: torch.Tensor) -> torch.Tensor:
        if self.pos_embed is not None:
            # Folds pairs of images into one sequence of 2*seq_len tokens before adding the
            # positional embedding — assumes the batch carries image pairs; TODO confirm with callers.
            batch_size, seq_len, embed_dim = encoder_hidden_states_image.shape
            encoder_hidden_states_image = encoder_hidden_states_image.view(-1, 2 * seq_len, embed_dim)
            encoder_hidden_states_image = encoder_hidden_states_image + self.pos_embed

        hidden_states = self.norm1(encoder_hidden_states_image)
        hidden_states = self.ff(hidden_states)
        hidden_states = self.norm2(hidden_states)
        return hidden_states


# Copied from diffusers.models.transformers.transformer_wan.WanTimeTextImageEmbedding
class WanTimeTextImageEmbedding(nn.Module):
    """
    Joint conditioning embedder: sinusoidal timestep -> MLP, text embeddings -> projection, and
    (optionally) image embeddings -> `WanImageEmbedding`.
    """

    def __init__(
        self,
        dim: int,
        time_freq_dim: int,
        time_proj_dim: int,
        text_embed_dim: int,
        image_embed_dim: int | None = None,
        pos_embed_seq_len: int | None = None,
    ):
        super().__init__()

        self.timesteps_proj = Timesteps(num_channels=time_freq_dim, flip_sin_to_cos=True, downscale_freq_shift=0)
        self.time_embedder = TimestepEmbedding(in_channels=time_freq_dim, time_embed_dim=dim)
        self.act_fn = nn.SiLU()
        self.time_proj = nn.Linear(dim, time_proj_dim)
        self.text_embedder = PixArtAlphaTextProjection(text_embed_dim, dim, act_fn="gelu_tanh")

        self.image_embedder = None
        if image_embed_dim is not None:
            self.image_embedder = WanImageEmbedding(image_embed_dim, dim, pos_embed_seq_len=pos_embed_seq_len)

    def forward(
        self,
        timestep: torch.Tensor,
        encoder_hidden_states: torch.Tensor,
        encoder_hidden_states_image: torch.Tensor | None = None,
        timestep_seq_len: int | None = None,
    ):
        """Returns (temb, timestep_proj, encoder_hidden_states, encoder_hidden_states_image)."""
        timestep = self.timesteps_proj(timestep)
        if timestep_seq_len is not None:
            # Restore per-token timesteps: [B * seq_len, ...] -> [B, seq_len, ...]
            timestep = timestep.unflatten(0, (-1, timestep_seq_len))

        time_embedder_dtype = next(iter(self.time_embedder.parameters())).dtype
        # Cast to the embedder's dtype unless it is quantized (int8), where casting would be invalid.
        if timestep.dtype != time_embedder_dtype and time_embedder_dtype != torch.int8:
            timestep = timestep.to(time_embedder_dtype)
        temb = self.time_embedder(timestep).type_as(encoder_hidden_states)
        timestep_proj = self.time_proj(self.act_fn(temb))

        encoder_hidden_states = self.text_embedder(encoder_hidden_states)
        if encoder_hidden_states_image is not None:
            encoder_hidden_states_image = self.image_embedder(encoder_hidden_states_image)

        return temb, timestep_proj, encoder_hidden_states, encoder_hidden_states_image


class ChronoEditRotaryPosEmbed(nn.Module):
    """
    3D (time/height/width) rotary position embedding for ChronoEdit.

    Frequencies for each axis are precomputed up to `max_seq_len` and registered as non-persistent
    buffers; `forward` slices and broadcasts them to the latent's post-patchification grid.
    """

    def __init__(
        self,
        attention_head_dim: int,
        patch_size: tuple[int, int, int],
        max_seq_len: int,
        theta: float = 10000.0,
        temporal_skip_len: int = 8,
    ):
        super().__init__()

        self.attention_head_dim = attention_head_dim
        self.patch_size = patch_size
        self.max_seq_len = max_seq_len
        self.temporal_skip_len = temporal_skip_len

        # Split the head dim across axes: h and w each get 2*(dim//6); t gets the remainder.
        h_dim = w_dim = 2 * (attention_head_dim // 6)
        t_dim = attention_head_dim - h_dim - w_dim
        # MPS lacks float64 support, so fall back to float32 there.
        freqs_dtype = torch.float32 if torch.backends.mps.is_available() else torch.float64

        freqs_cos = []
        freqs_sin = []

        for dim in [t_dim, h_dim, w_dim]:
            freq_cos, freq_sin = get_1d_rotary_pos_embed(
                dim,
                max_seq_len,
                theta,
                use_real=True,
                repeat_interleave_real=True,
                freqs_dtype=freqs_dtype,
            )
            freqs_cos.append(freq_cos)
            freqs_sin.append(freq_sin)

        self.register_buffer("freqs_cos", torch.cat(freqs_cos, dim=1), persistent=False)
        self.register_buffer("freqs_sin", torch.cat(freqs_sin, dim=1), persistent=False)

    def forward(self, hidden_states: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
        """Return (freqs_cos, freqs_sin), each shaped [1, ppf*pph*ppw, 1, attention_head_dim]."""
        batch_size, num_channels, num_frames, height, width = hidden_states.shape
        p_t, p_h, p_w = self.patch_size
        # Grid sizes after patchification.
        ppf, pph, ppw = num_frames // p_t, height // p_h, width // p_w

        split_sizes = [
            self.attention_head_dim - 2 * (self.attention_head_dim // 3),
            self.attention_head_dim // 3,
            self.attention_head_dim // 3,
        ]

        freqs_cos = self.freqs_cos.split(split_sizes, dim=1)
        freqs_sin = self.freqs_sin.split(split_sizes, dim=1)

        if num_frames == 2:
            # Two-frame (edit) case: use the first and last temporal positions within temporal_skip_len
            # so the two frames are maximally separated along the temporal axis.
            freqs_cos_f = freqs_cos[0][: self.temporal_skip_len][[0, -1]].view(ppf, 1, 1, -1).expand(ppf, pph, ppw, -1)
        else:
            freqs_cos_f = freqs_cos[0][:ppf].view(ppf, 1, 1, -1).expand(ppf, pph, ppw, -1)
        freqs_cos_h = freqs_cos[1][:pph].view(1, pph, 1, -1).expand(ppf, pph, ppw, -1)
        freqs_cos_w = freqs_cos[2][:ppw].view(1, 1, ppw, -1).expand(ppf, pph, ppw, -1)

        if num_frames == 2:
            freqs_sin_f = freqs_sin[0][: self.temporal_skip_len][[0, -1]].view(ppf, 1, 1, -1).expand(ppf, pph, ppw, -1)
        else:
            freqs_sin_f = freqs_sin[0][:ppf].view(ppf, 1, 1, -1).expand(ppf, pph, ppw, -1)
        freqs_sin_h = freqs_sin[1][:pph].view(1, pph, 1, -1).expand(ppf, pph, ppw, -1)
        freqs_sin_w = freqs_sin[2][:ppw].view(1, 1, ppw, -1).expand(ppf, pph, ppw, -1)

        freqs_cos = torch.cat([freqs_cos_f, freqs_cos_h, freqs_cos_w], dim=-1).reshape(1, ppf * pph * ppw, 1, -1)
        freqs_sin = torch.cat([freqs_sin_f, freqs_sin_h, freqs_sin_w], dim=-1).reshape(1, ppf * pph * ppw, 1, -1)

        return freqs_cos, freqs_sin


@maybe_allow_in_graph
# Copied from diffusers.models.transformers.transformer_wan.WanTransformerBlock
class WanTransformerBlock(nn.Module):
    """
    Transformer block: AdaLN-modulated self-attention, cross-attention on the text/image context,
    and an AdaLN-modulated feed-forward, with a learned per-block scale/shift table.
    """

    def __init__(
        self,
        dim: int,
        ffn_dim: int,
        num_heads: int,
        qk_norm: str = "rms_norm_across_heads",
        cross_attn_norm: bool = False,
        eps: float = 1e-6,
        added_kv_proj_dim: int | None = None,
    ):
        super().__init__()

        # 1. Self-attention
        self.norm1 = FP32LayerNorm(dim, eps, elementwise_affine=False)
        self.attn1 = WanAttention(
            dim=dim,
            heads=num_heads,
            dim_head=dim // num_heads,
            eps=eps,
            cross_attention_dim_head=None,
            processor=WanAttnProcessor(),
        )

        # 2. Cross-attention
        self.attn2 = WanAttention(
            dim=dim,
            heads=num_heads,
            dim_head=dim // num_heads,
            eps=eps,
            added_kv_proj_dim=added_kv_proj_dim,
            cross_attention_dim_head=dim // num_heads,
            processor=WanAttnProcessor(),
        )
        self.norm2 = FP32LayerNorm(dim, eps, elementwise_affine=True) if cross_attn_norm else nn.Identity()

        # 3. Feed-forward
        self.ffn = FeedForward(dim, inner_dim=ffn_dim, activation_fn="gelu-approximate")
        self.norm3 = FP32LayerNorm(dim, eps, elementwise_affine=False)

        self.scale_shift_table = nn.Parameter(torch.randn(1, 6, dim) / dim**0.5)

    def forward(
        self,
        hidden_states: torch.Tensor,
        encoder_hidden_states: torch.Tensor,
        temb: torch.Tensor,
        rotary_emb: torch.Tensor,
    ) -> torch.Tensor:
        if temb.ndim == 4:
            # temb: batch_size, seq_len, 6, inner_dim (wan2.2 ti2v)
            shift_msa, scale_msa, gate_msa, c_shift_msa, c_scale_msa, c_gate_msa = (
                self.scale_shift_table.unsqueeze(0) + temb.float()
            ).chunk(6, dim=2)
            # batch_size, seq_len, 1, inner_dim
            shift_msa = shift_msa.squeeze(2)
            scale_msa = scale_msa.squeeze(2)
            gate_msa = gate_msa.squeeze(2)
            c_shift_msa = c_shift_msa.squeeze(2)
            c_scale_msa = c_scale_msa.squeeze(2)
            c_gate_msa = c_gate_msa.squeeze(2)
        else:
            # temb: batch_size, 6, inner_dim (wan2.1/wan2.2 14B)
            shift_msa, scale_msa, gate_msa, c_shift_msa, c_scale_msa, c_gate_msa = (
                self.scale_shift_table + temb.float()
            ).chunk(6, dim=1)

        # 1. Self-attention (modulation arithmetic is done in fp32, then cast back)
        norm_hidden_states = (self.norm1(hidden_states.float()) * (1 + scale_msa) + shift_msa).type_as(hidden_states)
        attn_output = self.attn1(norm_hidden_states, None, None, rotary_emb)
        hidden_states = (hidden_states.float() + attn_output * gate_msa).type_as(hidden_states)

        # 2. Cross-attention
        norm_hidden_states = self.norm2(hidden_states.float()).type_as(hidden_states)
        attn_output = self.attn2(norm_hidden_states, encoder_hidden_states, None, None)
        hidden_states = hidden_states + attn_output

        # 3. Feed-forward
        norm_hidden_states = (self.norm3(hidden_states.float()) * (1 + c_scale_msa) + c_shift_msa).type_as(
            hidden_states
        )
        ff_output = self.ffn(norm_hidden_states)
        hidden_states = (hidden_states.float() + ff_output.float() * c_gate_msa).type_as(hidden_states)

        return hidden_states


# modified from diffusers.models.transformers.transformer_wan.WanTransformer3DModel
class ChronoEditTransformer3DModel(
    ModelMixin, ConfigMixin, PeftAdapterMixin, FromOriginalModelMixin, CacheMixin, AttentionMixin
):
    r"""
    A Transformer model for video-like data used in the ChronoEdit model.

    Args:
        patch_size (`tuple[int]`, defaults to `(1, 2, 2)`):
            3D patch dimensions for video embedding (t_patch, h_patch, w_patch).
        num_attention_heads (`int`, defaults to `40`):
            The number of heads to use for multi-head attention.
        attention_head_dim (`int`, defaults to `128`):
            The number of channels in each head.
        in_channels (`int`, defaults to `16`):
            The number of channels in the input.
        out_channels (`int`, defaults to `16`):
            The number of channels in the output.
        text_dim (`int`, defaults to `4096`):
            Input dimension for text embeddings.
        freq_dim (`int`, defaults to `256`):
            Dimension for sinusoidal time embeddings.
        ffn_dim (`int`, defaults to `13824`):
            Intermediate dimension in feed-forward network.
        num_layers (`int`, defaults to `40`):
            The number of layers of transformer blocks to use.
        cross_attn_norm (`bool`, defaults to `True`):
            Enable cross-attention normalization.
        qk_norm (`str`, *optional*, defaults to `"rms_norm_across_heads"`):
            Query/key normalization variant.
        eps (`float`, defaults to `1e-6`):
            Epsilon value for normalization layers.
        image_dim (`int`, *optional*, defaults to `None`):
            Input dimension of CLIP image embeddings for I2V conditioning. If `None`, no image embedder is used.
        added_kv_proj_dim (`int`, *optional*, defaults to `None`):
            The number of channels to use for the added key and value projections. If `None`, no projection is used.
        rope_max_seq_len (`int`, defaults to `1024`):
            Maximum sequence length for which rotary frequencies are precomputed.
        pos_embed_seq_len (`int`, *optional*, defaults to `None`):
            Sequence length of the learned positional embedding in the image embedder.
        rope_temporal_skip_len (`int`, defaults to `8`):
            Temporal window used by the rotary embedding in the two-frame (edit) case.
    """

    _supports_gradient_checkpointing = True
    _skip_layerwise_casting_patterns = ["patch_embedding", "condition_embedder", "norm"]
    _no_split_modules = ["WanTransformerBlock"]
    _keep_in_fp32_modules = ["time_embedder", "scale_shift_table", "norm1", "norm2", "norm3"]
    _keys_to_ignore_on_load_unexpected = ["norm_added_q"]
    _repeated_blocks = ["WanTransformerBlock"]
    _cp_plan = {
        "rope": {
            0: ContextParallelInput(split_dim=1, expected_dims=4, split_output=True),
            1: ContextParallelInput(split_dim=1, expected_dims=4, split_output=True),
        },
        "blocks.0": {
            "hidden_states": ContextParallelInput(split_dim=1, expected_dims=3, split_output=False),
        },
        # Reference: https://github.com/huggingface/diffusers/pull/12660
        # We need to disable the splitting of encoder_hidden_states because
        # the image_encoder consistently generates 257 tokens for image_embed. This causes
        # the shape of encoder_hidden_states—whose token count is always 769 (512 + 257)
        # after concatenation—to be indivisible by the number of devices in the CP.
        "proj_out": ContextParallelOutput(gather_dim=1, expected_dims=3),
    }

    @register_to_config
    def __init__(
        self,
        patch_size: tuple[int] = (1, 2, 2),
        num_attention_heads: int = 40,
        attention_head_dim: int = 128,
        in_channels: int = 16,
        out_channels: int = 16,
        text_dim: int = 4096,
        freq_dim: int = 256,
        ffn_dim: int = 13824,
        num_layers: int = 40,
        cross_attn_norm: bool = True,
        qk_norm: str | None = "rms_norm_across_heads",
        eps: float = 1e-6,
        image_dim: int | None = None,
        added_kv_proj_dim: int | None = None,
        rope_max_seq_len: int = 1024,
        pos_embed_seq_len: int | None = None,
        rope_temporal_skip_len: int = 8,
    ) -> None:
        super().__init__()

        inner_dim = num_attention_heads * attention_head_dim
        out_channels = out_channels or in_channels

        # 1. Patch & position embedding
        self.rope = ChronoEditRotaryPosEmbed(
            attention_head_dim, patch_size, rope_max_seq_len, temporal_skip_len=rope_temporal_skip_len
        )
        self.patch_embedding = nn.Conv3d(in_channels, inner_dim, kernel_size=patch_size, stride=patch_size)

        # 2. Condition embeddings
        # image_embedding_dim=1280 for I2V model
        self.condition_embedder = WanTimeTextImageEmbedding(
            dim=inner_dim,
            time_freq_dim=freq_dim,
            time_proj_dim=inner_dim * 6,
            text_embed_dim=text_dim,
            image_embed_dim=image_dim,
            pos_embed_seq_len=pos_embed_seq_len,
        )

        # 3. Transformer blocks
        self.blocks = nn.ModuleList(
            [
                WanTransformerBlock(
                    inner_dim, ffn_dim, num_attention_heads, qk_norm, cross_attn_norm, eps, added_kv_proj_dim
                )
                for _ in range(num_layers)
            ]
        )

        # 4. Output norm & projection
        self.norm_out = FP32LayerNorm(inner_dim, eps, elementwise_affine=False)
        self.proj_out = nn.Linear(inner_dim, out_channels * math.prod(patch_size))
        self.scale_shift_table = nn.Parameter(torch.randn(1, 2, inner_dim) / inner_dim**0.5)

        self.gradient_checkpointing = False

    @apply_lora_scale("attention_kwargs")
    def forward(
        self,
        hidden_states: torch.Tensor,
        timestep: torch.LongTensor,
        encoder_hidden_states: torch.Tensor,
        encoder_hidden_states_image: torch.Tensor | None = None,
        return_dict: bool = True,
        attention_kwargs: dict[str, Any] | None = None,
    ) -> torch.Tensor | dict[str, torch.Tensor]:
        """
        Denoise a video latent.

        `hidden_states` is a [B, C, F, H, W] latent; returns the predicted output with the same
        spatio-temporal shape (unpatchified), as a tuple or `Transformer2DModelOutput` per `return_dict`.
        """
        batch_size, num_channels, num_frames, height, width = hidden_states.shape
        p_t, p_h, p_w = self.config.patch_size
        post_patch_num_frames = num_frames // p_t
        post_patch_height = height // p_h
        post_patch_width = width // p_w

        # 1. Rotary embeddings are computed from the pre-patchification latent shape
        rotary_emb = self.rope(hidden_states)

        # 2. Patchify: [B, C, F, H, W] -> [B, F*H*W (patch tokens), inner_dim]
        hidden_states = self.patch_embedding(hidden_states)
        hidden_states = hidden_states.flatten(2).transpose(1, 2)

        # timestep shape: batch_size, or batch_size, seq_len (wan 2.2 ti2v)
        if timestep.ndim == 2:
            ts_seq_len = timestep.shape[1]
            timestep = timestep.flatten()  # batch_size * seq_len
        else:
            ts_seq_len = None

        # 3. Condition embeddings
        temb, timestep_proj, encoder_hidden_states, encoder_hidden_states_image = self.condition_embedder(
            timestep, encoder_hidden_states, encoder_hidden_states_image, timestep_seq_len=ts_seq_len
        )
        if ts_seq_len is not None:
            # batch_size, seq_len, 6, inner_dim
            timestep_proj = timestep_proj.unflatten(2, (6, -1))
        else:
            # batch_size, 6, inner_dim
            timestep_proj = timestep_proj.unflatten(1, (6, -1))

        if encoder_hidden_states_image is not None:
            # Image tokens are prepended to the text tokens for cross-attention.
            encoder_hidden_states = torch.concat([encoder_hidden_states_image, encoder_hidden_states], dim=1)

        # 4. Transformer blocks
        if torch.is_grad_enabled() and self.gradient_checkpointing:
            for block in self.blocks:
                hidden_states = self._gradient_checkpointing_func(
                    block, hidden_states, encoder_hidden_states, timestep_proj, rotary_emb
                )
        else:
            for block in self.blocks:
                hidden_states = block(hidden_states, encoder_hidden_states, timestep_proj, rotary_emb)

        # 5. Output norm, projection & unpatchify
        if temb.ndim == 3:
            # batch_size, seq_len, inner_dim (wan 2.2 ti2v)
            shift, scale = (self.scale_shift_table.unsqueeze(0).to(temb.device) + temb.unsqueeze(2)).chunk(2, dim=2)
            shift = shift.squeeze(2)
            scale = scale.squeeze(2)
        else:
            # batch_size, inner_dim
            shift, scale = (self.scale_shift_table.to(temb.device) + temb.unsqueeze(1)).chunk(2, dim=1)

        # Move the shift and scale tensors to the same device as hidden_states.
        # When using multi-GPU inference via accelerate these will be on the
        # first device rather than the last device, which hidden_states ends up
        # on.
        shift = shift.to(hidden_states.device)
        scale = scale.to(hidden_states.device)

        hidden_states = (self.norm_out(hidden_states.float()) * (1 + scale) + shift).type_as(hidden_states)
        hidden_states = self.proj_out(hidden_states)

        # Unpatchify: tokens back to [B, out_channels, F, H, W]
        hidden_states = hidden_states.reshape(
            batch_size, post_patch_num_frames, post_patch_height, post_patch_width, p_t, p_h, p_w, -1
        )
        hidden_states = hidden_states.permute(0, 7, 1, 4, 2, 5, 3, 6)
        output = hidden_states.flatten(6, 7).flatten(4, 5).flatten(2, 3)

        if not return_dict:
            return (output,)

        return Transformer2DModelOutput(sample=output)
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/models/transformers/transformer_chronoedit.py", "license": "Apache License 2.0", "lines": 618, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:src/diffusers/pipelines/chronoedit/pipeline_chronoedit.py
# Copyright 2025 The ChronoEdit Team and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import html from typing import Any, Callable import PIL import regex as re import torch from transformers import AutoTokenizer, CLIPImageProcessor, CLIPVisionModel, UMT5EncoderModel from ...callbacks import MultiPipelineCallbacks, PipelineCallback from ...image_processor import PipelineImageInput from ...loaders import WanLoraLoaderMixin from ...models import AutoencoderKLWan, ChronoEditTransformer3DModel from ...schedulers import FlowMatchEulerDiscreteScheduler from ...utils import is_ftfy_available, is_torch_xla_available, logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ...video_processor import VideoProcessor from ..pipeline_utils import DiffusionPipeline from .pipeline_output import ChronoEditPipelineOutput if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) # pylint: disable=invalid-name if is_ftfy_available(): import ftfy EXAMPLE_DOC_STRING = """ Examples: ```python >>> import torch >>> import numpy as np >>> from diffusers import AutoencoderKLWan, ChronoEditTransformer3DModel, ChronoEditPipeline >>> from diffusers.utils import export_to_video, load_image >>> from transformers import CLIPVisionModel >>> # Available models: nvidia/ChronoEdit-14B-Diffusers >>> model_id = "nvidia/ChronoEdit-14B-Diffusers" >>> 
image_encoder = CLIPVisionModel.from_pretrained( ... model_id, subfolder="image_encoder", torch_dtype=torch.float32 ... ) >>> vae = AutoencoderKLWan.from_pretrained(model_id, subfolder="vae", torch_dtype=torch.float32) >>> transformer = ChronoEditTransformer3DModel.from_pretrained( ... model_id, subfolder="transformer", torch_dtype=torch.bfloat16 ... ) >>> pipe = ChronoEditPipeline.from_pretrained( ... model_id, vae=vae, image_encoder=image_encoder, transformer=transformer, torch_dtype=torch.bfloat16 ... ) >>> pipe.to("cuda") >>> image = load_image("https://huggingface.co/spaces/nvidia/ChronoEdit/resolve/main/examples/3.png") >>> max_area = 720 * 1280 >>> aspect_ratio = image.height / image.width >>> mod_value = pipe.vae_scale_factor_spatial * pipe.transformer.config.patch_size[1] >>> height = round(np.sqrt(max_area * aspect_ratio)) // mod_value * mod_value >>> width = round(np.sqrt(max_area / aspect_ratio)) // mod_value * mod_value >>> image = image.resize((width, height)) >>> prompt = ( ... "The user wants to transform the image by adding a small, cute mouse sitting inside the floral teacup, enjoying a spa bath. The mouse should appear relaxed and cheerful, with a tiny white bath towel draped over its head like a turban. It should be positioned comfortably in the cup’s liquid, with gentle steam rising around it to blend with the cozy atmosphere. " ... "The mouse’s pose should be natural—perhaps sitting upright with paws resting lightly on the rim or submerged in the tea. The teacup’s floral design, gold trim, and warm lighting must remain unchanged to preserve the original aesthetic. The steam should softly swirl around the mouse, enhancing the spa-like, whimsical mood." ... ) >>> output = pipe( ... image=image, ... prompt=prompt, ... height=height, ... width=width, ... num_frames=5, ... guidance_scale=5.0, ... enable_temporal_reasoning=False, ... num_temporal_reasoning_steps=0, ... 
).frames[0] >>> export_to_video(output, "output.mp4", fps=16) ``` """ def basic_clean(text): text = ftfy.fix_text(text) text = html.unescape(html.unescape(text)) return text.strip() def whitespace_clean(text): text = re.sub(r"\s+", " ", text) text = text.strip() return text def prompt_clean(text): text = whitespace_clean(basic_clean(text)) return text # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents def retrieve_latents( encoder_output: torch.Tensor, generator: torch.Generator | None = None, sample_mode: str = "sample" ): if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": return encoder_output.latent_dist.sample(generator) elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": return encoder_output.latent_dist.mode() elif hasattr(encoder_output, "latents"): return encoder_output.latents else: raise AttributeError("Could not access latents of provided encoder_output") class ChronoEditPipeline(DiffusionPipeline, WanLoraLoaderMixin): r""" Pipeline for image-to-video generation using Wan. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). Args: tokenizer ([`T5Tokenizer`]): Tokenizer from [T5](https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5Tokenizer), specifically the [google/umt5-xxl](https://huggingface.co/google/umt5-xxl) variant. text_encoder ([`T5EncoderModel`]): [T5](https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5EncoderModel), specifically the [google/umt5-xxl](https://huggingface.co/google/umt5-xxl) variant. 
image_encoder ([`CLIPVisionModel`]): [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPVisionModel), specifically the [clip-vit-huge-patch14](https://github.com/mlfoundations/open_clip/blob/main/docs/PRETRAINED.md#vit-h14-xlm-roberta-large) variant. transformer ([`WanTransformer3DModel`]): Conditional Transformer to denoise the input latents. scheduler ([`UniPCMultistepScheduler`]): A scheduler to be used in combination with `transformer` to denoise the encoded image latents. vae ([`AutoencoderKLWan`]): Variational Auto-Encoder (VAE) Model to encode and decode videos to and from latent representations. """ model_cpu_offload_seq = "text_encoder->image_encoder->transformer->vae" _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"] def __init__( self, tokenizer: AutoTokenizer, text_encoder: UMT5EncoderModel, image_encoder: CLIPVisionModel, image_processor: CLIPImageProcessor, transformer: ChronoEditTransformer3DModel, vae: AutoencoderKLWan, scheduler: FlowMatchEulerDiscreteScheduler, ): super().__init__() self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, image_encoder=image_encoder, transformer=transformer, scheduler=scheduler, image_processor=image_processor, ) self.vae_scale_factor_temporal = self.vae.config.scale_factor_temporal if getattr(self, "vae", None) else 4 self.vae_scale_factor_spatial = self.vae.config.scale_factor_spatial if getattr(self, "vae", None) else 8 self.video_processor = VideoProcessor(vae_scale_factor=self.vae_scale_factor_spatial) self.image_processor = image_processor # Copied from diffusers.pipelines.wan.pipeline_wan_i2v.WanImageToVideoPipeline._get_t5_prompt_embeds def _get_t5_prompt_embeds( self, prompt: str | list[str] = None, num_videos_per_prompt: int = 1, max_sequence_length: int = 512, device: torch.device | None = None, dtype: torch.dtype | None = None, ): device = device or self._execution_device dtype = dtype or self.text_encoder.dtype prompt = 
[prompt] if isinstance(prompt, str) else prompt prompt = [prompt_clean(u) for u in prompt] batch_size = len(prompt) text_inputs = self.tokenizer( prompt, padding="max_length", max_length=max_sequence_length, truncation=True, add_special_tokens=True, return_attention_mask=True, return_tensors="pt", ) text_input_ids, mask = text_inputs.input_ids, text_inputs.attention_mask seq_lens = mask.gt(0).sum(dim=1).long() prompt_embeds = self.text_encoder(text_input_ids.to(device), mask.to(device)).last_hidden_state prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) prompt_embeds = [u[:v] for u, v in zip(prompt_embeds, seq_lens)] prompt_embeds = torch.stack( [torch.cat([u, u.new_zeros(max_sequence_length - u.size(0), u.size(1))]) for u in prompt_embeds], dim=0 ) # duplicate text embeddings for each generation per prompt, using mps friendly method _, seq_len, _ = prompt_embeds.shape prompt_embeds = prompt_embeds.repeat(1, num_videos_per_prompt, 1) prompt_embeds = prompt_embeds.view(batch_size * num_videos_per_prompt, seq_len, -1) return prompt_embeds # Copied from diffusers.pipelines.wan.pipeline_wan_i2v.WanImageToVideoPipeline.encode_image def encode_image( self, image: PipelineImageInput, device: torch.device | None = None, ): device = device or self._execution_device image = self.image_processor(images=image, return_tensors="pt").to(device) image_embeds = self.image_encoder(**image, output_hidden_states=True) return image_embeds.hidden_states[-2] # Copied from diffusers.pipelines.wan.pipeline_wan.WanPipeline.encode_prompt def encode_prompt( self, prompt: str | list[str], negative_prompt: str | list[str] | None = None, do_classifier_free_guidance: bool = True, num_videos_per_prompt: int = 1, prompt_embeds: torch.Tensor | None = None, negative_prompt_embeds: torch.Tensor | None = None, max_sequence_length: int = 226, device: torch.device | None = None, dtype: torch.dtype | None = None, ): r""" Encodes the prompt into text encoder hidden states. 
Args: prompt (`str` or `list[str]`, *optional*): prompt to be encoded negative_prompt (`str` or `list[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). do_classifier_free_guidance (`bool`, *optional*, defaults to `True`): Whether to use classifier free guidance or not. num_videos_per_prompt (`int`, *optional*, defaults to 1): Number of videos that should be generated per prompt. torch device to place the resulting embeddings on prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. device: (`torch.device`, *optional*): torch device dtype: (`torch.dtype`, *optional*): torch dtype """ device = device or self._execution_device prompt = [prompt] if isinstance(prompt, str) else prompt if prompt is not None: batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: prompt_embeds = self._get_t5_prompt_embeds( prompt=prompt, num_videos_per_prompt=num_videos_per_prompt, max_sequence_length=max_sequence_length, device=device, dtype=dtype, ) if do_classifier_free_guidance and negative_prompt_embeds is None: negative_prompt = negative_prompt or "" negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt if prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." 
) elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) negative_prompt_embeds = self._get_t5_prompt_embeds( prompt=negative_prompt, num_videos_per_prompt=num_videos_per_prompt, max_sequence_length=max_sequence_length, device=device, dtype=dtype, ) return prompt_embeds, negative_prompt_embeds # modified from diffusers.pipelines.wan.pipeline_wan_i2v.WanImageToVideoPipeline.check_inputs def check_inputs( self, prompt, negative_prompt, image, height, width, prompt_embeds=None, negative_prompt_embeds=None, image_embeds=None, callback_on_step_end_tensor_inputs=None, ): if image is not None and image_embeds is not None: raise ValueError( f"Cannot forward both `image`: {image} and `image_embeds`: {image_embeds}. Please make sure to" " only forward one of the two." ) if image is None and image_embeds is None: raise ValueError( "Provide either `image` or `prompt_embeds`. Cannot leave both `image` and `image_embeds` undefined." 
) if image is not None and not isinstance(image, torch.Tensor) and not isinstance(image, PIL.Image.Image): raise ValueError(f"`image` has to be of type `torch.Tensor` or `PIL.Image.Image` but is {type(image)}") if height % 16 != 0 or width % 16 != 0: raise ValueError(f"`height` and `width` have to be divisible by 16 but are {height} and {width}.") if callback_on_step_end_tensor_inputs is not None and not all( k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs ): raise ValueError( f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" ) if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." 
) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") elif negative_prompt is not None and ( not isinstance(negative_prompt, str) and not isinstance(negative_prompt, list) ): raise ValueError(f"`negative_prompt` has to be of type `str` or `list` but is {type(negative_prompt)}") # modified from diffusers.pipelines.wan.pipeline_wan_i2v.WanImageToVideoPipeline.prepare_latents def prepare_latents( self, image: PipelineImageInput, batch_size: int, num_channels_latents: int = 16, height: int = 480, width: int = 832, num_frames: int = 81, dtype: torch.dtype | None = None, device: torch.device | None = None, generator: torch.Generator | list[torch.Generator] | None = None, latents: torch.Tensor | None = None, ) -> tuple[torch.Tensor, torch.Tensor]: num_latent_frames = (num_frames - 1) // self.vae_scale_factor_temporal + 1 latent_height = height // self.vae_scale_factor_spatial latent_width = width // self.vae_scale_factor_spatial shape = (batch_size, num_channels_latents, num_latent_frames, latent_height, latent_width) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
) if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device=device, dtype=dtype) image = image.unsqueeze(2) # [batch_size, channels, 1, height, width] video_condition = torch.cat( [image, image.new_zeros(image.shape[0], image.shape[1], num_frames - 1, height, width)], dim=2 ) video_condition = video_condition.to(device=device, dtype=self.vae.dtype) latents_mean = ( torch.tensor(self.vae.config.latents_mean) .view(1, self.vae.config.z_dim, 1, 1, 1) .to(latents.device, latents.dtype) ) latents_std = 1.0 / torch.tensor(self.vae.config.latents_std).view(1, self.vae.config.z_dim, 1, 1, 1).to( latents.device, latents.dtype ) if isinstance(generator, list): latent_condition = [ retrieve_latents(self.vae.encode(video_condition), sample_mode="argmax") for _ in generator ] latent_condition = torch.cat(latent_condition) else: latent_condition = retrieve_latents(self.vae.encode(video_condition), sample_mode="argmax") latent_condition = latent_condition.repeat(batch_size, 1, 1, 1, 1) latent_condition = latent_condition.to(dtype) latent_condition = (latent_condition - latents_mean) * latents_std mask_lat_size = torch.ones(batch_size, 1, num_frames, latent_height, latent_width) mask_lat_size[:, :, list(range(1, num_frames))] = 0 first_frame_mask = mask_lat_size[:, :, 0:1] first_frame_mask = torch.repeat_interleave(first_frame_mask, dim=2, repeats=self.vae_scale_factor_temporal) mask_lat_size = torch.concat([first_frame_mask, mask_lat_size[:, :, 1:, :]], dim=2) mask_lat_size = mask_lat_size.view(batch_size, -1, self.vae_scale_factor_temporal, latent_height, latent_width) mask_lat_size = mask_lat_size.transpose(1, 2) mask_lat_size = mask_lat_size.to(latent_condition.device) return latents, torch.concat([mask_lat_size, latent_condition], dim=1) @property def guidance_scale(self): return self._guidance_scale @property def do_classifier_free_guidance(self): return self._guidance_scale > 1 @property def 
num_timesteps(self): return self._num_timesteps @property def current_timestep(self): return self._current_timestep @property def interrupt(self): return self._interrupt @property def attention_kwargs(self): return self._attention_kwargs @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, image: PipelineImageInput, prompt: str | list[str] = None, negative_prompt: str | list[str] = None, height: int = 480, width: int = 832, num_frames: int = 81, num_inference_steps: int = 50, guidance_scale: float = 5.0, num_videos_per_prompt: int | None = 1, generator: torch.Generator | list[torch.Generator] | None = None, latents: torch.Tensor | None = None, prompt_embeds: torch.Tensor | None = None, negative_prompt_embeds: torch.Tensor | None = None, image_embeds: torch.Tensor | None = None, output_type: str | None = "np", return_dict: bool = True, attention_kwargs: dict[str, Any] | None = None, callback_on_step_end: Callable[[int, int, None], PipelineCallback | MultiPipelineCallbacks] | None = None, callback_on_step_end_tensor_inputs: list[str] = ["latents"], max_sequence_length: int = 512, enable_temporal_reasoning: bool = False, num_temporal_reasoning_steps: int = 0, ): r""" The call function to the pipeline for generation. Args: image (`PipelineImageInput`): The input image to condition the generation on. Must be an image, a list of images or a `torch.Tensor`. prompt (`str` or `list[str]`, *optional*): The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. instead. negative_prompt (`str` or `list[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). height (`int`, defaults to `480`): The height of the generated video. width (`int`, defaults to `832`): The width of the generated video. 
num_frames (`int`, defaults to `81`): The number of frames in the generated video. num_inference_steps (`int`, defaults to `50`): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. guidance_scale (`float`, defaults to `5.0`): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. num_videos_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. generator (`torch.Generator` or `list[torch.Generator]`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.Tensor`, *optional*): Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor is generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, text embeddings are generated from the `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, text embeddings are generated from the `negative_prompt` input argument. image_embeds (`torch.Tensor`, *optional*): Pre-generated image embeddings. Can be used to easily tweak image inputs (weighting). If not provided, image embeddings are generated from the `image` input argument. 
output_type (`str`, *optional*, defaults to `"np"`): The output format of the generated image. Choose between `PIL.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`ChronoEditPipelineOutput`] instead of a plain tuple. attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*): A function or a subclass of `PipelineCallback` or `MultiPipelineCallbacks` that is called at the end of each denoising step during the inference. with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by `callback_on_step_end_tensor_inputs`. callback_on_step_end_tensor_inputs (`List`, *optional*): The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the `._callback_tensor_inputs` attribute of your pipeline class. max_sequence_length (`int`, defaults to `512`): The maximum sequence length of the text encoder. If the prompt is longer than this, it will be truncated. If the prompt is shorter, it will be padded to this length. enable_temporal_reasoning (`bool`, *optional*, defaults to `False`): Whether to enable temporal reasoning. num_temporal_reasoning_steps (`int`, *optional*, defaults to `0`): The number of steps to enable temporal reasoning. 
Examples: Returns: [`~ChronoEditPipelineOutput`] or `tuple`: If `return_dict` is `True`, [`ChronoEditPipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated images and the second element is a list of `bool`s indicating whether the corresponding generated image contains "not-safe-for-work" (nsfw) content. """ if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs # 1. Check inputs. Raise error if not correct self.check_inputs( prompt, negative_prompt, image, height, width, prompt_embeds, negative_prompt_embeds, image_embeds, callback_on_step_end_tensor_inputs, ) num_frames = 5 if not enable_temporal_reasoning else num_frames if num_frames % self.vae_scale_factor_temporal != 1: logger.warning( f"`num_frames - 1` has to be divisible by {self.vae_scale_factor_temporal}. Rounding to the nearest number." ) num_frames = num_frames // self.vae_scale_factor_temporal * self.vae_scale_factor_temporal + 1 num_frames = max(num_frames, 1) self._guidance_scale = guidance_scale self._attention_kwargs = attention_kwargs self._current_timestep = None self._interrupt = False device = self._execution_device # 2. Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] # 3. 
Encode input prompt prompt_embeds, negative_prompt_embeds = self.encode_prompt( prompt=prompt, negative_prompt=negative_prompt, do_classifier_free_guidance=self.do_classifier_free_guidance, num_videos_per_prompt=num_videos_per_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, max_sequence_length=max_sequence_length, device=device, ) # Encode image embedding transformer_dtype = self.transformer.dtype prompt_embeds = prompt_embeds.to(transformer_dtype) if negative_prompt_embeds is not None: negative_prompt_embeds = negative_prompt_embeds.to(transformer_dtype) if image_embeds is None: image_embeds = self.encode_image(image, device) image_embeds = image_embeds.repeat(batch_size, 1, 1) image_embeds = image_embeds.to(transformer_dtype) # 4. Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps # 5. Prepare latent variables num_channels_latents = self.vae.config.z_dim image = self.video_processor.preprocess(image, height=height, width=width).to(device, dtype=torch.float32) latents, condition = self.prepare_latents( image, batch_size * num_videos_per_prompt, num_channels_latents, height, width, num_frames, torch.float32, device, generator, latents, ) # 6. 
Denoising loop num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order self._num_timesteps = len(timesteps) with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): if self.interrupt: continue if enable_temporal_reasoning and i == num_temporal_reasoning_steps: latents = latents[:, :, [0, -1]] condition = condition[:, :, [0, -1]] for j in range(len(self.scheduler.model_outputs)): if self.scheduler.model_outputs[j] is not None: if latents.shape[-3] != self.scheduler.model_outputs[j].shape[-3]: self.scheduler.model_outputs[j] = self.scheduler.model_outputs[j][:, :, [0, -1]] if self.scheduler.last_sample is not None: self.scheduler.last_sample = self.scheduler.last_sample[:, :, [0, -1]] self._current_timestep = t latent_model_input = torch.cat([latents, condition], dim=1).to(transformer_dtype) timestep = t.expand(latents.shape[0]) noise_pred = self.transformer( hidden_states=latent_model_input, timestep=timestep, encoder_hidden_states=prompt_embeds, encoder_hidden_states_image=image_embeds, attention_kwargs=attention_kwargs, return_dict=False, )[0] if self.do_classifier_free_guidance: noise_uncond = self.transformer( hidden_states=latent_model_input, timestep=timestep, encoder_hidden_states=negative_prompt_embeds, encoder_hidden_states_image=image_embeds, attention_kwargs=attention_kwargs, return_dict=False, )[0] noise_pred = noise_uncond + guidance_scale * (noise_pred - noise_uncond) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0] if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop("latents", latents) prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", 
negative_prompt_embeds) # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if XLA_AVAILABLE: xm.mark_step() self._current_timestep = None if not output_type == "latent": latents = latents.to(self.vae.dtype) latents_mean = ( torch.tensor(self.vae.config.latents_mean) .view(1, self.vae.config.z_dim, 1, 1, 1) .to(latents.device, latents.dtype) ) latents_std = 1.0 / torch.tensor(self.vae.config.latents_std).view(1, self.vae.config.z_dim, 1, 1, 1).to( latents.device, latents.dtype ) latents = latents / latents_std + latents_mean if enable_temporal_reasoning and latents.shape[2] > 2: video_edit = self.vae.decode(latents[:, :, [0, -1]], return_dict=False)[0] video_reason = self.vae.decode(latents[:, :, :-1], return_dict=False)[0] video = torch.cat([video_reason, video_edit[:, :, 1:]], dim=2) else: video = self.vae.decode(latents, return_dict=False)[0] video = self.video_processor.postprocess_video(video, output_type=output_type) else: video = latents # Offload all models self.maybe_free_model_hooks() if not return_dict: return (video,) return ChronoEditPipelineOutput(frames=video)
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/pipelines/chronoedit/pipeline_chronoedit.py", "license": "Apache License 2.0", "lines": 656, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:src/diffusers/pipelines/chronoedit/pipeline_output.py
from dataclasses import dataclass import torch from diffusers.utils import BaseOutput @dataclass class ChronoEditPipelineOutput(BaseOutput): r""" Output class for ChronoEdit pipelines. Args: frames (`torch.Tensor`, `np.ndarray`, or list[list[PIL.Image.Image]]): List of video outputs - It can be a nested list of length `batch_size,` with each sub-list containing denoised PIL image sequences of length `num_frames.` It can also be a NumPy array or Torch tensor of shape `(batch_size, num_frames, channels, height, width)`. """ frames: torch.Tensor
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/pipelines/chronoedit/pipeline_output.py", "license": "Apache License 2.0", "lines": 14, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
documentation
huggingface/diffusers:tests/pipelines/chronoedit/test_chronoedit.py
# Copyright 2025 The HuggingFace Team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import torch from PIL import Image from transformers import ( AutoConfig, AutoTokenizer, CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModelWithProjection, T5EncoderModel, ) from diffusers import ( AutoencoderKLWan, ChronoEditPipeline, ChronoEditTransformer3DModel, FlowMatchEulerDiscreteScheduler, ) from ...testing_utils import enable_full_determinism from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class ChronoEditPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = ChronoEditPipeline params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs", "height", "width"} batch_params = TEXT_TO_IMAGE_BATCH_PARAMS image_params = TEXT_TO_IMAGE_IMAGE_PARAMS image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS required_optional_params = frozenset( [ "num_inference_steps", "generator", "latents", "return_dict", "callback_on_step_end", "callback_on_step_end_tensor_inputs", ] ) test_xformers_attention = False supports_dduf = False def get_dummy_components(self): torch.manual_seed(0) vae = AutoencoderKLWan( base_dim=3, z_dim=16, dim_mult=[1, 1, 1, 1], num_res_blocks=1, temperal_downsample=[False, True, True], ) torch.manual_seed(0) # TODO: impl FlowDPMSolverMultistepScheduler scheduler = FlowMatchEulerDiscreteScheduler(shift=7.0) config = 
AutoConfig.from_pretrained("hf-internal-testing/tiny-random-t5") text_encoder = T5EncoderModel(config) tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") torch.manual_seed(0) transformer = ChronoEditTransformer3DModel( patch_size=(1, 2, 2), num_attention_heads=2, attention_head_dim=12, in_channels=36, out_channels=16, text_dim=32, freq_dim=256, ffn_dim=32, num_layers=2, cross_attn_norm=True, qk_norm="rms_norm_across_heads", rope_max_seq_len=32, image_dim=4, ) torch.manual_seed(0) image_encoder_config = CLIPVisionConfig( hidden_size=4, projection_dim=4, num_hidden_layers=2, num_attention_heads=2, image_size=32, intermediate_size=16, patch_size=1, ) image_encoder = CLIPVisionModelWithProjection(image_encoder_config) torch.manual_seed(0) image_processor = CLIPImageProcessor(crop_size=32, size=32) components = { "transformer": transformer, "vae": vae, "scheduler": scheduler, "text_encoder": text_encoder, "tokenizer": tokenizer, "image_encoder": image_encoder, "image_processor": image_processor, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) image_height = 16 image_width = 16 image = Image.new("RGB", (image_width, image_height)) inputs = { "image": image, "prompt": "dance monkey", "negative_prompt": "negative", # TODO "height": image_height, "width": image_width, "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "num_frames": 5, "max_sequence_length": 16, "output_type": "pt", } return inputs def test_inference(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) video = pipe(**inputs).frames generated_video = video[0] self.assertEqual(generated_video.shape, (5, 3, 16, 16)) # fmt: off expected_slice = torch.tensor([0.4525, 
0.4520, 0.4485, 0.4534, 0.4523, 0.4522, 0.4529, 0.4528, 0.5022, 0.5064, 0.5011, 0.5061, 0.5028, 0.4979, 0.5117, 0.5192]) # fmt: on generated_slice = generated_video.flatten() generated_slice = torch.cat([generated_slice[:8], generated_slice[-8:]]) self.assertTrue(torch.allclose(generated_slice, expected_slice, atol=1e-3)) @unittest.skip("Test not supported") def test_attention_slicing_forward_pass(self): pass @unittest.skip("TODO: revisit failing as it requires a very high threshold to pass") def test_inference_batch_single_identical(self): pass @unittest.skip( "ChronoEditPipeline has to run in mixed precision. Save/Load the entire pipeline in FP16 will result in errors" ) def test_save_load_float16(self): pass
{ "repo_id": "huggingface/diffusers", "file_path": "tests/pipelines/chronoedit/test_chronoedit.py", "license": "Apache License 2.0", "lines": 155, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/diffusers:scripts/convert_sana_video_to_diffusers.py
#!/usr/bin/env python from __future__ import annotations import argparse import os from contextlib import nullcontext import torch from accelerate import init_empty_weights from huggingface_hub import hf_hub_download, snapshot_download from termcolor import colored from transformers import AutoModelForCausalLM, AutoTokenizer from diffusers import ( AutoencoderKLWan, DPMSolverMultistepScheduler, FlowMatchEulerDiscreteScheduler, SanaVideoPipeline, SanaVideoTransformer3DModel, UniPCMultistepScheduler, ) from diffusers.utils.import_utils import is_accelerate_available CTX = init_empty_weights if is_accelerate_available else nullcontext ckpt_ids = ["Efficient-Large-Model/SANA-Video_2B_480p/checkpoints/SANA_Video_2B_480p.pth"] # https://github.com/NVlabs/Sana/blob/main/inference_video_scripts/inference_sana_video.py def main(args): cache_dir_path = os.path.expanduser("~/.cache/huggingface/hub") if args.orig_ckpt_path is None or args.orig_ckpt_path in ckpt_ids: ckpt_id = args.orig_ckpt_path or ckpt_ids[0] snapshot_download( repo_id=f"{'/'.join(ckpt_id.split('/')[:2])}", cache_dir=cache_dir_path, repo_type="model", ) file_path = hf_hub_download( repo_id=f"{'/'.join(ckpt_id.split('/')[:2])}", filename=f"{'/'.join(ckpt_id.split('/')[2:])}", cache_dir=cache_dir_path, repo_type="model", ) else: file_path = args.orig_ckpt_path print(colored(f"Loading checkpoint from {file_path}", "green", attrs=["bold"])) all_state_dict = torch.load(file_path, weights_only=True) state_dict = all_state_dict.pop("state_dict") converted_state_dict = {} # Patch embeddings. converted_state_dict["patch_embedding.weight"] = state_dict.pop("x_embedder.proj.weight") converted_state_dict["patch_embedding.bias"] = state_dict.pop("x_embedder.proj.bias") # Caption projection. 
converted_state_dict["caption_projection.linear_1.weight"] = state_dict.pop("y_embedder.y_proj.fc1.weight") converted_state_dict["caption_projection.linear_1.bias"] = state_dict.pop("y_embedder.y_proj.fc1.bias") converted_state_dict["caption_projection.linear_2.weight"] = state_dict.pop("y_embedder.y_proj.fc2.weight") converted_state_dict["caption_projection.linear_2.bias"] = state_dict.pop("y_embedder.y_proj.fc2.bias") converted_state_dict["time_embed.emb.timestep_embedder.linear_1.weight"] = state_dict.pop( "t_embedder.mlp.0.weight" ) converted_state_dict["time_embed.emb.timestep_embedder.linear_1.bias"] = state_dict.pop("t_embedder.mlp.0.bias") converted_state_dict["time_embed.emb.timestep_embedder.linear_2.weight"] = state_dict.pop( "t_embedder.mlp.2.weight" ) converted_state_dict["time_embed.emb.timestep_embedder.linear_2.bias"] = state_dict.pop("t_embedder.mlp.2.bias") # Shared norm. converted_state_dict["time_embed.linear.weight"] = state_dict.pop("t_block.1.weight") converted_state_dict["time_embed.linear.bias"] = state_dict.pop("t_block.1.bias") # y norm converted_state_dict["caption_norm.weight"] = state_dict.pop("attention_y_norm.weight") # scheduler flow_shift = 8.0 if args.task == "i2v": assert args.scheduler_type == "flow-euler", "Scheduler type must be flow-euler for i2v task." # model config layer_num = 20 # Positional embedding interpolation scale. qk_norm = True # sample size if args.video_size == 480: sample_size = 30 # Wan-VAE: 8xp2 downsample factor patch_size = (1, 2, 2) elif args.video_size == 720: sample_size = 22 # Wan-VAE: 32xp1 downsample factor patch_size = (1, 1, 1) else: raise ValueError(f"Video size {args.video_size} is not supported.") for depth in range(layer_num): # Transformer blocks. converted_state_dict[f"transformer_blocks.{depth}.scale_shift_table"] = state_dict.pop( f"blocks.{depth}.scale_shift_table" ) # Linear Attention is all you need 🤘 # Self attention. 
q, k, v = torch.chunk(state_dict.pop(f"blocks.{depth}.attn.qkv.weight"), 3, dim=0) converted_state_dict[f"transformer_blocks.{depth}.attn1.to_q.weight"] = q converted_state_dict[f"transformer_blocks.{depth}.attn1.to_k.weight"] = k converted_state_dict[f"transformer_blocks.{depth}.attn1.to_v.weight"] = v if qk_norm is not None: # Add Q/K normalization for self-attention (attn1) - needed for Sana-Sprint and Sana-1.5 converted_state_dict[f"transformer_blocks.{depth}.attn1.norm_q.weight"] = state_dict.pop( f"blocks.{depth}.attn.q_norm.weight" ) converted_state_dict[f"transformer_blocks.{depth}.attn1.norm_k.weight"] = state_dict.pop( f"blocks.{depth}.attn.k_norm.weight" ) # Projection. converted_state_dict[f"transformer_blocks.{depth}.attn1.to_out.0.weight"] = state_dict.pop( f"blocks.{depth}.attn.proj.weight" ) converted_state_dict[f"transformer_blocks.{depth}.attn1.to_out.0.bias"] = state_dict.pop( f"blocks.{depth}.attn.proj.bias" ) # Feed-forward. converted_state_dict[f"transformer_blocks.{depth}.ff.conv_inverted.weight"] = state_dict.pop( f"blocks.{depth}.mlp.inverted_conv.conv.weight" ) converted_state_dict[f"transformer_blocks.{depth}.ff.conv_inverted.bias"] = state_dict.pop( f"blocks.{depth}.mlp.inverted_conv.conv.bias" ) converted_state_dict[f"transformer_blocks.{depth}.ff.conv_depth.weight"] = state_dict.pop( f"blocks.{depth}.mlp.depth_conv.conv.weight" ) converted_state_dict[f"transformer_blocks.{depth}.ff.conv_depth.bias"] = state_dict.pop( f"blocks.{depth}.mlp.depth_conv.conv.bias" ) converted_state_dict[f"transformer_blocks.{depth}.ff.conv_point.weight"] = state_dict.pop( f"blocks.{depth}.mlp.point_conv.conv.weight" ) converted_state_dict[f"transformer_blocks.{depth}.ff.conv_temp.weight"] = state_dict.pop( f"blocks.{depth}.mlp.t_conv.weight" ) # Cross-attention. 
q = state_dict.pop(f"blocks.{depth}.cross_attn.q_linear.weight") q_bias = state_dict.pop(f"blocks.{depth}.cross_attn.q_linear.bias") k, v = torch.chunk(state_dict.pop(f"blocks.{depth}.cross_attn.kv_linear.weight"), 2, dim=0) k_bias, v_bias = torch.chunk(state_dict.pop(f"blocks.{depth}.cross_attn.kv_linear.bias"), 2, dim=0) converted_state_dict[f"transformer_blocks.{depth}.attn2.to_q.weight"] = q converted_state_dict[f"transformer_blocks.{depth}.attn2.to_q.bias"] = q_bias converted_state_dict[f"transformer_blocks.{depth}.attn2.to_k.weight"] = k converted_state_dict[f"transformer_blocks.{depth}.attn2.to_k.bias"] = k_bias converted_state_dict[f"transformer_blocks.{depth}.attn2.to_v.weight"] = v converted_state_dict[f"transformer_blocks.{depth}.attn2.to_v.bias"] = v_bias if qk_norm is not None: # Add Q/K normalization for cross-attention (attn2) - needed for Sana-Sprint and Sana-1.5 converted_state_dict[f"transformer_blocks.{depth}.attn2.norm_q.weight"] = state_dict.pop( f"blocks.{depth}.cross_attn.q_norm.weight" ) converted_state_dict[f"transformer_blocks.{depth}.attn2.norm_k.weight"] = state_dict.pop( f"blocks.{depth}.cross_attn.k_norm.weight" ) converted_state_dict[f"transformer_blocks.{depth}.attn2.to_out.0.weight"] = state_dict.pop( f"blocks.{depth}.cross_attn.proj.weight" ) converted_state_dict[f"transformer_blocks.{depth}.attn2.to_out.0.bias"] = state_dict.pop( f"blocks.{depth}.cross_attn.proj.bias" ) # Final block. 
converted_state_dict["proj_out.weight"] = state_dict.pop("final_layer.linear.weight") converted_state_dict["proj_out.bias"] = state_dict.pop("final_layer.linear.bias") converted_state_dict["scale_shift_table"] = state_dict.pop("final_layer.scale_shift_table") # Transformer with CTX(): transformer_kwargs = { "in_channels": 16, "out_channels": 16, "num_attention_heads": 20, "attention_head_dim": 112, "num_layers": 20, "num_cross_attention_heads": 20, "cross_attention_head_dim": 112, "cross_attention_dim": 2240, "caption_channels": 2304, "mlp_ratio": 3.0, "attention_bias": False, "sample_size": sample_size, "patch_size": patch_size, "norm_elementwise_affine": False, "norm_eps": 1e-6, "qk_norm": "rms_norm_across_heads", "rope_max_seq_len": 1024, } transformer = SanaVideoTransformer3DModel(**transformer_kwargs) transformer.load_state_dict(converted_state_dict, strict=True, assign=True) try: state_dict.pop("y_embedder.y_embedding") state_dict.pop("pos_embed") state_dict.pop("logvar_linear.weight") state_dict.pop("logvar_linear.bias") except KeyError: print("y_embedder.y_embedding or pos_embed not found in the state_dict") assert len(state_dict) == 0, f"State dict is not empty, {state_dict.keys()}" num_model_params = sum(p.numel() for p in transformer.parameters()) print(f"Total number of transformer parameters: {num_model_params}") transformer = transformer.to(weight_dtype) if not args.save_full_pipeline: print( colored( f"Only saving transformer model of {args.model_type}. 
" f"Set --save_full_pipeline to save the whole Pipeline", "green", attrs=["bold"], ) ) transformer.save_pretrained( os.path.join(args.dump_path, "transformer"), safe_serialization=True, max_shard_size="5GB" ) else: print(colored(f"Saving the whole Pipeline containing {args.model_type}", "green", attrs=["bold"])) # VAE vae = AutoencoderKLWan.from_pretrained( "Wan-AI/Wan2.1-T2V-1.3B-Diffusers", subfolder="vae", torch_dtype=torch.float32 ) # Text Encoder text_encoder_model_path = "Efficient-Large-Model/gemma-2-2b-it" tokenizer = AutoTokenizer.from_pretrained(text_encoder_model_path) tokenizer.padding_side = "right" text_encoder = AutoModelForCausalLM.from_pretrained( text_encoder_model_path, torch_dtype=torch.bfloat16 ).get_decoder() # Choose the appropriate pipeline and scheduler based on model type # Original Sana scheduler if args.scheduler_type == "flow-dpm_solver": scheduler = DPMSolverMultistepScheduler( flow_shift=flow_shift, use_flow_sigmas=True, prediction_type="flow_prediction", ) elif args.scheduler_type == "flow-euler": scheduler = FlowMatchEulerDiscreteScheduler(shift=flow_shift) elif args.scheduler_type == "uni-pc": scheduler = UniPCMultistepScheduler( prediction_type="flow_prediction", use_flow_sigmas=True, num_train_timesteps=1000, flow_shift=flow_shift, ) else: raise ValueError(f"Scheduler type {args.scheduler_type} is not supported") pipe = SanaVideoPipeline( tokenizer=tokenizer, text_encoder=text_encoder, transformer=transformer, vae=vae, scheduler=scheduler, ) pipe.save_pretrained(args.dump_path, safe_serialization=True, max_shard_size="5GB") DTYPE_MAPPING = { "fp32": torch.float32, "fp16": torch.float16, "bf16": torch.bfloat16, } if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--orig_ckpt_path", default=None, type=str, required=False, help="Path to the checkpoint to convert." 
) parser.add_argument( "--video_size", default=480, type=int, choices=[480, 720], required=False, help="Video size of pretrained model, 480 or 720.", ) parser.add_argument( "--model_type", default="SanaVideo", type=str, choices=[ "SanaVideo", ], ) parser.add_argument( "--scheduler_type", default="flow-dpm_solver", type=str, choices=["flow-dpm_solver", "flow-euler", "uni-pc"], help="Scheduler type to use.", ) parser.add_argument("--task", default="t2v", type=str, required=True, help="Task to convert, t2v or i2v.") parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output pipeline.") parser.add_argument("--save_full_pipeline", action="store_true", help="save all the pipeline elements in one.") parser.add_argument("--dtype", default="fp32", type=str, choices=["fp32", "fp16", "bf16"], help="Weight dtype.") args = parser.parse_args() device = "cuda" if torch.cuda.is_available() else "cpu" weight_dtype = DTYPE_MAPPING[args.dtype] main(args)
{ "repo_id": "huggingface/diffusers", "file_path": "scripts/convert_sana_video_to_diffusers.py", "license": "Apache License 2.0", "lines": 282, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
huggingface/diffusers:src/diffusers/models/transformers/transformer_sana_video.py
# Copyright 2025 The HuggingFace Team and SANA-Video Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math from typing import Any import torch import torch.nn.functional as F from torch import nn from ...configuration_utils import ConfigMixin, register_to_config from ...loaders import FromOriginalModelMixin, PeftAdapterMixin from ...utils import apply_lora_scale, logging from ..attention import AttentionMixin from ..attention_dispatch import dispatch_attention_fn from ..attention_processor import Attention from ..embeddings import PixArtAlphaTextProjection, TimestepEmbedding, Timesteps, get_1d_rotary_pos_embed from ..modeling_outputs import Transformer2DModelOutput from ..modeling_utils import ModelMixin from ..normalization import AdaLayerNormSingle, RMSNorm logger = logging.get_logger(__name__) # pylint: disable=invalid-name class GLUMBTempConv(nn.Module): def __init__( self, in_channels: int, out_channels: int, expand_ratio: float = 4, norm_type: str | None = None, residual_connection: bool = True, ) -> None: super().__init__() hidden_channels = int(expand_ratio * in_channels) self.norm_type = norm_type self.residual_connection = residual_connection self.nonlinearity = nn.SiLU() self.conv_inverted = nn.Conv2d(in_channels, hidden_channels * 2, 1, 1, 0) self.conv_depth = nn.Conv2d(hidden_channels * 2, hidden_channels * 2, 3, 1, 1, groups=hidden_channels * 2) self.conv_point = nn.Conv2d(hidden_channels, out_channels, 1, 1, 0, bias=False) self.norm 
= None if norm_type == "rms_norm": self.norm = RMSNorm(out_channels, eps=1e-5, elementwise_affine=True, bias=True) self.conv_temp = nn.Conv2d( out_channels, out_channels, kernel_size=(3, 1), stride=1, padding=(1, 0), bias=False ) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: if self.residual_connection: residual = hidden_states batch_size, num_frames, height, width, num_channels = hidden_states.shape hidden_states = hidden_states.view(batch_size * num_frames, height, width, num_channels).permute(0, 3, 1, 2) hidden_states = self.conv_inverted(hidden_states) hidden_states = self.nonlinearity(hidden_states) hidden_states = self.conv_depth(hidden_states) hidden_states, gate = torch.chunk(hidden_states, 2, dim=1) hidden_states = hidden_states * self.nonlinearity(gate) hidden_states = self.conv_point(hidden_states) # Temporal aggregation hidden_states_temporal = hidden_states.view(batch_size, num_frames, num_channels, height * width).permute( 0, 2, 1, 3 ) hidden_states = hidden_states_temporal + self.conv_temp(hidden_states_temporal) hidden_states = hidden_states.permute(0, 2, 3, 1).view(batch_size, num_frames, height, width, num_channels) if self.norm_type == "rms_norm": # move channel to the last dimension so we apply RMSnorm across channel dimension hidden_states = self.norm(hidden_states.movedim(1, -1)).movedim(-1, 1) if self.residual_connection: hidden_states = hidden_states + residual return hidden_states class SanaLinearAttnProcessor3_0: r""" Processor for implementing scaled dot-product linear attention. 
""" def __call__( self, attn: Attention, hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor | None = None, attention_mask: torch.Tensor | None = None, rotary_emb: torch.Tensor | None = None, ) -> torch.Tensor: original_dtype = hidden_states.dtype if encoder_hidden_states is None: encoder_hidden_states = hidden_states query = attn.to_q(hidden_states) key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) if attn.norm_q is not None: query = attn.norm_q(query) if attn.norm_k is not None: key = attn.norm_k(key) query = query.unflatten(2, (attn.heads, -1)) key = key.unflatten(2, (attn.heads, -1)) value = value.unflatten(2, (attn.heads, -1)) # B,N,H,C query = F.relu(query) key = F.relu(key) if rotary_emb is not None: def apply_rotary_emb( hidden_states: torch.Tensor, freqs_cos: torch.Tensor, freqs_sin: torch.Tensor, ): x1, x2 = hidden_states.unflatten(-1, (-1, 2)).unbind(-1) cos = freqs_cos[..., 0::2] sin = freqs_sin[..., 1::2] out = torch.empty_like(hidden_states) out[..., 0::2] = x1 * cos - x2 * sin out[..., 1::2] = x1 * sin + x2 * cos return out.type_as(hidden_states) query_rotate = apply_rotary_emb(query, *rotary_emb) key_rotate = apply_rotary_emb(key, *rotary_emb) # B,H,C,N query = query.permute(0, 2, 3, 1) key = key.permute(0, 2, 3, 1) query_rotate = query_rotate.permute(0, 2, 3, 1) key_rotate = key_rotate.permute(0, 2, 3, 1) value = value.permute(0, 2, 3, 1) query_rotate, key_rotate, value = query_rotate.float(), key_rotate.float(), value.float() z = 1 / (key.sum(dim=-1, keepdim=True).transpose(-2, -1) @ query + 1e-15) scores = torch.matmul(value, key_rotate.transpose(-1, -2)) hidden_states = torch.matmul(scores, query_rotate) hidden_states = hidden_states * z # B,H,C,N hidden_states = hidden_states.flatten(1, 2).transpose(1, 2) hidden_states = hidden_states.to(original_dtype) hidden_states = attn.to_out[0](hidden_states) hidden_states = attn.to_out[1](hidden_states) return hidden_states class WanRotaryPosEmbed(nn.Module): 
def __init__( self, attention_head_dim: int, patch_size: tuple[int, int, int], max_seq_len: int, theta: float = 10000.0, ): super().__init__() self.attention_head_dim = attention_head_dim self.patch_size = patch_size self.max_seq_len = max_seq_len h_dim = w_dim = 2 * (attention_head_dim // 6) t_dim = attention_head_dim - h_dim - w_dim self.t_dim = t_dim self.h_dim = h_dim self.w_dim = w_dim freqs_dtype = torch.float32 if torch.backends.mps.is_available() else torch.float64 freqs_cos = [] freqs_sin = [] for dim in [t_dim, h_dim, w_dim]: freq_cos, freq_sin = get_1d_rotary_pos_embed( dim, max_seq_len, theta, use_real=True, repeat_interleave_real=True, freqs_dtype=freqs_dtype, ) freqs_cos.append(freq_cos) freqs_sin.append(freq_sin) self.register_buffer("freqs_cos", torch.cat(freqs_cos, dim=1), persistent=False) self.register_buffer("freqs_sin", torch.cat(freqs_sin, dim=1), persistent=False) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: batch_size, num_channels, num_frames, height, width = hidden_states.shape p_t, p_h, p_w = self.patch_size ppf, pph, ppw = num_frames // p_t, height // p_h, width // p_w split_sizes = [self.t_dim, self.h_dim, self.w_dim] freqs_cos = self.freqs_cos.split(split_sizes, dim=1) freqs_sin = self.freqs_sin.split(split_sizes, dim=1) freqs_cos_f = freqs_cos[0][:ppf].view(ppf, 1, 1, -1).expand(ppf, pph, ppw, -1) freqs_cos_h = freqs_cos[1][:pph].view(1, pph, 1, -1).expand(ppf, pph, ppw, -1) freqs_cos_w = freqs_cos[2][:ppw].view(1, 1, ppw, -1).expand(ppf, pph, ppw, -1) freqs_sin_f = freqs_sin[0][:ppf].view(ppf, 1, 1, -1).expand(ppf, pph, ppw, -1) freqs_sin_h = freqs_sin[1][:pph].view(1, pph, 1, -1).expand(ppf, pph, ppw, -1) freqs_sin_w = freqs_sin[2][:ppw].view(1, 1, ppw, -1).expand(ppf, pph, ppw, -1) freqs_cos = torch.cat([freqs_cos_f, freqs_cos_h, freqs_cos_w], dim=-1).reshape(1, ppf * pph * ppw, 1, -1) freqs_sin = torch.cat([freqs_sin_f, freqs_sin_h, freqs_sin_w], dim=-1).reshape(1, ppf * pph * ppw, 1, -1) return freqs_cos, 
freqs_sin class SanaModulatedNorm(nn.Module): def __init__(self, dim: int, elementwise_affine: bool = False, eps: float = 1e-6): super().__init__() self.norm = nn.LayerNorm(dim, elementwise_affine=elementwise_affine, eps=eps) def forward( self, hidden_states: torch.Tensor, temb: torch.Tensor, scale_shift_table: torch.Tensor ) -> torch.Tensor: hidden_states = self.norm(hidden_states) shift, scale = (scale_shift_table[None, None] + temb[:, :, None].to(scale_shift_table.device)).unbind(dim=2) hidden_states = hidden_states * (1 + scale) + shift return hidden_states class SanaCombinedTimestepGuidanceEmbeddings(nn.Module): def __init__(self, embedding_dim): super().__init__() self.time_proj = Timesteps(num_channels=256, flip_sin_to_cos=True, downscale_freq_shift=0) self.timestep_embedder = TimestepEmbedding(in_channels=256, time_embed_dim=embedding_dim) self.guidance_condition_proj = Timesteps(num_channels=256, flip_sin_to_cos=True, downscale_freq_shift=0) self.guidance_embedder = TimestepEmbedding(in_channels=256, time_embed_dim=embedding_dim) self.silu = nn.SiLU() self.linear = nn.Linear(embedding_dim, 6 * embedding_dim, bias=True) def forward(self, timestep: torch.Tensor, guidance: torch.Tensor = None, hidden_dtype: torch.dtype = None): timesteps_proj = self.time_proj(timestep) timesteps_emb = self.timestep_embedder(timesteps_proj.to(dtype=hidden_dtype)) # (N, D) guidance_proj = self.guidance_condition_proj(guidance) guidance_emb = self.guidance_embedder(guidance_proj.to(dtype=hidden_dtype)) conditioning = timesteps_emb + guidance_emb return self.linear(self.silu(conditioning)), conditioning class SanaAttnProcessor2_0: r""" Processor for implementing scaled dot-product attention (enabled by default if you're using PyTorch 2.0). 
""" _attention_backend = None _parallel_config = None def __init__(self): if not hasattr(F, "scaled_dot_product_attention"): raise ImportError("SanaAttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.") def __call__( self, attn: Attention, hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor | None = None, attention_mask: torch.Tensor | None = None, ) -> torch.Tensor: batch_size, sequence_length, _ = ( hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape ) if attention_mask is not None: attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) # scaled_dot_product_attention expects attention_mask shape to be # (batch, heads, source_length, target_length) attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1]) query = attn.to_q(hidden_states) if encoder_hidden_states is None: encoder_hidden_states = hidden_states key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) if attn.norm_q is not None: query = attn.norm_q(query) if attn.norm_k is not None: key = attn.norm_k(key) inner_dim = key.shape[-1] head_dim = inner_dim // attn.heads query = query.view(batch_size, -1, attn.heads, head_dim) key = key.view(batch_size, -1, attn.heads, head_dim) value = value.view(batch_size, -1, attn.heads, head_dim) # the output of sdp = (batch, num_heads, seq_len, head_dim) hidden_states = dispatch_attention_fn( query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False, backend=self._attention_backend, parallel_config=self._parallel_config, ) hidden_states = hidden_states.flatten(2, 3) hidden_states = hidden_states.type_as(query) # linear proj hidden_states = attn.to_out[0](hidden_states) # dropout hidden_states = attn.to_out[1](hidden_states) hidden_states = hidden_states / attn.rescale_output_factor return hidden_states class SanaVideoTransformerBlock(nn.Module): r""" Transformer block introduced in 
[Sana-Video](https://huggingface.co/papers/2509.24695). """ def __init__( self, dim: int = 2240, num_attention_heads: int = 20, attention_head_dim: int = 112, dropout: float = 0.0, num_cross_attention_heads: int | None = 20, cross_attention_head_dim: int | None = 112, cross_attention_dim: int | None = 2240, attention_bias: bool = True, norm_elementwise_affine: bool = False, norm_eps: float = 1e-6, attention_out_bias: bool = True, mlp_ratio: float = 3.0, qk_norm: str | None = "rms_norm_across_heads", rope_max_seq_len: int = 1024, ) -> None: super().__init__() # 1. Self Attention self.norm1 = nn.LayerNorm(dim, elementwise_affine=False, eps=norm_eps) self.attn1 = Attention( query_dim=dim, heads=num_attention_heads, dim_head=attention_head_dim, kv_heads=num_attention_heads if qk_norm is not None else None, qk_norm=qk_norm, dropout=dropout, bias=attention_bias, cross_attention_dim=None, processor=SanaLinearAttnProcessor3_0(), ) # 2. Cross Attention if cross_attention_dim is not None: self.norm2 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine, eps=norm_eps) self.attn2 = Attention( query_dim=dim, qk_norm=qk_norm, kv_heads=num_cross_attention_heads if qk_norm is not None else None, cross_attention_dim=cross_attention_dim, heads=num_cross_attention_heads, dim_head=cross_attention_head_dim, dropout=dropout, bias=True, out_bias=attention_out_bias, processor=SanaAttnProcessor2_0(), ) # 3. Feed-forward self.ff = GLUMBTempConv(dim, dim, mlp_ratio, norm_type=None, residual_connection=False) self.scale_shift_table = nn.Parameter(torch.randn(6, dim) / dim**0.5) def forward( self, hidden_states: torch.Tensor, attention_mask: torch.Tensor | None = None, encoder_hidden_states: torch.Tensor | None = None, encoder_attention_mask: torch.Tensor | None = None, timestep: torch.LongTensor | None = None, frames: int = None, height: int = None, width: int = None, rotary_emb: torch.Tensor | None = None, ) -> torch.Tensor: batch_size = hidden_states.shape[0] # 1. 
Modulation shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = ( self.scale_shift_table[None, None] + timestep.reshape(batch_size, timestep.shape[1], 6, -1) ).unbind(dim=2) # 2. Self Attention norm_hidden_states = self.norm1(hidden_states) norm_hidden_states = norm_hidden_states * (1 + scale_msa) + shift_msa norm_hidden_states = norm_hidden_states.to(hidden_states.dtype) attn_output = self.attn1(norm_hidden_states, rotary_emb=rotary_emb) hidden_states = hidden_states + gate_msa * attn_output # 3. Cross Attention if self.attn2 is not None: attn_output = self.attn2( hidden_states, encoder_hidden_states=encoder_hidden_states, attention_mask=encoder_attention_mask, ) hidden_states = attn_output + hidden_states # 4. Feed-forward norm_hidden_states = self.norm2(hidden_states) norm_hidden_states = norm_hidden_states * (1 + scale_mlp) + shift_mlp norm_hidden_states = norm_hidden_states.unflatten(1, (frames, height, width)) ff_output = self.ff(norm_hidden_states) ff_output = ff_output.flatten(1, 3) hidden_states = hidden_states + gate_mlp * ff_output return hidden_states class SanaVideoTransformer3DModel(ModelMixin, ConfigMixin, PeftAdapterMixin, FromOriginalModelMixin, AttentionMixin): r""" A 3D Transformer model introduced in [Sana-Video](https://huggingface.co/papers/2509.24695) family of models. Args: in_channels (`int`, defaults to `16`): The number of channels in the input. out_channels (`int`, *optional*, defaults to `16`): The number of channels in the output. num_attention_heads (`int`, defaults to `20`): The number of heads to use for multi-head attention. attention_head_dim (`int`, defaults to `112`): The number of channels in each head. num_layers (`int`, defaults to `20`): The number of layers of Transformer blocks to use. num_cross_attention_heads (`int`, *optional*, defaults to `20`): The number of heads to use for cross-attention. 
cross_attention_head_dim (`int`, *optional*, defaults to `112`): The number of channels in each head for cross-attention. cross_attention_dim (`int`, *optional*, defaults to `2240`): The number of channels in the cross-attention output. caption_channels (`int`, defaults to `2304`): The number of channels in the caption embeddings. mlp_ratio (`float`, defaults to `2.5`): The expansion ratio to use in the GLUMBConv layer. dropout (`float`, defaults to `0.0`): The dropout probability. attention_bias (`bool`, defaults to `False`): Whether to use bias in the attention layer. sample_size (`int`, defaults to `32`): The base size of the input latent. patch_size (`int`, defaults to `1`): The size of the patches to use in the patch embedding layer. norm_elementwise_affine (`bool`, defaults to `False`): Whether to use elementwise affinity in the normalization layer. norm_eps (`float`, defaults to `1e-6`): The epsilon value for the normalization layer. qk_norm (`str`, *optional*, defaults to `None`): The normalization to use for the query and key. 
""" _supports_gradient_checkpointing = True _no_split_modules = ["SanaVideoTransformerBlock", "SanaModulatedNorm"] _skip_layerwise_casting_patterns = ["patch_embedding", "norm"] @register_to_config def __init__( self, in_channels: int = 16, out_channels: int | None = 16, num_attention_heads: int = 20, attention_head_dim: int = 112, num_layers: int = 20, num_cross_attention_heads: int | None = 20, cross_attention_head_dim: int | None = 112, cross_attention_dim: int | None = 2240, caption_channels: int = 2304, mlp_ratio: float = 2.5, dropout: float = 0.0, attention_bias: bool = False, sample_size: int = 30, patch_size: tuple[int, int, int] = (1, 2, 2), norm_elementwise_affine: bool = False, norm_eps: float = 1e-6, interpolation_scale: int | None = None, guidance_embeds: bool = False, guidance_embeds_scale: float = 0.1, qk_norm: str | None = "rms_norm_across_heads", rope_max_seq_len: int = 1024, ) -> None: super().__init__() out_channels = out_channels or in_channels inner_dim = num_attention_heads * attention_head_dim # 1. Patch & position embedding self.rope = WanRotaryPosEmbed(attention_head_dim, patch_size, rope_max_seq_len) self.patch_embedding = nn.Conv3d(in_channels, inner_dim, kernel_size=patch_size, stride=patch_size) # 2. Additional condition embeddings if guidance_embeds: self.time_embed = SanaCombinedTimestepGuidanceEmbeddings(inner_dim) else: self.time_embed = AdaLayerNormSingle(inner_dim) self.caption_projection = PixArtAlphaTextProjection(in_features=caption_channels, hidden_size=inner_dim) self.caption_norm = RMSNorm(inner_dim, eps=1e-5, elementwise_affine=True) # 3. 
Transformer blocks self.transformer_blocks = nn.ModuleList( [ SanaVideoTransformerBlock( inner_dim, num_attention_heads, attention_head_dim, dropout=dropout, num_cross_attention_heads=num_cross_attention_heads, cross_attention_head_dim=cross_attention_head_dim, cross_attention_dim=cross_attention_dim, attention_bias=attention_bias, norm_elementwise_affine=norm_elementwise_affine, norm_eps=norm_eps, mlp_ratio=mlp_ratio, qk_norm=qk_norm, ) for _ in range(num_layers) ] ) # 4. Output blocks self.scale_shift_table = nn.Parameter(torch.randn(2, inner_dim) / inner_dim**0.5) self.norm_out = SanaModulatedNorm(inner_dim, elementwise_affine=False, eps=1e-6) self.proj_out = nn.Linear(inner_dim, math.prod(patch_size) * out_channels) self.gradient_checkpointing = False @apply_lora_scale("attention_kwargs") def forward( self, hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor, timestep: torch.Tensor, guidance: torch.Tensor | None = None, encoder_attention_mask: torch.Tensor | None = None, attention_mask: torch.Tensor | None = None, attention_kwargs: dict[str, Any] | None = None, controlnet_block_samples: tuple[torch.Tensor] | None = None, return_dict: bool = True, ) -> tuple[torch.Tensor, ...] | Transformer2DModelOutput: # ensure attention_mask is a bias, and give it a singleton query_tokens dimension. # we may have done this conversion already, e.g. if we came here via UNet2DConditionModel#forward. # we can tell by counting dims; if ndim == 2: it's a mask rather than a bias. # expects mask of shape: # [batch, key_tokens] # adds singleton query_tokens dimension: # [batch, 1, key_tokens] # this helps to broadcast it as a bias over attention scores, which will be in one of the following shapes: # [batch, heads, query_tokens, key_tokens] (e.g. torch sdp attn) # [batch * heads, query_tokens, key_tokens] (e.g. 
xformers or classic attn) if attention_mask is not None and attention_mask.ndim == 2: # assume that mask is expressed as: # (1 = keep, 0 = discard) # convert mask into a bias that can be added to attention scores: # (keep = +0, discard = -10000.0) attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0 attention_mask = attention_mask.unsqueeze(1) # convert encoder_attention_mask to a bias the same way we do for attention_mask if encoder_attention_mask is not None and encoder_attention_mask.ndim == 2: encoder_attention_mask = (1 - encoder_attention_mask.to(hidden_states.dtype)) * -10000.0 encoder_attention_mask = encoder_attention_mask.unsqueeze(1) # 1. Input batch_size, num_channels, num_frames, height, width = hidden_states.shape p_t, p_h, p_w = self.config.patch_size post_patch_num_frames = num_frames // p_t post_patch_height = height // p_h post_patch_width = width // p_w rotary_emb = self.rope(hidden_states) hidden_states = self.patch_embedding(hidden_states) hidden_states = hidden_states.flatten(2).transpose(1, 2) if guidance is not None: timestep, embedded_timestep = self.time_embed( timestep.flatten(), guidance=guidance, hidden_dtype=hidden_states.dtype ) else: timestep, embedded_timestep = self.time_embed( timestep.flatten(), batch_size=batch_size, hidden_dtype=hidden_states.dtype ) timestep = timestep.view(batch_size, -1, timestep.size(-1)) embedded_timestep = embedded_timestep.view(batch_size, -1, embedded_timestep.size(-1)) encoder_hidden_states = self.caption_projection(encoder_hidden_states) encoder_hidden_states = encoder_hidden_states.view(batch_size, -1, hidden_states.shape[-1]) encoder_hidden_states = self.caption_norm(encoder_hidden_states) # 2. 
Transformer blocks if torch.is_grad_enabled() and self.gradient_checkpointing: for index_block, block in enumerate(self.transformer_blocks): hidden_states = self._gradient_checkpointing_func( block, hidden_states, attention_mask, encoder_hidden_states, encoder_attention_mask, timestep, post_patch_num_frames, post_patch_height, post_patch_width, rotary_emb, ) if controlnet_block_samples is not None and 0 < index_block <= len(controlnet_block_samples): hidden_states = hidden_states + controlnet_block_samples[index_block - 1] else: for index_block, block in enumerate(self.transformer_blocks): hidden_states = block( hidden_states, attention_mask, encoder_hidden_states, encoder_attention_mask, timestep, post_patch_num_frames, post_patch_height, post_patch_width, rotary_emb, ) if controlnet_block_samples is not None and 0 < index_block <= len(controlnet_block_samples): hidden_states = hidden_states + controlnet_block_samples[index_block - 1] # 3. Normalization hidden_states = self.norm_out(hidden_states, embedded_timestep, self.scale_shift_table) hidden_states = self.proj_out(hidden_states) # 5. Unpatchify hidden_states = hidden_states.reshape( batch_size, post_patch_num_frames, post_patch_height, post_patch_width, p_t, p_h, p_w, -1 ) hidden_states = hidden_states.permute(0, 7, 1, 4, 2, 5, 3, 6) output = hidden_states.flatten(6, 7).flatten(4, 5).flatten(2, 3) if not return_dict: return (output,) return Transformer2DModelOutput(sample=output)
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/models/transformers/transformer_sana_video.py", "license": "Apache License 2.0", "lines": 565, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:tests/models/transformers/test_models_transformer_sana_video.py
# Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import torch from diffusers import SanaVideoTransformer3DModel from ...testing_utils import ( enable_full_determinism, torch_device, ) from ..test_modeling_common import ModelTesterMixin, TorchCompileTesterMixin enable_full_determinism() class SanaVideoTransformer3DTests(ModelTesterMixin, unittest.TestCase): model_class = SanaVideoTransformer3DModel main_input_name = "hidden_states" uses_custom_attn_processor = True @property def dummy_input(self): batch_size = 1 num_channels = 16 num_frames = 2 height = 16 width = 16 text_encoder_embedding_dim = 16 sequence_length = 12 hidden_states = torch.randn((batch_size, num_channels, num_frames, height, width)).to(torch_device) timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device) encoder_hidden_states = torch.randn((batch_size, sequence_length, text_encoder_embedding_dim)).to(torch_device) return { "hidden_states": hidden_states, "encoder_hidden_states": encoder_hidden_states, "timestep": timestep, } @property def input_shape(self): return (16, 2, 16, 16) @property def output_shape(self): return (16, 2, 16, 16) def prepare_init_args_and_inputs_for_common(self): init_dict = { "in_channels": 16, "out_channels": 16, "num_attention_heads": 2, "attention_head_dim": 12, "num_layers": 2, "num_cross_attention_heads": 2, "cross_attention_head_dim": 12, "cross_attention_dim": 24, "caption_channels": 16, "mlp_ratio": 2.5, "dropout": 0.0, 
"attention_bias": False, "sample_size": 8, "patch_size": (1, 2, 2), "norm_elementwise_affine": False, "norm_eps": 1e-6, "qk_norm": "rms_norm_across_heads", "rope_max_seq_len": 32, } inputs_dict = self.dummy_input return init_dict, inputs_dict def test_gradient_checkpointing_is_applied(self): expected_set = {"SanaVideoTransformer3DModel"} super().test_gradient_checkpointing_is_applied(expected_set=expected_set) class SanaVideoTransformerCompileTests(TorchCompileTesterMixin, unittest.TestCase): model_class = SanaVideoTransformer3DModel def prepare_init_args_and_inputs_for_common(self): return SanaVideoTransformer3DTests().prepare_init_args_and_inputs_for_common()
{ "repo_id": "huggingface/diffusers", "file_path": "tests/models/transformers/test_models_transformer_sana_video.py", "license": "Apache License 2.0", "lines": 79, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/diffusers:tests/modular_pipelines/flux/test_modular_pipeline_flux.py
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import random import tempfile import numpy as np import PIL import torch from diffusers.image_processor import VaeImageProcessor from diffusers.modular_pipelines import ( FluxAutoBlocks, FluxKontextAutoBlocks, FluxKontextModularPipeline, FluxModularPipeline, ModularPipeline, ) from ...testing_utils import floats_tensor, torch_device from ..test_modular_pipelines_common import ModularPipelineTesterMixin FLUX_TEXT2IMAGE_WORKFLOWS = { "text2image": [ ("text_encoder", "FluxTextEncoderStep"), ("denoise.input", "FluxTextInputStep"), ("denoise.before_denoise.prepare_latents", "FluxPrepareLatentsStep"), ("denoise.before_denoise.set_timesteps", "FluxSetTimestepsStep"), ("denoise.before_denoise.prepare_rope_inputs", "FluxRoPEInputsStep"), ("denoise.denoise", "FluxDenoiseStep"), ("decode", "FluxDecodeStep"), ] } class TestFluxModularPipelineFast(ModularPipelineTesterMixin): pipeline_class = FluxModularPipeline pipeline_blocks_class = FluxAutoBlocks pretrained_model_name_or_path = "hf-internal-testing/tiny-flux-modular" params = frozenset(["prompt", "height", "width", "guidance_scale"]) batch_params = frozenset(["prompt"]) expected_workflow_blocks = FLUX_TEXT2IMAGE_WORKFLOWS def get_dummy_inputs(self, seed=0): generator = self.get_generator(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 5.0, "height": 8, 
"width": 8, "max_sequence_length": 48, "output_type": "pt", } return inputs def test_float16_inference(self): super().test_float16_inference(9e-2) FLUX_IMAGE2IMAGE_WORKFLOWS = { "image2image": [ ("text_encoder", "FluxTextEncoderStep"), ("vae_encoder.preprocess", "FluxProcessImagesInputStep"), ("vae_encoder.encode", "FluxVaeEncoderStep"), ("denoise.input.text_inputs", "FluxTextInputStep"), ("denoise.input.additional_inputs", "FluxAdditionalInputsStep"), ("denoise.before_denoise.prepare_latents", "FluxPrepareLatentsStep"), ("denoise.before_denoise.set_timesteps", "FluxImg2ImgSetTimestepsStep"), ("denoise.before_denoise.prepare_img2img_latents", "FluxImg2ImgPrepareLatentsStep"), ("denoise.before_denoise.prepare_rope_inputs", "FluxRoPEInputsStep"), ("denoise.denoise", "FluxDenoiseStep"), ("decode", "FluxDecodeStep"), ] } class TestFluxImg2ImgModularPipelineFast(ModularPipelineTesterMixin): pipeline_class = FluxModularPipeline pipeline_blocks_class = FluxAutoBlocks pretrained_model_name_or_path = "hf-internal-testing/tiny-flux-modular" params = frozenset(["prompt", "height", "width", "guidance_scale", "image"]) batch_params = frozenset(["prompt", "image"]) expected_workflow_blocks = FLUX_IMAGE2IMAGE_WORKFLOWS def get_pipeline(self, components_manager=None, torch_dtype=torch.float32): pipeline = super().get_pipeline(components_manager, torch_dtype) # Override `vae_scale_factor` here as currently, `image_processor` is initialized with # fixed constants instead of # https://github.com/huggingface/diffusers/blob/d54622c2679d700b425ad61abce9b80fc36212c0/src/diffusers/pipelines/flux/pipeline_flux_img2img.py#L230C9-L232C10 pipeline.image_processor = VaeImageProcessor(vae_scale_factor=2) return pipeline def get_dummy_inputs(self, seed=0): generator = self.get_generator(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 4, "guidance_scale": 5.0, "height": 8, "width": 8, "max_sequence_length": 48, "output_type": 
"pt", } image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(torch_device) image = image.cpu().permute(0, 2, 3, 1)[0] init_image = PIL.Image.fromarray(np.uint8(image)).convert("RGB") inputs["image"] = init_image inputs["strength"] = 0.5 return inputs def test_save_from_pretrained(self): pipes = [] base_pipe = self.get_pipeline().to(torch_device) pipes.append(base_pipe) with tempfile.TemporaryDirectory() as tmpdirname: base_pipe.save_pretrained(tmpdirname) pipe = ModularPipeline.from_pretrained(tmpdirname).to(torch_device) pipe.load_components(torch_dtype=torch.float32) pipe.to(torch_device) pipe.image_processor = VaeImageProcessor(vae_scale_factor=2) pipes.append(pipe) image_slices = [] for pipe in pipes: inputs = self.get_dummy_inputs() image = pipe(**inputs, output="images") image_slices.append(image[0, -3:, -3:, -1].flatten()) assert torch.abs(image_slices[0] - image_slices[1]).max() < 1e-3 def test_float16_inference(self): super().test_float16_inference(8e-2) FLUX_KONTEXT_WORKFLOWS = { "text2image": [ ("text_encoder", "FluxTextEncoderStep"), ("denoise.input", "FluxTextInputStep"), ("denoise.before_denoise.prepare_latents", "FluxPrepareLatentsStep"), ("denoise.before_denoise.set_timesteps", "FluxSetTimestepsStep"), ("denoise.before_denoise.prepare_rope_inputs", "FluxRoPEInputsStep"), ("denoise.denoise", "FluxKontextDenoiseStep"), ("decode", "FluxDecodeStep"), ], "image_conditioned": [ ("text_encoder", "FluxTextEncoderStep"), ("vae_encoder.preprocess", "FluxKontextProcessImagesInputStep"), ("vae_encoder.encode", "FluxVaeEncoderStep"), ("denoise.input.set_resolution", "FluxKontextSetResolutionStep"), ("denoise.input.text_inputs", "FluxTextInputStep"), ("denoise.input.additional_inputs", "FluxKontextAdditionalInputsStep"), ("denoise.before_denoise.prepare_latents", "FluxPrepareLatentsStep"), ("denoise.before_denoise.set_timesteps", "FluxSetTimestepsStep"), ("denoise.before_denoise.prepare_rope_inputs", "FluxKontextRoPEInputsStep"), ("denoise.denoise", 
"FluxKontextDenoiseStep"), ("decode", "FluxDecodeStep"), ], } class TestFluxKontextModularPipelineFast(ModularPipelineTesterMixin): pipeline_class = FluxKontextModularPipeline pipeline_blocks_class = FluxKontextAutoBlocks pretrained_model_name_or_path = "hf-internal-testing/tiny-flux-kontext-pipe" params = frozenset(["prompt", "height", "width", "guidance_scale", "image"]) batch_params = frozenset(["prompt", "image"]) expected_workflow_blocks = FLUX_KONTEXT_WORKFLOWS def get_dummy_inputs(self, seed=0): generator = self.get_generator(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 5.0, "height": 8, "width": 8, "max_sequence_length": 48, "output_type": "pt", } image = PIL.Image.new("RGB", (32, 32), 0) inputs["image"] = image inputs["max_area"] = inputs["height"] * inputs["width"] inputs["_auto_resize"] = False return inputs def test_save_from_pretrained(self): pipes = [] base_pipe = self.get_pipeline().to(torch_device) pipes.append(base_pipe) with tempfile.TemporaryDirectory() as tmpdirname: base_pipe.save_pretrained(tmpdirname) pipe = ModularPipeline.from_pretrained(tmpdirname).to(torch_device) pipe.load_components(torch_dtype=torch.float32) pipe.to(torch_device) pipe.image_processor = VaeImageProcessor(vae_scale_factor=2) pipes.append(pipe) image_slices = [] for pipe in pipes: inputs = self.get_dummy_inputs() image = pipe(**inputs, output="images") image_slices.append(image[0, -3:, -3:, -1].flatten()) assert torch.abs(image_slices[0] - image_slices[1]).max() < 1e-3 def test_float16_inference(self): super().test_float16_inference(9e-2)
{ "repo_id": "huggingface/diffusers", "file_path": "tests/modular_pipelines/flux/test_modular_pipeline_flux.py", "license": "Apache License 2.0", "lines": 195, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
huggingface/diffusers:src/diffusers/models/transformers/transformer_bria_fibo.py
# Copyright (c) Bria.ai. All rights reserved. # # This file is licensed under the Creative Commons Attribution-NonCommercial 4.0 International Public License (CC-BY-NC-4.0). # You may obtain a copy of the license at https://creativecommons.org/licenses/by-nc/4.0/ # # You are free to share and adapt this material for non-commercial purposes provided you give appropriate credit, # indicate if changes were made, and do not use the material for commercial purposes. # # See the license for further details. import inspect from typing import Any import torch import torch.nn as nn import torch.nn.functional as F from ...configuration_utils import ConfigMixin, register_to_config from ...loaders import FromOriginalModelMixin, PeftAdapterMixin from ...models.attention_processor import Attention from ...models.embeddings import TimestepEmbedding, apply_rotary_emb, get_1d_rotary_pos_embed, get_timestep_embedding from ...models.modeling_outputs import Transformer2DModelOutput from ...models.modeling_utils import ModelMixin from ...models.transformers.transformer_bria import BriaAttnProcessor from ...utils import ( apply_lora_scale, logging, ) from ...utils.torch_utils import maybe_allow_in_graph from ..attention import AttentionModuleMixin, FeedForward from ..attention_dispatch import dispatch_attention_fn from ..normalization import AdaLayerNormContinuous, AdaLayerNormZero, AdaLayerNormZeroSingle logger = logging.get_logger(__name__) # pylint: disable=invalid-name def _get_projections(attn: "BriaFiboAttention", hidden_states, encoder_hidden_states=None): query = attn.to_q(hidden_states) key = attn.to_k(hidden_states) value = attn.to_v(hidden_states) encoder_query = encoder_key = encoder_value = None if encoder_hidden_states is not None and attn.added_kv_proj_dim is not None: encoder_query = attn.add_q_proj(encoder_hidden_states) encoder_key = attn.add_k_proj(encoder_hidden_states) encoder_value = attn.add_v_proj(encoder_hidden_states) return query, key, value, encoder_query, 
encoder_key, encoder_value def _get_fused_projections(attn: "BriaFiboAttention", hidden_states, encoder_hidden_states=None): query, key, value = attn.to_qkv(hidden_states).chunk(3, dim=-1) encoder_query = encoder_key = encoder_value = (None,) if encoder_hidden_states is not None and hasattr(attn, "to_added_qkv"): encoder_query, encoder_key, encoder_value = attn.to_added_qkv(encoder_hidden_states).chunk(3, dim=-1) return query, key, value, encoder_query, encoder_key, encoder_value def _get_qkv_projections(attn: "BriaFiboAttention", hidden_states, encoder_hidden_states=None): if attn.fused_projections: return _get_fused_projections(attn, hidden_states, encoder_hidden_states) return _get_projections(attn, hidden_states, encoder_hidden_states) # Copied from diffusers.models.transformers.transformer_flux.FluxAttnProcessor with FluxAttnProcessor->BriaFiboAttnProcessor, FluxAttention->BriaFiboAttention class BriaFiboAttnProcessor: _attention_backend = None _parallel_config = None def __init__(self): if not hasattr(F, "scaled_dot_product_attention"): raise ImportError(f"{self.__class__.__name__} requires PyTorch 2.0. 
Please upgrade your pytorch version.") def __call__( self, attn: "BriaFiboAttention", hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor = None, attention_mask: torch.Tensor | None = None, image_rotary_emb: torch.Tensor | None = None, ) -> torch.Tensor: query, key, value, encoder_query, encoder_key, encoder_value = _get_qkv_projections( attn, hidden_states, encoder_hidden_states ) query = query.unflatten(-1, (attn.heads, -1)) key = key.unflatten(-1, (attn.heads, -1)) value = value.unflatten(-1, (attn.heads, -1)) query = attn.norm_q(query) key = attn.norm_k(key) if attn.added_kv_proj_dim is not None: encoder_query = encoder_query.unflatten(-1, (attn.heads, -1)) encoder_key = encoder_key.unflatten(-1, (attn.heads, -1)) encoder_value = encoder_value.unflatten(-1, (attn.heads, -1)) encoder_query = attn.norm_added_q(encoder_query) encoder_key = attn.norm_added_k(encoder_key) query = torch.cat([encoder_query, query], dim=1) key = torch.cat([encoder_key, key], dim=1) value = torch.cat([encoder_value, value], dim=1) if image_rotary_emb is not None: query = apply_rotary_emb(query, image_rotary_emb, sequence_dim=1) key = apply_rotary_emb(key, image_rotary_emb, sequence_dim=1) hidden_states = dispatch_attention_fn( query, key, value, attn_mask=attention_mask, backend=self._attention_backend, parallel_config=self._parallel_config, ) hidden_states = hidden_states.flatten(2, 3) hidden_states = hidden_states.to(query.dtype) if encoder_hidden_states is not None: encoder_hidden_states, hidden_states = hidden_states.split_with_sizes( [encoder_hidden_states.shape[1], hidden_states.shape[1] - encoder_hidden_states.shape[1]], dim=1 ) hidden_states = attn.to_out[0](hidden_states.contiguous()) hidden_states = attn.to_out[1](hidden_states) encoder_hidden_states = attn.to_add_out(encoder_hidden_states.contiguous()) return hidden_states, encoder_hidden_states else: return hidden_states # Based on 
https://github.com/huggingface/diffusers/blob/55d49d4379007740af20629bb61aba9546c6b053/src/diffusers/models/transformers/transformer_flux.py class BriaFiboAttention(torch.nn.Module, AttentionModuleMixin): _default_processor_cls = BriaFiboAttnProcessor _available_processors = [BriaFiboAttnProcessor] def __init__( self, query_dim: int, heads: int = 8, dim_head: int = 64, dropout: float = 0.0, bias: bool = False, added_kv_proj_dim: int | None = None, added_proj_bias: bool | None = True, out_bias: bool = True, eps: float = 1e-5, out_dim: int = None, context_pre_only: bool | None = None, pre_only: bool = False, elementwise_affine: bool = True, processor=None, ): super().__init__() self.head_dim = dim_head self.inner_dim = out_dim if out_dim is not None else dim_head * heads self.query_dim = query_dim self.use_bias = bias self.dropout = dropout self.out_dim = out_dim if out_dim is not None else query_dim self.context_pre_only = context_pre_only self.pre_only = pre_only self.heads = out_dim // dim_head if out_dim is not None else heads self.added_kv_proj_dim = added_kv_proj_dim self.added_proj_bias = added_proj_bias self.norm_q = torch.nn.RMSNorm(dim_head, eps=eps, elementwise_affine=elementwise_affine) self.norm_k = torch.nn.RMSNorm(dim_head, eps=eps, elementwise_affine=elementwise_affine) self.to_q = torch.nn.Linear(query_dim, self.inner_dim, bias=bias) self.to_k = torch.nn.Linear(query_dim, self.inner_dim, bias=bias) self.to_v = torch.nn.Linear(query_dim, self.inner_dim, bias=bias) if not self.pre_only: self.to_out = torch.nn.ModuleList([]) self.to_out.append(torch.nn.Linear(self.inner_dim, self.out_dim, bias=out_bias)) self.to_out.append(torch.nn.Dropout(dropout)) if added_kv_proj_dim is not None: self.norm_added_q = torch.nn.RMSNorm(dim_head, eps=eps) self.norm_added_k = torch.nn.RMSNorm(dim_head, eps=eps) self.add_q_proj = torch.nn.Linear(added_kv_proj_dim, self.inner_dim, bias=added_proj_bias) self.add_k_proj = torch.nn.Linear(added_kv_proj_dim, self.inner_dim, 
bias=added_proj_bias) self.add_v_proj = torch.nn.Linear(added_kv_proj_dim, self.inner_dim, bias=added_proj_bias) self.to_add_out = torch.nn.Linear(self.inner_dim, query_dim, bias=out_bias) if processor is None: processor = self._default_processor_cls() self.set_processor(processor) def forward( self, hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor | None = None, attention_mask: torch.Tensor | None = None, image_rotary_emb: torch.Tensor | None = None, **kwargs, ) -> torch.Tensor: attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) quiet_attn_parameters = {"ip_adapter_masks", "ip_hidden_states"} unused_kwargs = [k for k, _ in kwargs.items() if k not in attn_parameters and k not in quiet_attn_parameters] if len(unused_kwargs) > 0: logger.warning( f"joint_attention_kwargs {unused_kwargs} are not expected by {self.processor.__class__.__name__} and will be ignored." ) kwargs = {k: w for k, w in kwargs.items() if k in attn_parameters} return self.processor(self, hidden_states, encoder_hidden_states, attention_mask, image_rotary_emb, **kwargs) class BriaFiboEmbedND(torch.nn.Module): # modified from https://github.com/black-forest-labs/flux/blob/c00d7c60b085fce8058b9df845e036090873f2ce/src/flux/modules/layers.py#L11 def __init__(self, theta: int, axes_dim: list[int]): super().__init__() self.theta = theta self.axes_dim = axes_dim def forward(self, ids: torch.Tensor) -> torch.Tensor: n_axes = ids.shape[-1] cos_out = [] sin_out = [] pos = ids.float() is_mps = ids.device.type == "mps" freqs_dtype = torch.float32 if is_mps else torch.float64 for i in range(n_axes): cos, sin = get_1d_rotary_pos_embed( self.axes_dim[i], pos[:, i], theta=self.theta, repeat_interleave_real=True, use_real=True, freqs_dtype=freqs_dtype, ) cos_out.append(cos) sin_out.append(sin) freqs_cos = torch.cat(cos_out, dim=-1).to(ids.device) freqs_sin = torch.cat(sin_out, dim=-1).to(ids.device) return freqs_cos, freqs_sin @maybe_allow_in_graph class 
BriaFiboSingleTransformerBlock(nn.Module): def __init__(self, dim: int, num_attention_heads: int, attention_head_dim: int, mlp_ratio: float = 4.0): super().__init__() self.mlp_hidden_dim = int(dim * mlp_ratio) self.norm = AdaLayerNormZeroSingle(dim) self.proj_mlp = nn.Linear(dim, self.mlp_hidden_dim) self.act_mlp = nn.GELU(approximate="tanh") self.proj_out = nn.Linear(dim + self.mlp_hidden_dim, dim) processor = BriaAttnProcessor() self.attn = Attention( query_dim=dim, cross_attention_dim=None, dim_head=attention_head_dim, heads=num_attention_heads, out_dim=dim, bias=True, processor=processor, qk_norm="rms_norm", eps=1e-6, pre_only=True, ) def forward( self, hidden_states: torch.Tensor, temb: torch.Tensor, image_rotary_emb: tuple[torch.Tensor, torch.Tensor] | None = None, joint_attention_kwargs: dict[str, Any] | None = None, ) -> torch.Tensor: residual = hidden_states norm_hidden_states, gate = self.norm(hidden_states, emb=temb) mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) joint_attention_kwargs = joint_attention_kwargs or {} attn_output = self.attn( hidden_states=norm_hidden_states, image_rotary_emb=image_rotary_emb, **joint_attention_kwargs, ) hidden_states = torch.cat([attn_output, mlp_hidden_states], dim=2) gate = gate.unsqueeze(1) hidden_states = gate * self.proj_out(hidden_states) hidden_states = residual + hidden_states if hidden_states.dtype == torch.float16: hidden_states = hidden_states.clip(-65504, 65504) return hidden_states class BriaFiboTextProjection(nn.Module): def __init__(self, in_features, hidden_size): super().__init__() self.linear = nn.Linear(in_features=in_features, out_features=hidden_size, bias=False) def forward(self, caption): hidden_states = self.linear(caption) return hidden_states @maybe_allow_in_graph # Based on from diffusers.models.transformers.transformer_flux.FluxTransformerBlock class BriaFiboTransformerBlock(nn.Module): def __init__( self, dim: int, num_attention_heads: int, attention_head_dim: int, 
qk_norm: str = "rms_norm", eps: float = 1e-6 ): super().__init__() self.norm1 = AdaLayerNormZero(dim) self.norm1_context = AdaLayerNormZero(dim) self.attn = BriaFiboAttention( query_dim=dim, added_kv_proj_dim=dim, dim_head=attention_head_dim, heads=num_attention_heads, out_dim=dim, context_pre_only=False, bias=True, processor=BriaFiboAttnProcessor(), eps=eps, ) self.norm2 = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-6) self.ff = FeedForward(dim=dim, dim_out=dim, activation_fn="gelu-approximate") self.norm2_context = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-6) self.ff_context = FeedForward(dim=dim, dim_out=dim, activation_fn="gelu-approximate") def forward( self, hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor, temb: torch.Tensor, image_rotary_emb: tuple[torch.Tensor, torch.Tensor] | None = None, joint_attention_kwargs: dict[str, Any] | None = None, ) -> tuple[torch.Tensor, torch.Tensor]: norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( encoder_hidden_states, emb=temb ) joint_attention_kwargs = joint_attention_kwargs or {} # Attention. attention_outputs = self.attn( hidden_states=norm_hidden_states, encoder_hidden_states=norm_encoder_hidden_states, image_rotary_emb=image_rotary_emb, **joint_attention_kwargs, ) if len(attention_outputs) == 2: attn_output, context_attn_output = attention_outputs elif len(attention_outputs) == 3: attn_output, context_attn_output, ip_attn_output = attention_outputs # Process attention outputs for the `hidden_states`. 
attn_output = gate_msa.unsqueeze(1) * attn_output hidden_states = hidden_states + attn_output norm_hidden_states = self.norm2(hidden_states) norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None] ff_output = self.ff(norm_hidden_states) ff_output = gate_mlp.unsqueeze(1) * ff_output hidden_states = hidden_states + ff_output if len(attention_outputs) == 3: hidden_states = hidden_states + ip_attn_output # Process attention outputs for the `encoder_hidden_states`. context_attn_output = c_gate_msa.unsqueeze(1) * context_attn_output encoder_hidden_states = encoder_hidden_states + context_attn_output norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) norm_encoder_hidden_states = norm_encoder_hidden_states * (1 + c_scale_mlp[:, None]) + c_shift_mlp[:, None] context_ff_output = self.ff_context(norm_encoder_hidden_states) encoder_hidden_states = encoder_hidden_states + c_gate_mlp.unsqueeze(1) * context_ff_output if encoder_hidden_states.dtype == torch.float16: encoder_hidden_states = encoder_hidden_states.clip(-65504, 65504) return encoder_hidden_states, hidden_states class BriaFiboTimesteps(nn.Module): def __init__( self, num_channels: int, flip_sin_to_cos: bool, downscale_freq_shift: float, scale: int = 1, time_theta=10000 ): super().__init__() self.num_channels = num_channels self.flip_sin_to_cos = flip_sin_to_cos self.downscale_freq_shift = downscale_freq_shift self.scale = scale self.time_theta = time_theta def forward(self, timesteps): t_emb = get_timestep_embedding( timesteps, self.num_channels, flip_sin_to_cos=self.flip_sin_to_cos, downscale_freq_shift=self.downscale_freq_shift, scale=self.scale, max_period=self.time_theta, ) return t_emb class BriaFiboTimestepProjEmbeddings(nn.Module): def __init__(self, embedding_dim, time_theta): super().__init__() self.time_proj = BriaFiboTimesteps( num_channels=256, flip_sin_to_cos=True, downscale_freq_shift=0, time_theta=time_theta ) self.timestep_embedder = 
TimestepEmbedding(in_channels=256, time_embed_dim=embedding_dim) def forward(self, timestep, dtype): timesteps_proj = self.time_proj(timestep) timesteps_emb = self.timestep_embedder(timesteps_proj.to(dtype=dtype)) # (N, D) return timesteps_emb class BriaFiboTransformer2DModel(ModelMixin, ConfigMixin, PeftAdapterMixin, FromOriginalModelMixin): """ Parameters: patch_size (`int`): Patch size to turn the input data into small patches. in_channels (`int`, *optional*, defaults to 16): The number of channels in the input. num_layers (`int`, *optional*, defaults to 18): The number of layers of MMDiT blocks to use. num_single_layers (`int`, *optional*, defaults to 18): The number of layers of single DiT blocks to use. attention_head_dim (`int`, *optional*, defaults to 64): The number of channels in each head. num_attention_heads (`int`, *optional*, defaults to 18): The number of heads to use for multi-head attention. joint_attention_dim (`int`, *optional*): The number of `encoder_hidden_states` dimensions to use. pooled_projection_dim (`int`): Number of dimensions to use when projecting the `pooled_projections`. guidance_embeds (`bool`, defaults to False): Whether to use guidance embeddings. ... 
""" _supports_gradient_checkpointing = True @register_to_config def __init__( self, patch_size: int = 1, in_channels: int = 64, num_layers: int = 19, num_single_layers: int = 38, attention_head_dim: int = 128, num_attention_heads: int = 24, joint_attention_dim: int = 4096, pooled_projection_dim: int = None, guidance_embeds: bool = False, axes_dims_rope: list[int] = [16, 56, 56], rope_theta=10000, time_theta=10000, text_encoder_dim: int = 2048, ): super().__init__() self.out_channels = in_channels self.inner_dim = self.config.num_attention_heads * self.config.attention_head_dim self.pos_embed = BriaFiboEmbedND(theta=rope_theta, axes_dim=axes_dims_rope) self.time_embed = BriaFiboTimestepProjEmbeddings(embedding_dim=self.inner_dim, time_theta=time_theta) if guidance_embeds: self.guidance_embed = BriaFiboTimestepProjEmbeddings(embedding_dim=self.inner_dim) self.context_embedder = nn.Linear(self.config.joint_attention_dim, self.inner_dim) self.x_embedder = torch.nn.Linear(self.config.in_channels, self.inner_dim) self.transformer_blocks = nn.ModuleList( [ BriaFiboTransformerBlock( dim=self.inner_dim, num_attention_heads=self.config.num_attention_heads, attention_head_dim=self.config.attention_head_dim, ) for i in range(self.config.num_layers) ] ) self.single_transformer_blocks = nn.ModuleList( [ BriaFiboSingleTransformerBlock( dim=self.inner_dim, num_attention_heads=self.config.num_attention_heads, attention_head_dim=self.config.attention_head_dim, ) for i in range(self.config.num_single_layers) ] ) self.norm_out = AdaLayerNormContinuous(self.inner_dim, self.inner_dim, elementwise_affine=False, eps=1e-6) self.proj_out = nn.Linear(self.inner_dim, patch_size * patch_size * self.out_channels, bias=True) self.gradient_checkpointing = False caption_projection = [ BriaFiboTextProjection(in_features=text_encoder_dim, hidden_size=self.inner_dim // 2) for i in range(self.config.num_layers + self.config.num_single_layers) ] self.caption_projection = 
nn.ModuleList(caption_projection) @apply_lora_scale("joint_attention_kwargs") def forward( self, hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor = None, text_encoder_layers: list = None, pooled_projections: torch.Tensor = None, timestep: torch.LongTensor = None, img_ids: torch.Tensor = None, txt_ids: torch.Tensor = None, guidance: torch.Tensor = None, joint_attention_kwargs: dict[str, Any] | None = None, return_dict: bool = True, ) -> torch.FloatTensor | Transformer2DModelOutput: """ Args: hidden_states (`torch.FloatTensor` of shape `(batch size, channel, height, width)`): Input `hidden_states`. encoder_hidden_states (`torch.FloatTensor` of shape `(batch size, sequence_len, embed_dims)`): Conditional embeddings (embeddings computed from the input conditions such as prompts) to use. pooled_projections (`torch.FloatTensor` of shape `(batch_size, projection_dim)`): Embeddings projected from the embeddings of input conditions. timestep ( `torch.LongTensor`): Used to indicate denoising step. joint_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~models.transformer_2d.Transformer2DModelOutput`] instead of a plain tuple. Returns: If `return_dict` is True, an [`~models.transformer_2d.Transformer2DModelOutput`] is returned, otherwise a `tuple` where the first element is the sample tensor. 
""" hidden_states = self.x_embedder(hidden_states) timestep = timestep.to(hidden_states.dtype) if guidance is not None: guidance = guidance.to(hidden_states.dtype) else: guidance = None temb = self.time_embed(timestep, dtype=hidden_states.dtype) if guidance: temb += self.guidance_embed(guidance, dtype=hidden_states.dtype) encoder_hidden_states = self.context_embedder(encoder_hidden_states) if len(txt_ids.shape) == 3: txt_ids = txt_ids[0] if len(img_ids.shape) == 3: img_ids = img_ids[0] ids = torch.cat((txt_ids, img_ids), dim=0) image_rotary_emb = self.pos_embed(ids) new_text_encoder_layers = [] for i, text_encoder_layer in enumerate(text_encoder_layers): text_encoder_layer = self.caption_projection[i](text_encoder_layer) new_text_encoder_layers.append(text_encoder_layer) text_encoder_layers = new_text_encoder_layers block_id = 0 for index_block, block in enumerate(self.transformer_blocks): current_text_encoder_layer = text_encoder_layers[block_id] encoder_hidden_states = torch.cat( [encoder_hidden_states[:, :, : self.inner_dim // 2], current_text_encoder_layer], dim=-1 ) block_id += 1 if torch.is_grad_enabled() and self.gradient_checkpointing: encoder_hidden_states, hidden_states = self._gradient_checkpointing_func( block, hidden_states, encoder_hidden_states, temb, image_rotary_emb, joint_attention_kwargs, ) else: encoder_hidden_states, hidden_states = block( hidden_states=hidden_states, encoder_hidden_states=encoder_hidden_states, temb=temb, image_rotary_emb=image_rotary_emb, joint_attention_kwargs=joint_attention_kwargs, ) for index_block, block in enumerate(self.single_transformer_blocks): current_text_encoder_layer = text_encoder_layers[block_id] encoder_hidden_states = torch.cat( [encoder_hidden_states[:, :, : self.inner_dim // 2], current_text_encoder_layer], dim=-1 ) block_id += 1 hidden_states = torch.cat([encoder_hidden_states, hidden_states], dim=1) if torch.is_grad_enabled() and self.gradient_checkpointing: hidden_states = 
self._gradient_checkpointing_func( block, hidden_states, temb, image_rotary_emb, joint_attention_kwargs, ) else: hidden_states = block( hidden_states=hidden_states, temb=temb, image_rotary_emb=image_rotary_emb, joint_attention_kwargs=joint_attention_kwargs, ) encoder_hidden_states = hidden_states[:, : encoder_hidden_states.shape[1], ...] hidden_states = hidden_states[:, encoder_hidden_states.shape[1] :, ...] hidden_states = self.norm_out(hidden_states, temb) output = self.proj_out(hidden_states) if not return_dict: return (output,) return Transformer2DModelOutput(sample=output)
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/models/transformers/transformer_bria_fibo.py", "license": "Apache License 2.0", "lines": 530, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:src/diffusers/pipelines/bria_fibo/pipeline_bria_fibo.py
# Copyright (c) Bria.ai. All rights reserved.
#
# This file is licensed under the Creative Commons Attribution-NonCommercial 4.0 International Public License (CC-BY-NC-4.0).
# You may obtain a copy of the license at https://creativecommons.org/licenses/by-nc/4.0/
#
# You are free to share and adapt this material for non-commercial purposes provided you give appropriate credit,
# indicate if changes were made, and do not use the material for commercial purposes.
#
# See the license for further details.

from typing import Any, Callable

import numpy as np
import torch
from transformers import AutoTokenizer
from transformers.models.smollm3.modeling_smollm3 import SmolLM3ForCausalLM

from ...image_processor import VaeImageProcessor
from ...loaders import FluxLoraLoaderMixin
from ...models.autoencoders.autoencoder_kl_wan import AutoencoderKLWan
from ...models.transformers.transformer_bria_fibo import BriaFiboTransformer2DModel
from ...pipelines.bria_fibo.pipeline_output import BriaFiboPipelineOutput
from ...pipelines.flux.pipeline_flux import calculate_shift, retrieve_timesteps
from ...pipelines.pipeline_utils import DiffusionPipeline
from ...schedulers import FlowMatchEulerDiscreteScheduler, KarrasDiffusionSchedulers
from ...utils import (
    USE_PEFT_BACKEND,
    is_torch_xla_available,
    logging,
    replace_example_docstring,
    scale_lora_layers,
    unscale_lora_layers,
)
from ...utils.torch_utils import randn_tensor


if is_torch_xla_available():
    import torch_xla.core.xla_model as xm

    XLA_AVAILABLE = True
else:
    XLA_AVAILABLE = False

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
    Example:
        ```python
        import torch
        from diffusers import BriaFiboPipeline
        from diffusers.modular_pipelines import ModularPipeline

        torch.set_grad_enabled(False)

        vlm_pipe = ModularPipeline.from_pretrained("briaai/FIBO-VLM-prompt-to-JSON", trust_remote_code=True)

        pipe = BriaFiboPipeline.from_pretrained(
            "briaai/FIBO",
            trust_remote_code=True,
            torch_dtype=torch.bfloat16,
        )
        pipe.enable_model_cpu_offload()

        with torch.inference_mode():
            # 1. Create a prompt to generate an initial image
            output = vlm_pipe(prompt="a beautiful dog")
            json_prompt_generate = output.values["json_prompt"]

            # Generate the image from the structured json prompt
            results_generate = pipe(prompt=json_prompt_generate, num_inference_steps=50, guidance_scale=5)
            results_generate.images[0].save("image_generate.png")
        ```
"""


class BriaFiboPipeline(DiffusionPipeline, FluxLoraLoaderMixin):
    r"""
    Text-to-image pipeline for Bria FIBO, driven by a structured (JSON) text prompt.

    Args:
        transformer (`BriaFiboTransformer2DModel`):
            The transformer model for 2D diffusion modeling.
        scheduler (`FlowMatchEulerDiscreteScheduler` or `KarrasDiffusionSchedulers`):
            Scheduler to be used with `transformer` to denoise the encoded latents.
        vae (`AutoencoderKLWan`):
            Variational Auto-Encoder for encoding and decoding images to and from latent representations.
        text_encoder (`SmolLM3ForCausalLM`):
            Text encoder for processing input prompts.
        tokenizer (`AutoTokenizer`):
            Tokenizer used for processing the input text prompts for the text_encoder.
    """

    # NOTE(review): `text_encoder_2` and `image_encoder` are not components of this pipeline —
    # the sequence looks copied from another pipeline. Kept as-is to avoid changing offload
    # behavior; confirm against upstream whether the extra names are intentional.
    model_cpu_offload_seq = "text_encoder->text_encoder_2->image_encoder->transformer->vae"
    _callback_tensor_inputs = ["latents", "prompt_embeds"]

    def __init__(
        self,
        transformer: BriaFiboTransformer2DModel,
        scheduler: FlowMatchEulerDiscreteScheduler | KarrasDiffusionSchedulers,
        vae: AutoencoderKLWan,
        text_encoder: SmolLM3ForCausalLM,
        tokenizer: AutoTokenizer,
    ):
        # FIX: DiffusionPipeline subclasses must initialize the base class before
        # registering modules; the original skipped `super().__init__()`.
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            transformer=transformer,
            scheduler=scheduler,
        )
        self.vae_scale_factor = 16
        self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor * 2)
        self.default_sample_size = 64

    def get_prompt_embeds(
        self,
        prompt: str | list[str],
        num_images_per_prompt: int = 1,
        max_sequence_length: int = 2048,
        device: torch.device | None = None,
        dtype: torch.dtype | None = None,
    ):
        r"""
        Encode `prompt` with the text encoder and return the concatenated last two hidden states,
        all per-layer hidden states, and the attention mask.

        Returns:
            `tuple`: `(prompt_embeds, hidden_states, attention_mask)` where `prompt_embeds` is the
            concatenation of the last two encoder layers along the feature dim, `hidden_states` is the
            tuple of every encoder layer output, and all tensors are repeated `num_images_per_prompt`
            times along the batch dim.
        """
        device = device or self._execution_device
        dtype = dtype or self.text_encoder.dtype

        prompt = [prompt] if isinstance(prompt, str) else prompt
        if not prompt:
            raise ValueError("`prompt` must be a non-empty string or list of strings.")

        batch_size = len(prompt)
        # Beginning-of-text token used as the stand-in input for fully empty prompts.
        bot_token_id = 128000

        text_encoder_device = device if device is not None else torch.device("cpu")
        if not isinstance(text_encoder_device, torch.device):
            text_encoder_device = torch.device(text_encoder_device)

        if all(p == "" for p in prompt):
            # Every prompt is empty: feed a single BOT token per row instead of tokenizing.
            input_ids = torch.full((batch_size, 1), bot_token_id, dtype=torch.long, device=text_encoder_device)
            attention_mask = torch.ones_like(input_ids)
        else:
            tokenized = self.tokenizer(
                prompt,
                padding="longest",
                max_length=max_sequence_length,
                truncation=True,
                add_special_tokens=True,
                return_tensors="pt",
            )
            input_ids = tokenized.input_ids.to(text_encoder_device)
            attention_mask = tokenized.attention_mask.to(text_encoder_device)
            if any(p == "" for p in prompt):
                # Mixed batch: overwrite the empty rows with BOT tokens and mark them attended.
                empty_rows = torch.tensor([p == "" for p in prompt], dtype=torch.bool, device=text_encoder_device)
                input_ids[empty_rows] = bot_token_id
                attention_mask[empty_rows] = 1

        encoder_outputs = self.text_encoder(
            input_ids,
            attention_mask=attention_mask,
            output_hidden_states=True,
        )
        hidden_states = encoder_outputs.hidden_states

        # The pooled prompt representation is the concatenation of the two deepest layers.
        prompt_embeds = torch.cat([hidden_states[-1], hidden_states[-2]], dim=-1)
        prompt_embeds = prompt_embeds.to(device=device, dtype=dtype)
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        hidden_states = tuple(
            layer.repeat_interleave(num_images_per_prompt, dim=0).to(device=device) for layer in hidden_states
        )
        attention_mask = attention_mask.repeat_interleave(num_images_per_prompt, dim=0).to(device=device)
        return prompt_embeds, hidden_states, attention_mask

    @staticmethod
    def pad_embedding(prompt_embeds, max_tokens, attention_mask=None):
        # Pad embeddings to `max_tokens` while preserving the mask of real tokens.
        batch_size, seq_len, dim = prompt_embeds.shape
        if attention_mask is None:
            attention_mask = torch.ones((batch_size, seq_len), dtype=prompt_embeds.dtype, device=prompt_embeds.device)
        else:
            attention_mask = attention_mask.to(device=prompt_embeds.device, dtype=prompt_embeds.dtype)
        if max_tokens < seq_len:
            raise ValueError("`max_tokens` must be greater or equal to the current sequence length.")
        if max_tokens > seq_len:
            pad_length = max_tokens - seq_len
            padding = torch.zeros(
                (batch_size, pad_length, dim), dtype=prompt_embeds.dtype, device=prompt_embeds.device
            )
            prompt_embeds = torch.cat([prompt_embeds, padding], dim=1)
            mask_padding = torch.zeros(
                (batch_size, pad_length), dtype=prompt_embeds.dtype, device=prompt_embeds.device
            )
            attention_mask = torch.cat([attention_mask, mask_padding], dim=1)
        return prompt_embeds, attention_mask

    def encode_prompt(
        self,
        prompt: str | list[str],
        device: torch.device | None = None,
        num_images_per_prompt: int = 1,
        guidance_scale: float = 5,
        negative_prompt: str | list[str] | None = None,
        prompt_embeds: torch.FloatTensor | None = None,
        negative_prompt_embeds: torch.FloatTensor | None = None,
        max_sequence_length: int = 3000,
        lora_scale: float | None = None,
    ):
        r"""
        Encode positive (and, when `guidance_scale > 1`, negative) prompts and pad them to a common length.

        Args:
            prompt (`str` or `list[str]`, *optional*):
                prompt to be encoded
            device: (`torch.device`):
                torch device
            num_images_per_prompt (`int`):
                number of images that should be generated per prompt
            guidance_scale (`float`):
                Guidance scale for classifier free guidance.
            negative_prompt (`str` or `list[str]`, *optional*):
                The prompt or prompts not to guide the image generation. If not defined, one has to pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if
                `guidance_scale` is less than `1`).
            prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
                If not provided, text embeddings will be generated from `prompt` input argument.
            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt`
                input argument.

        Returns:
            `tuple`: `(prompt_embeds, negative_prompt_embeds, text_ids, prompt_attention_mask,
            negative_prompt_attention_mask, prompt_layers, negative_prompt_layers)`. The `*_layers`
            entries are `None` when the corresponding embeddings were supplied pre-computed.
        """
        device = device or self._execution_device

        # set lora scale so that monkey patched LoRA
        # function of text encoder can correctly access it
        if lora_scale is not None and isinstance(self, FluxLoraLoaderMixin):
            self._lora_scale = lora_scale
            # dynamically adjust the LoRA scale
            if self.text_encoder is not None and USE_PEFT_BACKEND:
                scale_lora_layers(self.text_encoder, lora_scale)

        prompt = [prompt] if isinstance(prompt, str) else prompt
        if prompt is not None:
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        prompt_attention_mask = None
        negative_prompt_attention_mask = None
        # FIX: initialize both layer lists so the padding section below cannot raise
        # UnboundLocalError when the caller supplies pre-computed embeddings.
        prompt_layers = None
        negative_prompt_layers = None

        if prompt_embeds is None:
            prompt_embeds, prompt_layers, prompt_attention_mask = self.get_prompt_embeds(
                prompt=prompt,
                num_images_per_prompt=num_images_per_prompt,
                max_sequence_length=max_sequence_length,
                device=device,
            )
            prompt_embeds = prompt_embeds.to(dtype=self.transformer.dtype)
            prompt_layers = [tensor.to(dtype=self.transformer.dtype) for tensor in prompt_layers]

        # FIX: only build negative embeddings when the caller did not pass them in; the
        # original unconditionally recomputed and overwrote `negative_prompt_embeds`,
        # contradicting the documented behavior of that argument.
        if guidance_scale > 1 and negative_prompt_embeds is None:
            if isinstance(negative_prompt, list) and negative_prompt[0] is None:
                negative_prompt = ""
            negative_prompt = negative_prompt or ""
            negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt

            if prompt is not None and type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )

            negative_prompt_embeds, negative_prompt_layers, negative_prompt_attention_mask = self.get_prompt_embeds(
                prompt=negative_prompt,
                num_images_per_prompt=num_images_per_prompt,
                max_sequence_length=max_sequence_length,
                device=device,
            )
            negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.transformer.dtype)
            negative_prompt_layers = [tensor.to(dtype=self.transformer.dtype) for tensor in negative_prompt_layers]

        if self.text_encoder is not None:
            if isinstance(self, FluxLoraLoaderMixin) and USE_PEFT_BACKEND:
                # Retrieve the original scale by scaling back the LoRA layers
                unscale_lora_layers(self.text_encoder, lora_scale)

        # Pad positive and negative streams to the longer of the two sequence lengths.
        if prompt_attention_mask is not None:
            prompt_attention_mask = prompt_attention_mask.to(device=prompt_embeds.device, dtype=prompt_embeds.dtype)
        if negative_prompt_embeds is not None:
            if negative_prompt_attention_mask is not None:
                negative_prompt_attention_mask = negative_prompt_attention_mask.to(
                    device=negative_prompt_embeds.device, dtype=negative_prompt_embeds.dtype
                )
            max_tokens = max(negative_prompt_embeds.shape[1], prompt_embeds.shape[1])
            prompt_embeds, prompt_attention_mask = self.pad_embedding(
                prompt_embeds, max_tokens, attention_mask=prompt_attention_mask
            )
            if prompt_layers is not None:
                prompt_layers = [self.pad_embedding(layer, max_tokens)[0] for layer in prompt_layers]
            negative_prompt_embeds, negative_prompt_attention_mask = self.pad_embedding(
                negative_prompt_embeds, max_tokens, attention_mask=negative_prompt_attention_mask
            )
            if negative_prompt_layers is not None:
                negative_prompt_layers = [self.pad_embedding(layer, max_tokens)[0] for layer in negative_prompt_layers]
        else:
            max_tokens = prompt_embeds.shape[1]
            prompt_embeds, prompt_attention_mask = self.pad_embedding(
                prompt_embeds, max_tokens, attention_mask=prompt_attention_mask
            )
            negative_prompt_layers = None

        dtype = self.text_encoder.dtype
        # FIBO uses 3-channel positional ids for text tokens; they are all-zero placeholders.
        text_ids = torch.zeros(prompt_embeds.shape[0], max_tokens, 3).to(device=device, dtype=dtype)

        return (
            prompt_embeds,
            negative_prompt_embeds,
            text_ids,
            prompt_attention_mask,
            negative_prompt_attention_mask,
            prompt_layers,
            negative_prompt_layers,
        )

    @property
    def guidance_scale(self):
        return self._guidance_scale

    # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
    # of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1`
    # corresponds to doing no classifier free guidance.

    @property
    def joint_attention_kwargs(self):
        return self._joint_attention_kwargs

    @property
    def num_timesteps(self):
        return self._num_timesteps

    @property
    def interrupt(self):
        return self._interrupt

    @staticmethod
    # Based on diffusers.pipelines.flux.pipeline_flux.FluxPipeline._unpack_latents
    def _unpack_latents(latents, height, width, vae_scale_factor):
        # Inverse of `_pack_latents`: (B, (H/2)*(W/2), C*4) -> (B, C, H, W).
        batch_size, num_patches, channels = latents.shape
        height = height // vae_scale_factor
        width = width // vae_scale_factor
        latents = latents.view(batch_size, height // 2, width // 2, channels // 4, 2, 2)
        latents = latents.permute(0, 3, 1, 4, 2, 5)
        latents = latents.reshape(batch_size, channels // (2 * 2), height, width)
        return latents

    @staticmethod
    # Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline._prepare_latent_image_ids
    def _prepare_latent_image_ids(batch_size, height, width, device, dtype):
        # Build (H*W, 3) positional ids: channel 0 is unused, channels 1/2 are row/col indices.
        latent_image_ids = torch.zeros(height, width, 3)
        latent_image_ids[..., 1] = latent_image_ids[..., 1] + torch.arange(height)[:, None]
        latent_image_ids[..., 2] = latent_image_ids[..., 2] + torch.arange(width)[None, :]

        latent_image_id_height, latent_image_id_width, latent_image_id_channels = latent_image_ids.shape
        latent_image_ids = latent_image_ids.reshape(
            latent_image_id_height * latent_image_id_width, latent_image_id_channels
        )
        return latent_image_ids.to(device=device, dtype=dtype)

    @staticmethod
    def _unpack_latents_no_patch(latents, height, width, vae_scale_factor):
        # Inverse of `_pack_latents_no_patch`: (B, H*W, C) -> (B, C, H, W).
        batch_size, num_patches, channels = latents.shape
        height = height // vae_scale_factor
        width = width // vae_scale_factor
        latents = latents.view(batch_size, height, width, channels)
        latents = latents.permute(0, 3, 1, 2)
        return latents

    @staticmethod
    def _pack_latents_no_patch(latents, batch_size, num_channels_latents, height, width):
        # Flatten spatial dims without 2x2 patching: (B, C, H, W) -> (B, H*W, C).
        latents = latents.permute(0, 2, 3, 1)
        latents = latents.reshape(batch_size, height * width, num_channels_latents)
        return latents

    @staticmethod
    # Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline._pack_latents
    def _pack_latents(latents, batch_size, num_channels_latents, height, width):
        # 2x2 patchify: (B, C, H, W) -> (B, (H/2)*(W/2), C*4).
        latents = latents.view(batch_size, num_channels_latents, height // 2, 2, width // 2, 2)
        latents = latents.permute(0, 2, 4, 1, 3, 5)
        latents = latents.reshape(batch_size, (height // 2) * (width // 2), num_channels_latents * 4)
        return latents

    def prepare_latents(
        self,
        batch_size,
        num_channels_latents,
        height,
        width,
        dtype,
        device,
        generator,
        latents=None,
        do_patching=False,
    ):
        r"""
        Sample (or reuse) initial noise latents and build matching latent positional ids.

        Returns:
            `tuple`: `(latents, latent_image_ids)` with latents already packed into sequence form.
        """
        height = int(height) // self.vae_scale_factor
        width = int(width) // self.vae_scale_factor
        shape = (batch_size, num_channels_latents, height, width)

        if latents is not None:
            latent_image_ids = self._prepare_latent_image_ids(batch_size, height, width, device, dtype)
            return latents.to(device=device, dtype=dtype), latent_image_ids

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        if do_patching:
            latents = self._pack_latents(latents, batch_size, num_channels_latents, height, width)
            latent_image_ids = self._prepare_latent_image_ids(batch_size, height // 2, width // 2, device, dtype)
        else:
            latents = self._pack_latents_no_patch(latents, batch_size, num_channels_latents, height, width)
            latent_image_ids = self._prepare_latent_image_ids(batch_size, height, width, device, dtype)
        return latents, latent_image_ids

    @staticmethod
    def _prepare_attention_mask(attention_mask):
        # Outer product of the 1D masks gives the pairwise keep matrix.
        attention_matrix = torch.einsum("bi,bj->bij", attention_mask, attention_mask)
        # convert to 0 - keep, -inf ignore
        attention_matrix = torch.where(
            attention_matrix == 1, 0.0, -torch.inf
        )  # Apply -inf to ignored tokens for nulling softmax score
        return attention_matrix

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        prompt: str | list[str] = None,
        height: int | None = None,
        width: int | None = None,
        num_inference_steps: int = 30,
        timesteps: list[int] = None,
        guidance_scale: float = 5,
        negative_prompt: str | list[str] | None = None,
        num_images_per_prompt: int | None = 1,
        generator: torch.Generator | list[torch.Generator] | None = None,
        latents: torch.FloatTensor | None = None,
        prompt_embeds: torch.FloatTensor | None = None,
        negative_prompt_embeds: torch.FloatTensor | None = None,
        output_type: str | None = "pil",
        return_dict: bool = True,
        joint_attention_kwargs: dict[str, Any] | None = None,
        callback_on_step_end: Callable[[int, int], None] | None = None,
        callback_on_step_end_tensor_inputs: list[str] = ["latents"],
        max_sequence_length: int = 3000,
        do_patching=False,
    ):
        r"""
        Function invoked when calling the pipeline for generation.

        Args:
            prompt (`str` or `list[str]`, *optional*):
                The prompt or prompts to guide the image generation. If not defined, one has to pass
                `prompt_embeds` instead.
            height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
                The height in pixels of the generated image. This is set to 1024 by default for the best results.
            width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
                The width in pixels of the generated image. This is set to 1024 by default for the best results.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            timesteps (`list[int]`, *optional*):
                Custom timesteps to use for the denoising process with schedulers which support a `timesteps`
                argument in their `set_timesteps` method. If not defined, the default behavior when
                `num_inference_steps` is passed will be used. Must be in descending order.
            guidance_scale (`float`, *optional*, defaults to 5.0):
                Guidance scale as defined in [Classifier-Free Diffusion
                Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation
                2. of [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by
                setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely
                linked to the text `prompt`, usually at the expense of lower image quality.
            negative_prompt (`str` or `list[str]`, *optional*):
                The prompt or prompts not to guide the image generation. If not defined, one has to pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if
                `guidance_scale` is less than `1`).
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            generator (`torch.Generator` or `list[torch.Generator]`, *optional*):
                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
                to make generation deterministic.
            latents (`torch.FloatTensor`, *optional*):
                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a
                latents tensor will be generated by sampling using the supplied random `generator`.
            prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
                If not provided, text embeddings will be generated from `prompt` input argument.
            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
                argument.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generate image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.bria_fibo.BriaFiboPipelineOutput`] instead of a plain
                tuple.
            joint_attention_kwargs (`dict`, *optional*):
                A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
                `self.processor` in
                [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
            callback_on_step_end (`Callable`, *optional*):
                A function that calls at the end of each denoising steps during the inference. The function is
                called with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int,
                timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as
                specified by `callback_on_step_end_tensor_inputs`.
            callback_on_step_end_tensor_inputs (`List`, *optional*):
                The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the
                list will be passed as `callback_kwargs` argument. You will only be able to include variables
                listed in the `._callback_tensor_inputs` attribute of your pipeline class.
            max_sequence_length (`int` defaults to 3000):
                Maximum sequence length to use with the `prompt`.
            do_patching (`bool`, *optional*, defaults to `False`):
                Whether to use patching.

        Examples:

        Returns:
            [`~pipelines.bria_fibo.BriaFiboPipelineOutput`] or `tuple`:
            [`~pipelines.bria_fibo.BriaFiboPipelineOutput`] if `return_dict` is True, otherwise a `tuple`. When
            returning a tuple, the first element is a list with the generated images.
        """
        height = height or self.default_sample_size * self.vae_scale_factor
        width = width or self.default_sample_size * self.vae_scale_factor

        # 1. Check inputs. Raise error if not correct
        self.check_inputs(
            prompt=prompt,
            height=height,
            width=width,
            prompt_embeds=prompt_embeds,
            callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs,
            max_sequence_length=max_sequence_length,
        )

        self._guidance_scale = guidance_scale
        self._joint_attention_kwargs = joint_attention_kwargs
        self._interrupt = False

        # 2. Define call parameters
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        device = self._execution_device

        lora_scale = (
            self.joint_attention_kwargs.get("scale", None) if self.joint_attention_kwargs is not None else None
        )

        (
            prompt_embeds,
            negative_prompt_embeds,
            text_ids,
            prompt_attention_mask,
            negative_prompt_attention_mask,
            prompt_layers,
            negative_prompt_layers,
        ) = self.encode_prompt(
            prompt=prompt,
            negative_prompt=negative_prompt,
            guidance_scale=guidance_scale,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            device=device,
            max_sequence_length=max_sequence_length,
            num_images_per_prompt=num_images_per_prompt,
            lora_scale=lora_scale,
        )

        prompt_batch_size = prompt_embeds.shape[0]
        if guidance_scale > 1:
            # Classifier-free guidance: stack negative and positive streams along the batch dim.
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
            prompt_layers = [
                torch.cat([negative_prompt_layers[i], prompt_layers[i]], dim=0) for i in range(len(prompt_layers))
            ]
            prompt_attention_mask = torch.cat([negative_prompt_attention_mask, prompt_attention_mask], dim=0)

        # Align the number of text-encoder layers to the number of transformer blocks:
        # keep the deepest layers when there are too many, repeat the last when too few.
        total_num_layers_transformer = len(self.transformer.transformer_blocks) + len(
            self.transformer.single_transformer_blocks
        )
        if len(prompt_layers) >= total_num_layers_transformer:
            # remove first layers
            prompt_layers = prompt_layers[len(prompt_layers) - total_num_layers_transformer :]
        else:
            # duplicate last layer
            prompt_layers = prompt_layers + [prompt_layers[-1]] * (total_num_layers_transformer - len(prompt_layers))

        # 5. Prepare latent variables
        num_channels_latents = self.transformer.config.in_channels
        if do_patching:
            num_channels_latents = int(num_channels_latents / 4)

        latents, latent_image_ids = self.prepare_latents(
            prompt_batch_size,
            num_channels_latents,
            height,
            width,
            prompt_embeds.dtype,
            device,
            generator,
            latents,
            do_patching,
        )

        latent_attention_mask = torch.ones(
            [latents.shape[0], latents.shape[1]], dtype=latents.dtype, device=latents.device
        )
        if guidance_scale > 1:
            latent_attention_mask = latent_attention_mask.repeat(2, 1)

        attention_mask = torch.cat([prompt_attention_mask, latent_attention_mask], dim=1)
        attention_mask = self._prepare_attention_mask(attention_mask)  # batch, seq => batch, seq, seq
        attention_mask = attention_mask.unsqueeze(dim=1).to(dtype=self.transformer.dtype)  # for head broadcasting

        if self._joint_attention_kwargs is None:
            self._joint_attention_kwargs = {}
        self._joint_attention_kwargs["attention_mask"] = attention_mask

        # Adapt scheduler to dynamic shifting (resolution dependent)
        if do_patching:
            seq_len = (height // (self.vae_scale_factor * 2)) * (width // (self.vae_scale_factor * 2))
        else:
            seq_len = (height // self.vae_scale_factor) * (width // self.vae_scale_factor)

        sigmas = np.linspace(1.0, 1 / num_inference_steps, num_inference_steps)
        mu = calculate_shift(
            seq_len,
            self.scheduler.config.base_image_seq_len,
            self.scheduler.config.max_image_seq_len,
            self.scheduler.config.base_shift,
            self.scheduler.config.max_shift,
        )

        # Init sigmas and timesteps according to shift size
        # This changes the scheduler in-place according to the dynamic scheduling
        # NOTE(review): the user-supplied `timesteps` argument is intentionally not forwarded
        # here (dynamic shifting derives its own sigmas); confirm whether the parameter
        # should be honored or deprecated.
        timesteps, num_inference_steps = retrieve_timesteps(
            self.scheduler,
            num_inference_steps=num_inference_steps,
            device=device,
            timesteps=None,
            sigmas=sigmas,
            mu=mu,
        )
        num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
        self._num_timesteps = len(timesteps)

        # Support old different diffusers versions
        if len(latent_image_ids.shape) == 3:
            latent_image_ids = latent_image_ids[0]
        if len(text_ids.shape) == 3:
            text_ids = text_ids[0]

        # 6. Denoising loop
        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                if self.interrupt:
                    continue

                # expand the latents if we are doing classifier free guidance
                latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents

                # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
                timestep = t.expand(latent_model_input.shape[0]).to(
                    device=latent_model_input.device, dtype=latent_model_input.dtype
                )

                # This predicts "v" from flow-matching or eps from diffusion
                noise_pred = self.transformer(
                    hidden_states=latent_model_input,
                    timestep=timestep,
                    encoder_hidden_states=prompt_embeds,
                    text_encoder_layers=prompt_layers,
                    joint_attention_kwargs=self.joint_attention_kwargs,
                    return_dict=False,
                    txt_ids=text_ids,
                    img_ids=latent_image_ids,
                )[0]

                # perform guidance
                if guidance_scale > 1:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond)

                # compute the previous noisy sample x_t -> x_t-1
                latents_dtype = latents.dtype
                latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0]

                if latents.dtype != latents_dtype:
                    if torch.backends.mps.is_available():
                        # some platforms (eg. apple mps) misbehave due to a pytorch bug:
                        # https://github.com/pytorch/pytorch/pull/99272
                        latents = latents.to(latents_dtype)

                if callback_on_step_end is not None:
                    callback_kwargs = {}
                    for k in callback_on_step_end_tensor_inputs:
                        callback_kwargs[k] = locals()[k]
                    callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)

                    latents = callback_outputs.pop("latents", latents)
                    prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
                    negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)

                # call the callback, if provided
                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                    progress_bar.update()

                if XLA_AVAILABLE:
                    xm.mark_step()

        if output_type == "latent":
            image = latents
        else:
            if do_patching:
                latents = self._unpack_latents(latents, height, width, self.vae_scale_factor)
            else:
                latents = self._unpack_latents_no_patch(latents, height, width, self.vae_scale_factor)
            # Wan VAE expects a (singleton) frame dimension: (B, C, 1, H, W).
            latents = latents.unsqueeze(dim=2)

            latents_device = latents[0].device
            latents_dtype = latents[0].dtype
            # De-normalize with the VAE's per-channel statistics (note: `latents_std`
            # actually stores 1/std, so division multiplies by std).
            latents_mean = (
                torch.tensor(self.vae.config.latents_mean)
                .view(1, self.vae.config.z_dim, 1, 1, 1)
                .to(latents_device, latents_dtype)
            )
            latents_std = 1.0 / torch.tensor(self.vae.config.latents_std).view(1, self.vae.config.z_dim, 1, 1, 1).to(
                latents_device, latents_dtype
            )
            latents_scaled = [latent / latents_std + latents_mean for latent in latents]
            latents_scaled = torch.cat(latents_scaled, dim=0)

            image = []
            for scaled_latent in latents_scaled:
                curr_image = self.vae.decode(scaled_latent.unsqueeze(0), return_dict=False)[0]
                curr_image = self.image_processor.postprocess(curr_image.squeeze(dim=2), output_type=output_type)
                image.append(curr_image)
            if len(image) == 1:
                image = image[0]
            else:
                image = np.stack(image, axis=0)

        # Offload all models
        self.maybe_free_model_hooks()

        if not return_dict:
            return (image,)

        return BriaFiboPipelineOutput(images=image)

    def check_inputs(
        self,
        prompt,
        height,
        width,
        negative_prompt=None,
        prompt_embeds=None,
        negative_prompt_embeds=None,
        callback_on_step_end_tensor_inputs=None,
        max_sequence_length=None,
    ):
        r"""Validate user-facing `__call__` arguments, raising `ValueError` on inconsistent input."""
        if height % 16 != 0 or width % 16 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 16 but are {height} and {width}.")

        if callback_on_step_end_tensor_inputs is not None and not all(
            k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
        ):
            raise ValueError(
                f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
            )

        if prompt is not None and prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
                " only forward one of the two."
            )
        elif prompt is None and prompt_embeds is None:
            raise ValueError(
                "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
            )
        elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if negative_prompt is not None and negative_prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
                f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
            )

        if prompt_embeds is not None and negative_prompt_embeds is not None:
            if prompt_embeds.shape != negative_prompt_embeds.shape:
                raise ValueError(
                    "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
                    f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
                    f" {negative_prompt_embeds.shape}."
                )

        if max_sequence_length is not None and max_sequence_length > 3000:
            raise ValueError(f"`max_sequence_length` cannot be greater than 3000 but is {max_sequence_length}")
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/pipelines/bria_fibo/pipeline_bria_fibo.py", "license": "Apache License 2.0", "lines": 714, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
license
huggingface/diffusers:src/diffusers/pipelines/bria_fibo/pipeline_output.py
from dataclasses import dataclass

import numpy as np
import PIL.Image

from ...utils import BaseOutput


@dataclass
class BriaFiboPipelineOutput(BaseOutput):
    """
    Output class for BriaFibo pipelines.

    Args:
        images (`list[PIL.Image.Image]` or `np.ndarray`)
            List of denoised PIL images of length `batch_size` or numpy array of shape `(batch_size, height, width,
            num_channels)`. PIL images or numpy array present the denoised images of the diffusion pipeline.
    """

    # FIX: the original annotation `list[PIL.Image.Image, np.ndarray]` parameterized `list`
    # with two arguments, which is not a valid type. The field holds either a list of PIL
    # images or a single numpy batch array, i.e. a union of the two.
    images: list[PIL.Image.Image] | np.ndarray
{ "repo_id": "huggingface/diffusers", "file_path": "src/diffusers/pipelines/bria_fibo/pipeline_output.py", "license": "Apache License 2.0", "lines": 14, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
documentation