linoyts committed (verified)
Commit eca89b4 · 1 Parent(s): e3fceed

Upload pipeline.py with huggingface_hub

Files changed (1)
  1. pipeline.py +1348 -0
pipeline.py ADDED
@@ -0,0 +1,1348 @@
1
+ # Copyright 2025 Lightricks and The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ """
16
+ LTX-2 Audio-to-Video Pipeline with Video Conditioning Support
17
+
18
+ This is a modified version of the LTX2AudioToVideoPipeline that adds support for
19
+ video conditioning, enabling avatar/face-swap generation workflows.
20
+
21
+ Usage:
22
+ pipe = DiffusionPipeline.from_pretrained(
23
+ "rootonchair/LTX-2-19b-distilled",
24
+ custom_pipeline="path/to/this/file",
25
+ torch_dtype=torch.bfloat16
26
+ )
27
+
28
+ # With video conditioning (for avatar/face-swap):
29
+ video, audio = pipe(
30
+ image=face_image, # The face/appearance to use
31
+ video=reference_video, # Video for motion conditioning
32
+ audio="path/to/audio.wav", # Audio (or extracted from video)
33
+ prompt="head_swap, a person speaking...",
34
+ ...
35
+ )
36
+ """
37
+
38
+ import copy
39
+ import inspect
40
+ from typing import Any, Callable, Dict, List, Optional, Tuple, Union
41
+
42
+ import numpy as np
43
+ import torch
44
+ import torchaudio
45
+ import torchaudio.transforms as T
46
+ from PIL import Image
47
+ from transformers import Gemma3ForConditionalGeneration, GemmaTokenizer, GemmaTokenizerFast
48
+
49
+ from diffusers.callbacks import MultiPipelineCallbacks, PipelineCallback
50
+ from diffusers.image_processor import PipelineImageInput
51
+ from diffusers.loaders import FromSingleFileMixin, LTXVideoLoraLoaderMixin
52
+ from diffusers.models.autoencoders import AutoencoderKLLTX2Audio, AutoencoderKLLTX2Video
53
+ from diffusers.models.transformers import LTX2VideoTransformer3DModel
54
+ from diffusers.schedulers import FlowMatchEulerDiscreteScheduler
55
+ from diffusers.utils import is_torch_xla_available, logging, replace_example_docstring
56
+ from diffusers.utils.torch_utils import randn_tensor
57
+ from diffusers.video_processor import VideoProcessor
58
+ from diffusers.pipelines.pipeline_utils import DiffusionPipeline
59
+ from diffusers.pipelines.ltx2.connectors import LTX2TextConnectors
60
+ from diffusers.pipelines.ltx2.pipeline_output import LTX2PipelineOutput
61
+ from diffusers.pipelines.ltx2.vocoder import LTX2Vocoder
62
+
63
+
64
+ if is_torch_xla_available():
65
+ import torch_xla.core.xla_model as xm
66
+ XLA_AVAILABLE = True
67
+ else:
68
+ XLA_AVAILABLE = False
69
+
70
+ logger = logging.get_logger(__name__)
71
+
72
+
73
+ EXAMPLE_DOC_STRING = """
74
+ Examples:
75
+ ```py
76
+ >>> import torch
77
+ >>> from diffusers import DiffusionPipeline
78
+ >>> from diffusers.utils import load_image
79
+
80
+ >>> pipe = DiffusionPipeline.from_pretrained(
81
+ ... "rootonchair/LTX-2-19b-distilled",
82
+ ... custom_pipeline="pipeline_ltx2_avatar",
83
+ ... torch_dtype=torch.bfloat16
84
+ ... )
85
+ >>> pipe.to("cuda")
86
+
87
+ >>> # Load face swap LoRA
88
+ >>> pipe.load_lora_weights(
89
+ ... "Alissonerdx/BFS-Best-Face-Swap-Video",
90
+ ... weight_name="ltx-2/head_swap_v1_13500_first_frame.safetensors",
91
+ ... )
92
+ >>> pipe.fuse_lora(lora_scale=1.1)
93
+
94
+ >>> face_image = load_image("face.png")
95
+ >>> video, audio = pipe(
96
+ ... image=face_image,
97
+ ... video="reference_video.mp4", # Motion reference
98
+ ... video_conditioning_strength=1.0, # How strongly to follow motion
99
+ ... video_conditioning_frame_idx=1, # Frame 0 = face, Frame 1+ = video motion
100
+ ... audio="reference_video.mp4", # Audio extracted from video
101
+ ... prompt="head_swap, a person speaking naturally",
102
+ ... width=512,
103
+ ... height=768,
104
+ ... num_frames=121,
105
+ ... return_dict=False,
106
+ ... )
107
+ ```
108
+ """
109
+
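+ # A minimal sketch of saving the outputs from the example above (assumptions: `video`
+ # holds the list of PIL frames returned with output_type="pil" and `audio` is the
+ # waveform tensor produced by the vocoder; the exact tensor layout may differ by version):
+ #
+ #     from diffusers.utils import export_to_video
+ #     import torchaudio
+ #
+ #     export_to_video(video[0], "output.mp4", fps=24)
+ #     torchaudio.save("output.wav", audio[0].float().cpu(), pipe.audio_sampling_rate)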
110
+
111
+ def retrieve_latents(
112
+ encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample"
113
+ ):
114
+ if hasattr(encoder_output, "latent_dist") and sample_mode == "sample":
115
+ return encoder_output.latent_dist.sample(generator)
116
+ elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax":
117
+ return encoder_output.latent_dist.mode()
118
+ elif hasattr(encoder_output, "latents"):
119
+ return encoder_output.latents
120
+ else:
121
+ raise AttributeError("Could not access latents of provided encoder_output")
122
+
123
+
124
+ def calculate_shift(
125
+ image_seq_len,
126
+ base_seq_len: int = 256,
127
+ max_seq_len: int = 4096,
128
+ base_shift: float = 0.5,
129
+ max_shift: float = 1.15,
130
+ ):
131
+ m = (max_shift - base_shift) / (max_seq_len - base_seq_len)
132
+ b = base_shift - m * base_seq_len
133
+ mu = image_seq_len * m + b
134
+ return mu
135
+
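+ # Worked example (illustrative): with the scheduler values used further below
+ # (base_seq_len=1024, max_seq_len=4096, base_shift=0.95, max_shift=2.05), a video token
+ # sequence of length 2560 sits halfway between the endpoints, so
+ # mu = 0.95 + (2.05 - 0.95) * (2560 - 1024) / (4096 - 1024) = 1.5.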
136
+
137
+ def retrieve_timesteps(
138
+ scheduler,
139
+ num_inference_steps: Optional[int] = None,
140
+ device: Optional[Union[str, torch.device]] = None,
141
+ timesteps: Optional[List[int]] = None,
142
+ sigmas: Optional[List[float]] = None,
143
+ **kwargs,
144
+ ):
145
+ if timesteps is not None and sigmas is not None:
146
+ raise ValueError("Only one of `timesteps` or `sigmas` can be passed.")
147
+ if timesteps is not None:
148
+ accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
149
+ if not accepts_timesteps:
150
+ raise ValueError(
151
+ f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom timestep schedules."
152
+ )
153
+ scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
154
+ timesteps = scheduler.timesteps
155
+ num_inference_steps = len(timesteps)
156
+ elif sigmas is not None:
157
+ accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
158
+ if not accept_sigmas:
159
+ raise ValueError(
160
+ f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom sigmas schedules."
161
+ )
162
+ scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs)
163
+ timesteps = scheduler.timesteps
164
+ num_inference_steps = len(timesteps)
165
+ else:
166
+ scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
167
+ timesteps = scheduler.timesteps
168
+ return timesteps, num_inference_steps
169
+
170
+
171
+ def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
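+ # Rescale the classifier-free-guidance output so its per-sample standard deviation
+ # matches that of the text-conditioned prediction, then blend with the original by
+ # `guidance_rescale` (0.0 = no rescaling), following the guidance-rescale trick from
+ # "Common Diffusion Noise Schedules and Sample Steps are Flawed".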
172
+ std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
173
+ std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
174
+ noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
175
+ noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
176
+ return noise_cfg
177
+
178
+
179
+ class LTX2AvatarPipeline(DiffusionPipeline, FromSingleFileMixin, LTXVideoLoraLoaderMixin):
180
+ r"""
181
+ Pipeline for avatar/face-swap video generation with audio and video conditioning.
182
+
183
+ This pipeline generates video conditioned on:
184
+ - An input image (the face/appearance to use)
185
+ - A reference video (for motion/pose conditioning)
186
+ - Input audio (for lip-sync)
187
+
188
+ This enables avatar generation where the face from the image is animated
189
+ to match the motion from the reference video and synced to the audio.
190
+ """
191
+
192
+ model_cpu_offload_seq = "text_encoder->connectors->transformer->vae->audio_vae->vocoder"
193
+ _optional_components = []
194
+ _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"]
195
+
196
+ def __init__(
197
+ self,
198
+ scheduler: FlowMatchEulerDiscreteScheduler,
199
+ vae: AutoencoderKLLTX2Video,
200
+ audio_vae: AutoencoderKLLTX2Audio,
201
+ text_encoder: Gemma3ForConditionalGeneration,
202
+ tokenizer: Union[GemmaTokenizer, GemmaTokenizerFast],
203
+ connectors: LTX2TextConnectors,
204
+ transformer: LTX2VideoTransformer3DModel,
205
+ vocoder: LTX2Vocoder,
206
+ ):
207
+ super().__init__()
208
+
209
+ self.register_modules(
210
+ vae=vae,
211
+ audio_vae=audio_vae,
212
+ text_encoder=text_encoder,
213
+ tokenizer=tokenizer,
214
+ connectors=connectors,
215
+ transformer=transformer,
216
+ vocoder=vocoder,
217
+ scheduler=scheduler,
218
+ )
219
+
220
+ self.vae_spatial_compression_ratio = (
221
+ self.vae.spatial_compression_ratio if getattr(self, "vae", None) is not None else 32
222
+ )
223
+ self.vae_temporal_compression_ratio = (
224
+ self.vae.temporal_compression_ratio if getattr(self, "vae", None) is not None else 8
225
+ )
226
+ self.audio_vae_mel_compression_ratio = (
227
+ self.audio_vae.mel_compression_ratio if getattr(self, "audio_vae", None) is not None else 4
228
+ )
229
+ self.audio_vae_temporal_compression_ratio = (
230
+ self.audio_vae.temporal_compression_ratio if getattr(self, "audio_vae", None) is not None else 4
231
+ )
232
+ self.transformer_spatial_patch_size = (
233
+ self.transformer.config.patch_size if getattr(self, "transformer", None) is not None else 1
234
+ )
235
+ self.transformer_temporal_patch_size = (
236
+ self.transformer.config.patch_size_t if getattr(self, "transformer", None) is not None else 1
237
+ )
238
+
239
+ self.audio_sampling_rate = (
240
+ self.audio_vae.config.sample_rate if getattr(self, "audio_vae", None) is not None else 16000
241
+ )
242
+ self.audio_hop_length = (
243
+ self.audio_vae.config.mel_hop_length if getattr(self, "audio_vae", None) is not None else 160
244
+ )
245
+
246
+ self.video_processor = VideoProcessor(vae_scale_factor=self.vae_spatial_compression_ratio, resample="bilinear")
247
+ self.tokenizer_max_length = (
248
+ self.tokenizer.model_max_length if getattr(self, "tokenizer", None) is not None else 1024
249
+ )
250
+
251
+ # ==================== Video Conditioning Methods ====================
252
+
253
+ def _load_video_frames(
254
+ self,
255
+ video: Union[str, List[Image.Image], torch.Tensor],
256
+ height: int,
257
+ width: int,
258
+ num_frames: int,
259
+ device: torch.device,
260
+ dtype: torch.dtype,
261
+ ) -> torch.Tensor:
262
+ """
263
+ Load and preprocess video frames for conditioning.
264
+
265
+ Args:
266
+ video: Path to video file, list of PIL images, or tensor of frames
267
+ height: Target height
268
+ width: Target width
269
+ num_frames: Number of frames to extract/use
270
+ device: Target device
271
+ dtype: Target dtype
272
+
273
+ Returns:
274
+ Tensor of shape (batch, channels, num_frames, height, width)
275
+ """
276
+ if isinstance(video, str):
277
+ # Load from file
278
+ frames = self._decode_video_file(video, num_frames)
279
+ elif isinstance(video, list):
280
+ # List of PIL images
281
+ frames = [np.array(img.convert("RGB")) for img in video]
282
+ elif isinstance(video, torch.Tensor):
283
+ # Already a tensor
284
+ if video.ndim == 4: # (F, H, W, C) or (F, C, H, W)
285
+ if video.shape[-1] in [1, 3, 4]: # (F, H, W, C)
286
+ frames = [video[i].cpu().numpy() for i in range(video.shape[0])]
287
+ else: # (F, C, H, W)
288
+ frames = [video[i].permute(1, 2, 0).cpu().numpy() for i in range(video.shape[0])]
289
+ else:
290
+ raise ValueError(f"Unexpected video tensor shape: {video.shape}")
291
+ else:
292
+ raise TypeError(f"Unsupported video type: {type(video)}")
293
+
294
+ # Handle frame count
295
+ if len(frames) >= num_frames:
296
+ frames = frames[:num_frames]
297
+ else:
298
+ # Pad with last frame
299
+ last_frame = frames[-1]
300
+ while len(frames) < num_frames:
301
+ frames.append(last_frame)
302
+
303
+ # Process each frame
304
+ processed_frames = []
305
+ for frame in frames:
306
+ if isinstance(frame, np.ndarray):
307
+ frame = Image.fromarray(frame.astype(np.uint8))
308
+
309
+ # Resize to target dimensions
310
+ frame = frame.resize((width, height), Image.LANCZOS)
311
+ frame = np.array(frame)
312
+
313
+ # Normalize to [-1, 1]
314
+ frame = (frame.astype(np.float32) / 127.5) - 1.0
315
+ processed_frames.append(frame)
316
+
317
+ # Stack frames: (F, H, W, C) -> (1, C, F, H, W)
318
+ frames_array = np.stack(processed_frames, axis=0) # (F, H, W, C)
319
+ frames_tensor = torch.from_numpy(frames_array).permute(3, 0, 1, 2).unsqueeze(0) # (1, C, F, H, W)
320
+
321
+ return frames_tensor.to(device=device, dtype=dtype)
322
+
323
+ def _decode_video_file(self, video_path: str, max_frames: int) -> List[np.ndarray]:
324
+ """Decode video file to list of numpy arrays."""
325
+ try:
326
+ import av
327
+ except ImportError:
328
+ raise ImportError("Please install av: pip install av")
329
+
330
+ frames = []
331
+ container = av.open(video_path)
332
+ try:
333
+ video_stream = next(s for s in container.streams if s.type == "video")
334
+ for frame in container.decode(video_stream):
335
+ frames.append(frame.to_rgb().to_ndarray())
336
+ if len(frames) >= max_frames:
337
+ break
338
+ finally:
339
+ container.close()
340
+
341
+ return frames
342
+
343
+ def _encode_video_conditioning(
344
+ self,
345
+ video: torch.Tensor,
346
+ generator: Optional[torch.Generator] = None,
347
+ ) -> torch.Tensor:
348
+ """
349
+ Encode video frames through the VAE to get latents.
350
+
351
+ Args:
352
+ video: Video tensor of shape (batch, channels, frames, height, width)
353
+ generator: Random generator for sampling
354
+
355
+ Returns:
356
+ Video latents
357
+ """
358
+ # Encode each frame through VAE
359
+ # VAE expects (batch, channels, frames, height, width)
360
+ video = video.to(self.vae.dtype)
361
+ latents = retrieve_latents(self.vae.encode(video), generator, "argmax")
362
+ return latents
363
+
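+ # Shape note (under the default VAE config assumed in __init__, i.e. spatial compression
+ # 32 and temporal compression 8): a pixel-space video of shape (B, 3, F, H, W) encodes to
+ # latents of shape (B, C_lat, (F - 1) // 8 + 1, H // 32, W // 32).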
364
+ # ==================== Text Encoding Methods ====================
365
+
366
+ @staticmethod
367
+ def _pack_text_embeds(
368
+ text_hidden_states: torch.Tensor,
369
+ sequence_lengths: torch.Tensor,
370
+ device: Union[str, torch.device],
371
+ padding_side: str = "left",
372
+ scale_factor: int = 8,
373
+ eps: float = 1e-6,
374
+ ) -> torch.Tensor:
375
+ batch_size, seq_len, hidden_dim, num_layers = text_hidden_states.shape
376
+ original_dtype = text_hidden_states.dtype
377
+
378
+ token_indices = torch.arange(seq_len, device=device).unsqueeze(0)
379
+ if padding_side == "right":
380
+ mask = token_indices < sequence_lengths[:, None]
381
+ elif padding_side == "left":
382
+ start_indices = seq_len - sequence_lengths[:, None]
383
+ mask = token_indices >= start_indices
384
+ else:
385
+ raise ValueError(f"padding_side must be 'left' or 'right', got {padding_side}")
386
+ mask = mask[:, :, None, None]
387
+
388
+ masked_text_hidden_states = text_hidden_states.masked_fill(~mask, 0.0)
389
+ num_valid_positions = (sequence_lengths * hidden_dim).view(batch_size, 1, 1, 1)
390
+ masked_mean = masked_text_hidden_states.sum(dim=(1, 2), keepdim=True) / (num_valid_positions + eps)
391
+
392
+ x_min = text_hidden_states.masked_fill(~mask, float("inf")).amin(dim=(1, 2), keepdim=True)
393
+ x_max = text_hidden_states.masked_fill(~mask, float("-inf")).amax(dim=(1, 2), keepdim=True)
394
+
395
+ normalized_hidden_states = (text_hidden_states - masked_mean) / (x_max - x_min + eps)
396
+ normalized_hidden_states = normalized_hidden_states * scale_factor
397
+
398
+ normalized_hidden_states = normalized_hidden_states.flatten(2)
399
+ mask_flat = mask.squeeze(-1).expand(-1, -1, hidden_dim * num_layers)
400
+ normalized_hidden_states = normalized_hidden_states.masked_fill(~mask_flat, 0.0)
401
+ normalized_hidden_states = normalized_hidden_states.to(dtype=original_dtype)
402
+ return normalized_hidden_states
403
+
404
+ def _get_gemma_prompt_embeds(
405
+ self,
406
+ prompt: Union[str, List[str]],
407
+ num_videos_per_prompt: int = 1,
408
+ max_sequence_length: int = 1024,
409
+ scale_factor: int = 8,
410
+ device: Optional[torch.device] = None,
411
+ dtype: Optional[torch.dtype] = None,
412
+ ):
413
+ device = device or self._execution_device
414
+ dtype = dtype or self.text_encoder.dtype
415
+
416
+ prompt = [prompt] if isinstance(prompt, str) else prompt
417
+ batch_size = len(prompt)
418
+
419
+ if getattr(self, "tokenizer", None) is not None:
420
+ self.tokenizer.padding_side = "left"
421
+ if self.tokenizer.pad_token is None:
422
+ self.tokenizer.pad_token = self.tokenizer.eos_token
423
+
424
+ prompt = [p.strip() for p in prompt]
425
+ text_inputs = self.tokenizer(
426
+ prompt,
427
+ padding="max_length",
428
+ max_length=max_sequence_length,
429
+ truncation=True,
430
+ add_special_tokens=True,
431
+ return_tensors="pt",
432
+ )
433
+ text_input_ids = text_inputs.input_ids
434
+ prompt_attention_mask = text_inputs.attention_mask
435
+ text_input_ids = text_input_ids.to(device)
436
+ prompt_attention_mask = prompt_attention_mask.to(device)
437
+
438
+ text_encoder_outputs = self.text_encoder(
439
+ input_ids=text_input_ids, attention_mask=prompt_attention_mask, output_hidden_states=True
440
+ )
441
+ text_encoder_hidden_states = text_encoder_outputs.hidden_states
442
+ text_encoder_hidden_states = torch.stack(text_encoder_hidden_states, dim=-1)
443
+ sequence_lengths = prompt_attention_mask.sum(dim=-1)
444
+
445
+ prompt_embeds = self._pack_text_embeds(
446
+ text_encoder_hidden_states,
447
+ sequence_lengths,
448
+ device=device,
449
+ padding_side=self.tokenizer.padding_side,
450
+ scale_factor=scale_factor,
451
+ )
452
+ prompt_embeds = prompt_embeds.to(dtype=dtype)
453
+
454
+ _, seq_len, _ = prompt_embeds.shape
455
+ prompt_embeds = prompt_embeds.repeat(1, num_videos_per_prompt, 1)
456
+ prompt_embeds = prompt_embeds.view(batch_size * num_videos_per_prompt, seq_len, -1)
457
+
458
+ prompt_attention_mask = prompt_attention_mask.view(batch_size, -1)
459
+ prompt_attention_mask = prompt_attention_mask.repeat(num_videos_per_prompt, 1)
460
+
461
+ return prompt_embeds, prompt_attention_mask
462
+
463
+ def encode_prompt(
464
+ self,
465
+ prompt: Union[str, List[str]],
466
+ negative_prompt: Optional[Union[str, List[str]]] = None,
467
+ do_classifier_free_guidance: bool = True,
468
+ num_videos_per_prompt: int = 1,
469
+ prompt_embeds: Optional[torch.Tensor] = None,
470
+ negative_prompt_embeds: Optional[torch.Tensor] = None,
471
+ prompt_attention_mask: Optional[torch.Tensor] = None,
472
+ negative_prompt_attention_mask: Optional[torch.Tensor] = None,
473
+ max_sequence_length: int = 1024,
474
+ scale_factor: int = 8,
475
+ device: Optional[torch.device] = None,
476
+ dtype: Optional[torch.dtype] = None,
477
+ ):
478
+ device = device or self._execution_device
479
+
480
+ prompt = [prompt] if isinstance(prompt, str) else prompt
481
+ if prompt is not None:
482
+ batch_size = len(prompt)
483
+ else:
484
+ batch_size = prompt_embeds.shape[0]
485
+
486
+ if prompt_embeds is None:
487
+ prompt_embeds, prompt_attention_mask = self._get_gemma_prompt_embeds(
488
+ prompt=prompt,
489
+ num_videos_per_prompt=num_videos_per_prompt,
490
+ max_sequence_length=max_sequence_length,
491
+ scale_factor=scale_factor,
492
+ device=device,
493
+ dtype=dtype,
494
+ )
495
+
496
+ if do_classifier_free_guidance and negative_prompt_embeds is None:
497
+ negative_prompt = negative_prompt or ""
498
+ negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt
499
+
500
+ if prompt is not None and type(prompt) is not type(negative_prompt):
501
+ raise TypeError(
502
+ f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}."
503
+ )
504
+ elif batch_size != len(negative_prompt):
505
+ raise ValueError(
506
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}."
507
+ )
508
+
509
+ negative_prompt_embeds, negative_prompt_attention_mask = self._get_gemma_prompt_embeds(
510
+ prompt=negative_prompt,
511
+ num_videos_per_prompt=num_videos_per_prompt,
512
+ max_sequence_length=max_sequence_length,
513
+ scale_factor=scale_factor,
514
+ device=device,
515
+ dtype=dtype,
516
+ )
517
+
518
+ return prompt_embeds, prompt_attention_mask, negative_prompt_embeds, negative_prompt_attention_mask
519
+
520
+ def check_inputs(
521
+ self,
522
+ prompt,
523
+ height,
524
+ width,
525
+ callback_on_step_end_tensor_inputs=None,
526
+ prompt_embeds=None,
527
+ negative_prompt_embeds=None,
528
+ prompt_attention_mask=None,
529
+ negative_prompt_attention_mask=None,
530
+ ):
531
+ if height % 32 != 0 or width % 32 != 0:
532
+ raise ValueError(f"`height` and `width` have to be divisible by 32 but are {height} and {width}.")
533
+
534
+ if callback_on_step_end_tensor_inputs is not None and not all(
535
+ k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
536
+ ):
537
+ raise ValueError(
538
+ f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}"
539
+ )
540
+
541
+ if prompt is not None and prompt_embeds is not None:
542
+ raise ValueError("Cannot forward both `prompt` and `prompt_embeds`.")
543
+ elif prompt is None and prompt_embeds is None:
544
+ raise ValueError("Provide either `prompt` or `prompt_embeds`.")
545
+ elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
546
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
547
+
548
+ if prompt_embeds is not None and prompt_attention_mask is None:
549
+ raise ValueError("Must provide `prompt_attention_mask` when specifying `prompt_embeds`.")
550
+
551
+ if negative_prompt_embeds is not None and negative_prompt_attention_mask is None:
552
+ raise ValueError("Must provide `negative_prompt_attention_mask` when specifying `negative_prompt_embeds`.")
553
+
554
+ # ==================== Latent Packing/Unpacking ====================
555
+
556
+ @staticmethod
557
+ def _pack_latents(latents: torch.Tensor, patch_size: int = 1, patch_size_t: int = 1) -> torch.Tensor:
558
+ batch_size, num_channels, num_frames, height, width = latents.shape
559
+ post_patch_num_frames = num_frames // patch_size_t
560
+ post_patch_height = height // patch_size
561
+ post_patch_width = width // patch_size
562
+ latents = latents.reshape(
563
+ batch_size,
564
+ -1,
565
+ post_patch_num_frames,
566
+ patch_size_t,
567
+ post_patch_height,
568
+ patch_size,
569
+ post_patch_width,
570
+ patch_size,
571
+ )
572
+ latents = latents.permute(0, 2, 4, 6, 1, 3, 5, 7).flatten(4, 7).flatten(1, 3)
573
+ return latents
574
+
575
+ @staticmethod
576
+ def _unpack_latents(
577
+ latents: torch.Tensor, num_frames: int, height: int, width: int, patch_size: int = 1, patch_size_t: int = 1
578
+ ) -> torch.Tensor:
579
+ batch_size = latents.size(0)
580
+ latents = latents.reshape(batch_size, num_frames, height, width, -1, patch_size_t, patch_size, patch_size)
581
+ latents = latents.permute(0, 4, 1, 5, 2, 6, 3, 7).flatten(6, 7).flatten(4, 5).flatten(2, 3)
582
+ return latents
583
+
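+ # `_pack_latents` and `_unpack_latents` are inverses of one another: with
+ # patch_size = patch_size_t = 1 they simply flatten (B, C, F, H, W) latents into a
+ # token sequence of shape (B, F * H * W, C) for the transformer, and back again.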
584
+ @staticmethod
585
+ def _normalize_latents(
586
+ latents: torch.Tensor, latents_mean: torch.Tensor, latents_std: torch.Tensor, scaling_factor: float = 1.0
587
+ ) -> torch.Tensor:
588
+ latents_mean = latents_mean.view(1, -1, 1, 1, 1).to(latents.device, latents.dtype)
589
+ latents_std = latents_std.view(1, -1, 1, 1, 1).to(latents.device, latents.dtype)
590
+ latents = (latents - latents_mean) * scaling_factor / latents_std
591
+ return latents
592
+
593
+ @staticmethod
594
+ def _denormalize_latents(
595
+ latents: torch.Tensor, latents_mean: torch.Tensor, latents_std: torch.Tensor, scaling_factor: float = 1.0
596
+ ) -> torch.Tensor:
597
+ latents_mean = latents_mean.view(1, -1, 1, 1, 1).to(latents.device, latents.dtype)
598
+ latents_std = latents_std.view(1, -1, 1, 1, 1).to(latents.device, latents.dtype)
599
+ latents = latents * latents_std / scaling_factor + latents_mean
600
+ return latents
601
+
602
+ # ==================== Audio Latent Methods ====================
603
+
604
+ @staticmethod
605
+ def _pack_audio_latents(
606
+ latents: torch.Tensor, patch_size: Optional[int] = None, patch_size_t: Optional[int] = None
607
+ ) -> torch.Tensor:
608
+ if patch_size is not None and patch_size_t is not None:
609
+ batch_size, num_channels, latent_length, latent_mel_bins = latents.shape
610
+ post_patch_latent_length = latent_length // patch_size_t
611
+ post_patch_mel_bins = latent_mel_bins // patch_size
612
+ latents = latents.reshape(
613
+ batch_size, -1, post_patch_latent_length, patch_size_t, post_patch_mel_bins, patch_size
614
+ )
615
+ latents = latents.permute(0, 2, 4, 1, 3, 5).flatten(3, 5).flatten(1, 2)
616
+ else:
617
+ latents = latents.transpose(1, 2).flatten(2, 3)
618
+ return latents
619
+
620
+ @staticmethod
621
+ def _unpack_audio_latents(
622
+ latents: torch.Tensor,
623
+ latent_length: int,
624
+ num_mel_bins: int,
625
+ patch_size: Optional[int] = None,
626
+ patch_size_t: Optional[int] = None,
627
+ ) -> torch.Tensor:
628
+ if patch_size is not None and patch_size_t is not None:
629
+ batch_size = latents.size(0)
630
+ latents = latents.reshape(batch_size, latent_length, num_mel_bins, -1, patch_size_t, patch_size)
631
+ latents = latents.permute(0, 3, 1, 4, 2, 5).flatten(4, 5).flatten(2, 3)
632
+ else:
633
+ latents = latents.unflatten(2, (-1, num_mel_bins)).transpose(1, 2)
634
+ return latents
635
+
636
+ @staticmethod
637
+ def _denormalize_audio_latents(latents: torch.Tensor, latents_mean: torch.Tensor, latents_std: torch.Tensor):
638
+ latents_mean = latents_mean.to(latents.device, latents.dtype)
639
+ latents_std = latents_std.to(latents.device, latents.dtype)
640
+ return (latents * latents_std) + latents_mean
641
+
642
+ @staticmethod
643
+ def _normalize_audio_latents(latents: torch.Tensor, latents_mean: torch.Tensor, latents_std: torch.Tensor):
644
+ latents_mean = latents_mean.to(latents.device, latents.dtype)
645
+ latents_std = latents_std.to(latents.device, latents.dtype)
646
+ return (latents - latents_mean) / latents_std
647
+
648
+ @staticmethod
649
+ def _patchify_audio_latents(latents: torch.Tensor) -> torch.Tensor:
650
+ batch, channels, time, freq = latents.shape
651
+ return latents.permute(0, 2, 1, 3).reshape(batch, time, channels * freq)
652
+
653
+ @staticmethod
654
+ def _unpatchify_audio_latents(latents: torch.Tensor, channels: int, freq: int) -> torch.Tensor:
655
+ batch, time, _ = latents.shape
656
+ return latents.reshape(batch, time, channels, freq).permute(0, 2, 1, 3)
657
+
658
+ def _preprocess_audio(self, audio: Union[str, torch.Tensor], target_sample_rate: int) -> torch.Tensor:
659
+ """Process audio to mel spectrogram."""
660
+ if isinstance(audio, str):
661
+ waveform, sr = torchaudio.load(audio)
662
+ else:
663
+ waveform = audio
664
+ sr = target_sample_rate
665
+
666
+ if sr != target_sample_rate:
667
+ waveform = torchaudio.functional.resample(waveform, sr, target_sample_rate)
668
+
669
+ if waveform.shape[0] == 1:
670
+ waveform = waveform.repeat(2, 1)
671
+ elif waveform.shape[0] > 2:
672
+ waveform = waveform[:2, :]
673
+
674
+ waveform = waveform.unsqueeze(0)
675
+
676
+ n_fft = 1024
677
+ mel_transform = T.MelSpectrogram(
678
+ sample_rate=target_sample_rate,
679
+ n_fft=n_fft,
680
+ win_length=n_fft,
681
+ hop_length=self.audio_hop_length,
682
+ f_min=0.0,
683
+ f_max=target_sample_rate / 2.0,
684
+ n_mels=self.audio_vae.config.mel_bins,
685
+ window_fn=torch.hann_window,
686
+ center=True,
687
+ pad_mode="reflect",
688
+ power=1.0,
689
+ mel_scale="slaney",
690
+ norm="slaney",
691
+ )
692
+
693
+ mel_spec = mel_transform(waveform)
694
+ mel_spec = torch.log(torch.clamp(mel_spec, min=1e-5))
695
+ mel_spec = mel_spec.permute(0, 1, 3, 2).contiguous()
696
+
697
+ return mel_spec
698
+
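+ # Note on the returned shape (under the audio defaults assumed in __init__, i.e.
+ # sample_rate=16000 and hop_length=160): `_preprocess_audio` yields a tensor of shape
+ # (1, 2, time_frames, n_mels), where time_frames ≈ num_samples / hop_length + 1 because
+ # the mel transform uses center padding. For example, a 5 s clip at 16 kHz gives roughly
+ # 80000 / 160 + 1 = 501 mel frames.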
699
+ # ==================== Latent Preparation ====================
700
+
701
+ def prepare_latents(
702
+ self,
703
+ image: Optional[torch.Tensor] = None,
704
+ video: Optional[torch.Tensor] = None,
705
+ video_conditioning_strength: float = 1.0,
706
+ video_conditioning_frame_idx: int = 1,
707
+ batch_size: int = 1,
708
+ num_channels_latents: int = 128,
709
+ height: int = 512,
710
+ width: int = 704,
711
+ num_frames: int = 161,
712
+ dtype: Optional[torch.dtype] = None,
713
+ device: Optional[torch.device] = None,
714
+ generator: Optional[torch.Generator] = None,
715
+ latents: Optional[torch.Tensor] = None,
716
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
717
+ """
718
+ Prepare latents for generation with optional video conditioning.
719
+
720
+ Args:
721
+ image: Input image for frame 0 conditioning
722
+ video: Video tensor for motion conditioning
723
+ video_conditioning_strength: Strength of video conditioning (0-1)
724
+ video_conditioning_frame_idx: Frame index where video conditioning starts.
725
+ - 0: Video conditioning replaces all frames including frame 0
726
+ - 1: Frame 0 is image-conditioned, frames 1+ are video-conditioned (default for face-swap)
727
+ - N: Frames 0 to N-1 are image/noise, frames N+ are video-conditioned
728
+ ... other args ...
729
+ """
730
+ latent_height = height // self.vae_spatial_compression_ratio
731
+ latent_width = width // self.vae_spatial_compression_ratio
732
+ latent_num_frames = (num_frames - 1) // self.vae_temporal_compression_ratio + 1
733
+
734
+ shape = (batch_size, num_channels_latents, latent_num_frames, latent_height, latent_width)
735
+ mask_shape = (batch_size, 1, latent_num_frames, latent_height, latent_width)
736
+
737
+ if latents is not None:
738
+ conditioning_mask = latents.new_zeros(mask_shape)
739
+ conditioning_mask[:, :, 0] = 1.0
740
+ conditioning_mask = self._pack_latents(
741
+ conditioning_mask, self.transformer_spatial_patch_size, self.transformer_temporal_patch_size
742
+ ).squeeze(-1)
743
+ return latents.to(device=device, dtype=dtype), conditioning_mask
744
+
745
+ # Initialize conditioning mask (all zeros = fully denoise)
746
+ conditioning_mask = torch.zeros(mask_shape, device=device, dtype=dtype)
747
+
748
+ # Initialize latents tensor
749
+ init_latents = torch.zeros(shape, device=device, dtype=dtype)
750
+
751
+ # Case 1: Video conditioning (motion from reference video)
752
+ if video is not None:
753
+ # Encode video through VAE
754
+ video_latents = self._encode_video_conditioning(video, generator)
755
+ video_latents = self._normalize_latents(video_latents, self.vae.latents_mean, self.vae.latents_std)
756
+
757
+ # Ensure video latents match target shape
758
+ if video_latents.shape[2] < latent_num_frames:
759
+ # Pad with last frame
760
+ pad_frames = latent_num_frames - video_latents.shape[2]
761
+ last_frame = video_latents[:, :, -1:, :, :]
762
+ video_latents = torch.cat([video_latents, last_frame.repeat(1, 1, pad_frames, 1, 1)], dim=2)
763
+ elif video_latents.shape[2] > latent_num_frames:
764
+ video_latents = video_latents[:, :, :latent_num_frames, :, :]
765
+
766
+ # Calculate the latent frame index for video conditioning
767
+ # video_conditioning_frame_idx is in pixel space, convert to latent space
768
+ latent_video_start_idx = video_conditioning_frame_idx // self.vae_temporal_compression_ratio
769
+ latent_video_start_idx = min(latent_video_start_idx, latent_num_frames - 1)
770
+
771
+ # Apply video conditioning starting from the specified frame index
772
+ # Video frames are placed starting at latent_video_start_idx
773
+ num_video_frames_to_use = latent_num_frames - latent_video_start_idx
774
+ init_latents[:, :, latent_video_start_idx:, :, :] = video_latents[:, :, :num_video_frames_to_use, :, :]
775
+
776
+ # Set conditioning mask for video frames
777
+ # strength=1.0 means fully conditioned (no denoising), strength=0.0 means fully denoised
778
+ conditioning_mask[:, :, latent_video_start_idx:] = video_conditioning_strength
779
+
780
+ # Handle image conditioning for frame 0
781
+ if image is not None:
782
+ if isinstance(generator, list):
783
+ image_latents = [
784
+ retrieve_latents(self.vae.encode(image[i].unsqueeze(0).unsqueeze(2)), generator[i], "argmax")
785
+ for i in range(batch_size)
786
+ ]
787
+ else:
788
+ image_latents = [
789
+ retrieve_latents(self.vae.encode(img.unsqueeze(0).unsqueeze(2)), generator, "argmax")
790
+ for img in image
791
+ ]
792
+ image_latents = torch.cat(image_latents, dim=0).to(dtype)
793
+ image_latents = self._normalize_latents(image_latents, self.vae.latents_mean, self.vae.latents_std)
794
+
795
+ # Replace frame 0 with image latents (face appearance)
796
+ init_latents[:, :, 0:1, :, :] = image_latents
797
+ # Frame 0 is fully conditioned
798
+ conditioning_mask[:, :, 0] = 1.0
799
+
800
+ # If no video conditioning, repeat image for all frames (image-to-video mode)
801
+ if video is None:
802
+ init_latents = image_latents.repeat(1, 1, latent_num_frames, 1, 1)
803
+
804
+ # Generate noise
805
+ noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
806
+
807
+ # Blend: conditioned regions keep init_latents, unconditioned regions get noise
808
+ latents = init_latents * conditioning_mask + noise * (1 - conditioning_mask)
809
+
810
+ # Pack for transformer
811
+ conditioning_mask = self._pack_latents(
812
+ conditioning_mask, self.transformer_spatial_patch_size, self.transformer_temporal_patch_size
813
+ ).squeeze(-1)
814
+ latents = self._pack_latents(
815
+ latents, self.transformer_spatial_patch_size, self.transformer_temporal_patch_size
816
+ )
817
+
818
+ return latents, conditioning_mask
819
+
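+ # Shape example (illustrative, assuming the default compression ratios of 32 spatial and
+ # 8 temporal, and patch sizes of 1): for height=768, width=512, num_frames=121 the latent
+ # grid is 16 x 24 x 16 (frames x height x width), so `prepare_latents` returns packed
+ # latents of shape (batch, 16 * 24 * 16, num_channels_latents) = (batch, 6144, C) together
+ # with a conditioning mask of shape (batch, 6144).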
820
+ def prepare_audio_latents(
821
+ self,
822
+ batch_size: int = 1,
823
+ num_channels_latents: int = 8,
824
+ num_mel_bins: int = 64,
825
+ num_frames: int = 121,
826
+ frame_rate: float = 25.0,
827
+ sampling_rate: int = 16000,
828
+ hop_length: int = 160,
829
+ dtype: Optional[torch.dtype] = None,
830
+ device: Optional[torch.device] = None,
831
+ generator: Optional[torch.Generator] = None,
832
+ audio_input: Optional[Union[str, torch.Tensor]] = None,
833
+ latents: Optional[torch.Tensor] = None,
834
+ ) -> Tuple[torch.Tensor, int, Optional[torch.Tensor]]:
835
+ duration_s = num_frames / frame_rate
836
+ latents_per_second = (
837
+ float(sampling_rate) / float(hop_length) / float(self.audio_vae_temporal_compression_ratio)
838
+ )
839
+ target_length = round(duration_s * latents_per_second)
840
+
841
+ if latents is not None:
842
+ return latents.to(device=device, dtype=dtype), target_length, None
843
+
844
+ latent_mel_bins = num_mel_bins // self.audio_vae_mel_compression_ratio
845
+
846
+ if audio_input is not None:
847
+ mel_spec = self._preprocess_audio(audio_input, sampling_rate).to(device=device)
848
+ mel_spec = mel_spec.to(dtype=self.audio_vae.dtype)
849
+ init_latents = self.audio_vae.encode(mel_spec).latent_dist.sample(generator)
850
+ init_latents = init_latents.to(dtype=dtype)
851
+
852
+ latent_channels = init_latents.shape[1]
853
+ latent_freq = init_latents.shape[3]
854
+ init_latents_patched = self._patchify_audio_latents(init_latents)
855
+ init_latents_patched = self._normalize_audio_latents(
856
+ init_latents_patched, self.audio_vae.latents_mean, self.audio_vae.latents_std
857
+ )
858
+ init_latents = self._unpatchify_audio_latents(init_latents_patched, latent_channels, latent_freq)
859
+
860
+ current_len = init_latents.shape[2]
861
+ if current_len < target_length:
862
+ padding = target_length - current_len
863
+ init_latents = torch.nn.functional.pad(init_latents, (0, 0, 0, padding))
864
+ elif current_len > target_length:
865
+ init_latents = init_latents[:, :, :target_length, :]
866
+
867
+ noise = randn_tensor(init_latents.shape, generator=generator, device=device, dtype=dtype)
868
+
869
+ if init_latents.shape[0] != batch_size:
870
+ init_latents = init_latents.repeat(batch_size, 1, 1, 1)
871
+ noise = noise.repeat(batch_size, 1, 1, 1)
872
+
873
+ packed_noise = self._pack_audio_latents(noise)
874
+
875
+ return packed_noise, target_length, init_latents
876
+
877
+ shape = (batch_size, num_channels_latents, target_length, latent_mel_bins)
878
+ latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
879
+ latents = self._pack_audio_latents(latents)
880
+
881
+ return latents, target_length, None
882
+
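+ # Worked example (illustrative, assuming the defaults sample_rate=16000, hop_length=160,
+ # and an audio VAE temporal compression of 4): latents_per_second = 16000 / 160 / 4 = 25,
+ # so for num_frames=121 at frame_rate=25 the duration is 4.84 s and
+ # target_length = round(4.84 * 25) = 121 audio latent frames.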
883
+ # ==================== Properties ====================
884
+
885
+ @property
886
+ def guidance_scale(self):
887
+ return self._guidance_scale
888
+
889
+ @property
890
+ def guidance_rescale(self):
891
+ return self._guidance_rescale
892
+
893
+ @property
894
+ def do_classifier_free_guidance(self):
895
+ return self._guidance_scale > 1.0
896
+
897
+ @property
898
+ def num_timesteps(self):
899
+ return self._num_timesteps
900
+
901
+ @property
902
+ def current_timestep(self):
903
+ return self._current_timestep
904
+
905
+ @property
906
+ def attention_kwargs(self):
907
+ return self._attention_kwargs
908
+
909
+ @property
910
+ def interrupt(self):
911
+ return self._interrupt
912
+
913
+ def _get_audio_duration(self, audio: Union[str, torch.Tensor], sample_rate: int) -> float:
914
+ if isinstance(audio, str):
915
+ info = torchaudio.info(audio)
916
+ return info.num_frames / info.sample_rate
917
+ else:
918
+ num_samples = audio.shape[-1]
919
+ return num_samples / sample_rate
920
+
921
+ # ==================== Main Call ====================
922
+
923
+ @torch.no_grad()
924
+ @replace_example_docstring(EXAMPLE_DOC_STRING)
925
+ def __call__(
926
+ self,
927
+ image: PipelineImageInput = None,
928
+ video: Optional[Union[str, List[Image.Image], torch.Tensor]] = None,
929
+ video_conditioning_strength: float = 1.0,
930
+ video_conditioning_frame_idx: int = 1,
931
+ audio: Optional[Union[str, torch.Tensor]] = None,
932
+ prompt: Union[str, List[str]] = None,
933
+ negative_prompt: Optional[Union[str, List[str]]] = None,
934
+ height: int = 512,
935
+ width: int = 768,
936
+ num_frames: Optional[int] = None,
937
+ max_frames: int = 257,
938
+ frame_rate: float = 24.0,
939
+ num_inference_steps: int = 40,
940
+ timesteps: List[int] = None,
941
+ sigmas: Optional[List[float]] = None,
942
+ guidance_scale: float = 4.0,
943
+ guidance_rescale: float = 0.0,
944
+ num_videos_per_prompt: Optional[int] = 1,
945
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
946
+ latents: Optional[torch.Tensor] = None,
947
+ audio_latents: Optional[torch.Tensor] = None,
948
+ prompt_embeds: Optional[torch.Tensor] = None,
949
+ prompt_attention_mask: Optional[torch.Tensor] = None,
950
+ negative_prompt_embeds: Optional[torch.Tensor] = None,
951
+ negative_prompt_attention_mask: Optional[torch.Tensor] = None,
952
+ decode_timestep: Union[float, List[float]] = 0.0,
953
+ decode_noise_scale: Optional[Union[float, List[float]]] = None,
954
+ output_type: Optional[str] = "pil",
955
+ return_dict: bool = True,
956
+ attention_kwargs: Optional[Dict[str, Any]] = None,
957
+ callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
958
+ callback_on_step_end_tensor_inputs: List[str] = ["latents"],
959
+ max_sequence_length: int = 1024,
960
+ ):
961
+ r"""
962
+ Generate avatar video with audio and optional video conditioning.
963
+
964
+ Args:
965
+ image (`PipelineImageInput`):
966
+ The input image (face/appearance) to condition frame 0.
967
+ video (`str`, `List[PIL.Image]`, or `torch.Tensor`, *optional*):
968
+ Reference video for motion conditioning. Can be:
969
+ - Path to a video file
970
+ - List of PIL Images
971
+ - Tensor of shape (F, H, W, C) or (F, C, H, W)
972
+ video_conditioning_strength (`float`, *optional*, defaults to 1.0):
973
+ How strongly to condition on the reference video (0.0-1.0).
974
+ 1.0 = fully conditioned, 0.0 = no conditioning.
975
+ video_conditioning_frame_idx (`int`, *optional*, defaults to 1):
976
+ Frame index where video conditioning starts (in pixel/frame space).
977
+ - 0: Video conditioning replaces all frames including frame 0
978
+ - 1: Frame 0 is image-conditioned, frames 1+ are video-conditioned (default for face-swap)
979
+ - N: Frames 0 to N-1 are image/noise, frames N+ are video-conditioned
980
+ audio (`str` or `torch.Tensor`, *optional*):
981
+ Audio for lip-sync. Can be path to audio/video file or waveform tensor.
982
+ prompt (`str` or `List[str]`, *optional*):
983
+ Text prompt. For face-swap, include "head_swap" trigger.
984
+ ... (other args same as base pipeline) ...
985
+
986
+ Returns:
987
+ [`LTX2PipelineOutput`] or `tuple`: Generated video and audio.
988
+ """
989
+
990
+ if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)):
991
+ callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs
992
+
993
+ # Calculate num_frames from audio duration if not provided
994
+ if num_frames is None:
995
+ if audio is not None:
996
+ audio_duration = self._get_audio_duration(audio, self.audio_sampling_rate)
997
+ calculated_frames = int(audio_duration * frame_rate) + 1
998
+ num_frames = min(calculated_frames, max_frames)
999
+ num_frames = ((num_frames - 1) // self.vae_temporal_compression_ratio) * self.vae_temporal_compression_ratio + 1
1000
+ num_frames = max(num_frames, 9)
1001
+ logger.info(f"Audio duration: {audio_duration:.2f}s -> num_frames: {num_frames}")
1002
+ else:
1003
+ num_frames = 121
1004
+
1005
+ self.check_inputs(
1006
+ prompt=prompt,
1007
+ height=height,
1008
+ width=width,
1009
+ callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs,
1010
+ prompt_embeds=prompt_embeds,
1011
+ negative_prompt_embeds=negative_prompt_embeds,
1012
+ prompt_attention_mask=prompt_attention_mask,
1013
+ negative_prompt_attention_mask=negative_prompt_attention_mask,
1014
+ )
1015
+
1016
+ self._guidance_scale = guidance_scale
1017
+ self._guidance_rescale = guidance_rescale
1018
+ self._attention_kwargs = attention_kwargs
1019
+ self._interrupt = False
1020
+ self._current_timestep = None
1021
+
1022
+ if prompt is not None and isinstance(prompt, str):
1023
+ batch_size = 1
1024
+ elif prompt is not None and isinstance(prompt, list):
1025
+ batch_size = len(prompt)
1026
+ else:
1027
+ batch_size = prompt_embeds.shape[0]
1028
+
1029
+ device = self._execution_device
1030
+
1031
+ # Encode prompts
1032
+ (
1033
+ prompt_embeds,
1034
+ prompt_attention_mask,
1035
+ negative_prompt_embeds,
1036
+ negative_prompt_attention_mask,
1037
+ ) = self.encode_prompt(
1038
+ prompt=prompt,
1039
+ negative_prompt=negative_prompt,
1040
+ do_classifier_free_guidance=self.do_classifier_free_guidance,
1041
+ num_videos_per_prompt=num_videos_per_prompt,
1042
+ prompt_embeds=prompt_embeds,
1043
+ negative_prompt_embeds=negative_prompt_embeds,
1044
+ prompt_attention_mask=prompt_attention_mask,
1045
+ negative_prompt_attention_mask=negative_prompt_attention_mask,
1046
+ max_sequence_length=max_sequence_length,
1047
+ device=device,
1048
+ )
1049
+ if self.do_classifier_free_guidance:
1050
+ prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
1051
+ prompt_attention_mask = torch.cat([negative_prompt_attention_mask, prompt_attention_mask], dim=0)
1052
+
1053
+ additive_attention_mask = (1 - prompt_attention_mask.to(prompt_embeds.dtype)) * -1000000.0
1054
+ connector_prompt_embeds, connector_audio_prompt_embeds, connector_attention_mask = self.connectors(
1055
+ prompt_embeds, additive_attention_mask, additive_mask=True
1056
+ )
1057
+
1058
+ # Preprocess image
1059
+ if latents is None and image is not None:
1060
+ image = self.video_processor.preprocess(image, height=height, width=width)
1061
+ image = image.to(device=device, dtype=prompt_embeds.dtype)
1062
+
1063
+ # Preprocess video conditioning
1064
+ video_tensor = None
1065
+ if video is not None:
1066
+ video_tensor = self._load_video_frames(
1067
+ video=video,
1068
+ height=height,
1069
+ width=width,
1070
+ num_frames=num_frames,
1071
+ device=device,
1072
+ dtype=prompt_embeds.dtype,
1073
+ )
1074
+
1075
+ # Prepare latents with video conditioning
1076
+ num_channels_latents = self.transformer.config.in_channels
1077
+ latents, conditioning_mask = self.prepare_latents(
1078
+ image=image,
1079
+ video=video_tensor,
1080
+ video_conditioning_strength=video_conditioning_strength,
1081
+ video_conditioning_frame_idx=video_conditioning_frame_idx,
1082
+ batch_size=batch_size * num_videos_per_prompt,
1083
+ num_channels_latents=num_channels_latents,
1084
+ height=height,
1085
+ width=width,
1086
+ num_frames=num_frames,
1087
+ dtype=torch.float32,
1088
+ device=device,
1089
+ generator=generator,
1090
+ latents=latents,
1091
+ )
1092
+ if self.do_classifier_free_guidance:
1093
+ conditioning_mask = torch.cat([conditioning_mask, conditioning_mask])
1094
+
1095
+ # Prepare audio latents
1096
+ num_mel_bins = self.audio_vae.config.mel_bins if getattr(self, "audio_vae", None) is not None else 64
1097
+ latent_mel_bins = num_mel_bins // self.audio_vae_mel_compression_ratio
1098
+
1099
+ num_channels_latents_audio = (
1100
+ self.audio_vae.config.latent_channels if getattr(self, "audio_vae", None) is not None else 8
1101
+ )
1102
+
1103
+ audio_latents, audio_num_frames, clean_audio_latents = self.prepare_audio_latents(
1104
+ batch_size * num_videos_per_prompt,
1105
+ num_channels_latents=num_channels_latents_audio,
1106
+ num_mel_bins=num_mel_bins,
1107
+ num_frames=num_frames,
1108
+ frame_rate=frame_rate,
1109
+ sampling_rate=self.audio_sampling_rate,
1110
+ hop_length=self.audio_hop_length,
1111
+ dtype=torch.float32,
1112
+ device=device,
1113
+ generator=generator,
1114
+ latents=audio_latents,
1115
+ audio_input=audio,
1116
+ )
1117
+
1118
+ packed_clean_audio_latents = None
1119
+ if clean_audio_latents is not None:
1120
+ packed_clean_audio_latents = self._pack_audio_latents(clean_audio_latents)
1121
+
1122
+ latent_num_frames = (num_frames - 1) // self.vae_temporal_compression_ratio + 1
1123
+ latent_height = height // self.vae_spatial_compression_ratio
1124
+ latent_width = width // self.vae_spatial_compression_ratio
1125
+ video_sequence_length = latent_num_frames * latent_height * latent_width
1126
+
1127
+ if sigmas is None:
1128
+ sigmas = np.linspace(1.0, 1 / num_inference_steps, num_inference_steps)
1129
+
1130
+ mu = calculate_shift(
1131
+ video_sequence_length,
1132
+ self.scheduler.config.get("base_image_seq_len", 1024),
1133
+ self.scheduler.config.get("max_image_seq_len", 4096),
1134
+ self.scheduler.config.get("base_shift", 0.95),
1135
+ self.scheduler.config.get("max_shift", 2.05),
1136
+ )
1137
+
1138
+ audio_scheduler = copy.deepcopy(self.scheduler)
1139
+ _, _ = retrieve_timesteps(
1140
+ audio_scheduler,
1141
+ num_inference_steps,
1142
+ device,
1143
+ timesteps,
1144
+ sigmas=sigmas,
1145
+ mu=mu,
1146
+ )
1147
+ timesteps, num_inference_steps = retrieve_timesteps(
1148
+ self.scheduler,
1149
+ num_inference_steps,
1150
+ device,
1151
+ timesteps,
1152
+ sigmas=sigmas,
1153
+ mu=mu,
1154
+ )
1155
+ num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
1156
+ self._num_timesteps = len(timesteps)
1157
+
1158
+ rope_interpolation_scale = (
1159
+ self.vae_temporal_compression_ratio / frame_rate,
1160
+ self.vae_spatial_compression_ratio,
1161
+ self.vae_spatial_compression_ratio,
1162
+ )
1163
+ video_coords = self.transformer.rope.prepare_video_coords(
1164
+ latents.shape[0], latent_num_frames, latent_height, latent_width, latents.device, fps=frame_rate
1165
+ )
1166
+ audio_coords = self.transformer.audio_rope.prepare_audio_coords(
1167
+ audio_latents.shape[0], audio_num_frames, audio_latents.device
1168
+ )
1169
+
1170
+ # Denoising loop
1171
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
1172
+ for i, t in enumerate(timesteps):
1173
+ if self.interrupt:
1174
+ continue
1175
+
1176
+ self._current_timestep = t
1177
+
1178
+ if packed_clean_audio_latents is not None:
1179
+ audio_latents_input = packed_clean_audio_latents.to(dtype=prompt_embeds.dtype)
1180
+ else:
1181
+ audio_latents_input = audio_latents.to(dtype=prompt_embeds.dtype)
1182
+
1183
+ latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
1184
+ latent_model_input = latent_model_input.to(prompt_embeds.dtype)
1185
+ audio_latent_model_input = (
1186
+ torch.cat([audio_latents_input] * 2) if self.do_classifier_free_guidance else audio_latents_input
1187
+ )
1188
+ audio_latent_model_input = audio_latent_model_input.to(prompt_embeds.dtype)
1189
+
1190
+ timestep = t.expand(latent_model_input.shape[0])
1191
+ video_timestep = timestep.unsqueeze(-1) * (1 - conditioning_mask)
1192
+
1193
+ if packed_clean_audio_latents is not None:
1194
+ audio_timestep = torch.zeros_like(timestep)
1195
+ else:
1196
+ audio_timestep = timestep
1197
+
1198
+ with self.transformer.cache_context("cond_uncond"):
1199
+ noise_pred_video, noise_pred_audio = self.transformer(
1200
+ hidden_states=latent_model_input,
1201
+ audio_hidden_states=audio_latent_model_input,
1202
+ encoder_hidden_states=connector_prompt_embeds,
1203
+ audio_encoder_hidden_states=connector_audio_prompt_embeds,
1204
+ timestep=video_timestep,
1205
+ audio_timestep=audio_timestep,
1206
+ encoder_attention_mask=connector_attention_mask,
1207
+ audio_encoder_attention_mask=connector_attention_mask,
1208
+ num_frames=latent_num_frames,
1209
+ height=latent_height,
1210
+ width=latent_width,
1211
+ fps=frame_rate,
1212
+ audio_num_frames=audio_num_frames,
1213
+ video_coords=video_coords,
1214
+ audio_coords=audio_coords,
1215
+ attention_kwargs=attention_kwargs,
1216
+ return_dict=False,
1217
+ )
1218
+ noise_pred_video = noise_pred_video.float()
1219
+ noise_pred_audio = noise_pred_audio.float()
1220
+
1221
+ if self.do_classifier_free_guidance:
1222
+ noise_pred_video_uncond, noise_pred_video_text = noise_pred_video.chunk(2)
1223
+ noise_pred_video = noise_pred_video_uncond + self.guidance_scale * (
1224
+ noise_pred_video_text - noise_pred_video_uncond
1225
+ )
1226
+
1227
+ noise_pred_audio_uncond, noise_pred_audio_text = noise_pred_audio.chunk(2)
1228
+ noise_pred_audio = noise_pred_audio_uncond + self.guidance_scale * (
1229
+ noise_pred_audio_text - noise_pred_audio_uncond
1230
+ )
1231
+
1232
+ if self.guidance_rescale > 0:
1233
+ noise_pred_video = rescale_noise_cfg(
1234
+ noise_pred_video, noise_pred_video_text, guidance_rescale=self.guidance_rescale
1235
+ )
1236
+ noise_pred_audio = rescale_noise_cfg(
1237
+ noise_pred_audio, noise_pred_audio_text, guidance_rescale=self.guidance_rescale
1238
+ )
1239
+
1240
+ noise_pred_video = self._unpack_latents(
1241
+ noise_pred_video,
1242
+ latent_num_frames,
1243
+ latent_height,
1244
+ latent_width,
1245
+ self.transformer_spatial_patch_size,
1246
+ self.transformer_temporal_patch_size,
1247
+ )
1248
+ latents = self._unpack_latents(
1249
+ latents,
1250
+ latent_num_frames,
1251
+ latent_height,
1252
+ latent_width,
1253
+ self.transformer_spatial_patch_size,
1254
+ self.transformer_temporal_patch_size,
1255
+ )
1256
+
1257
+ noise_pred_video = noise_pred_video[:, :, 1:]
1258
+ noise_latents = latents[:, :, 1:]
1259
+ pred_latents = self.scheduler.step(noise_pred_video, t, noise_latents, return_dict=False)[0]
1260
+
1261
+ latents = torch.cat([latents[:, :, :1], pred_latents], dim=2)
1262
+ latents = self._pack_latents(
1263
+ latents, self.transformer_spatial_patch_size, self.transformer_temporal_patch_size
1264
+ )
1265
+
1266
+ if packed_clean_audio_latents is None:
1267
+ audio_latents = audio_scheduler.step(noise_pred_audio, t, audio_latents, return_dict=False)[0]
1268
+
1269
+ if callback_on_step_end is not None:
1270
+ callback_kwargs = {}
1271
+ for k in callback_on_step_end_tensor_inputs:
1272
+ callback_kwargs[k] = locals()[k]
1273
+ callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
1274
+ latents = callback_outputs.pop("latents", latents)
1275
+ prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
1276
+
1277
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
1278
+ progress_bar.update()
1279
+
1280
+ if XLA_AVAILABLE:
1281
+ xm.mark_step()
1282
+
1283
+ # Decode
1284
+ latents = self._unpack_latents(
1285
+ latents,
1286
+ latent_num_frames,
1287
+ latent_height,
1288
+ latent_width,
1289
+ self.transformer_spatial_patch_size,
1290
+ self.transformer_temporal_patch_size,
1291
+ )
1292
+ latents = self._denormalize_latents(
1293
+ latents, self.vae.latents_mean, self.vae.latents_std, self.vae.config.scaling_factor
1294
+ )
1295
+
1296
+ if clean_audio_latents is not None:
1297
+ latent_channels = clean_audio_latents.shape[1]
1298
+ latent_freq = clean_audio_latents.shape[3]
1299
+ audio_patched = self._patchify_audio_latents(clean_audio_latents)
1300
+ audio_patched = self._denormalize_audio_latents(
1301
+ audio_patched, self.audio_vae.latents_mean, self.audio_vae.latents_std
1302
+ )
1303
+ audio_latents_for_decode = self._unpatchify_audio_latents(audio_patched, latent_channels, latent_freq)
1304
+ else:
1305
+ audio_latents_for_decode = self._denormalize_audio_latents(
1306
+ audio_latents, self.audio_vae.latents_mean, self.audio_vae.latents_std
1307
+ )
1308
+ audio_latents_for_decode = self._unpack_audio_latents(
1309
+ audio_latents_for_decode, audio_num_frames, num_mel_bins=latent_mel_bins
1310
+ )
1311
+
1312
+ if output_type == "latent":
1313
+ video = latents
1314
+ audio_output = audio_latents_for_decode
1315
+ else:
1316
+ latents = latents.to(prompt_embeds.dtype)
1317
+
1318
+ if not self.vae.config.timestep_conditioning:
1319
+ timestep = None
1320
+ else:
1321
+ noise = randn_tensor(latents.shape, generator=generator, device=device, dtype=latents.dtype)
1322
+ if not isinstance(decode_timestep, list):
1323
+ decode_timestep = [decode_timestep] * batch_size
1324
+ if decode_noise_scale is None:
1325
+ decode_noise_scale = decode_timestep
1326
+ elif not isinstance(decode_noise_scale, list):
1327
+ decode_noise_scale = [decode_noise_scale] * batch_size
1328
+
1329
+ timestep = torch.tensor(decode_timestep, device=device, dtype=latents.dtype)
1330
+ decode_noise_scale = torch.tensor(decode_noise_scale, device=device, dtype=latents.dtype)[
1331
+ :, None, None, None, None
1332
+ ]
1333
+ latents = (1 - decode_noise_scale) * latents + decode_noise_scale * noise
1334
+
1335
+ latents = latents.to(self.vae.dtype)
1336
+ video = self.vae.decode(latents, timestep, return_dict=False)[0]
1337
+ video = self.video_processor.postprocess_video(video, output_type=output_type)
1338
+
1339
+ audio_latents_for_decode = audio_latents_for_decode.to(self.audio_vae.dtype)
1340
+ generated_mel_spectrograms = self.audio_vae.decode(audio_latents_for_decode, return_dict=False)[0]
1341
+ audio_output = self.vocoder(generated_mel_spectrograms)
1342
+
1343
+ self.maybe_free_model_hooks()
1344
+
1345
+ if not return_dict:
1346
+ return (video, audio_output)
1347
+
1348
+ return LTX2PipelineOutput(frames=video, audio=audio_output)