"""
Flexible Batch I2V Generator with Temporal Consistency
Generates N frames at a time (1, 2, 3, etc.) while maintaining temporal consistency
Optimized for Image-to-Video models with reference frame initialization
"""

import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import List, Optional, Tuple, Dict, Any, Union
import numpy as np
from collections import deque
import math
from PIL import Image
import torchvision.transforms as transforms

class TemporalConsistencyBuffer:
    """Enhanced temporal buffer for flexible batch generation"""

    def __init__(self, buffer_size: int = 8, feature_dim: int = 512):
        self.buffer_size = buffer_size
        self.feature_dim = feature_dim
        self.frame_features = deque(maxlen=buffer_size)
        self.frame_latents = deque(maxlen=buffer_size)
        self.frame_images = deque(maxlen=buffer_size)
        self.motion_vectors = deque(maxlen=buffer_size - 1)
        self.temporal_weights = deque(maxlen=buffer_size)

    def add_frames(self, features: torch.Tensor, latents: torch.Tensor, images: Optional[torch.Tensor] = None, batch_size: int = 1):
        """Add batch of frames to temporal buffer"""
        for i in range(batch_size):
            frame_feat = features[i:i+1] if features.dim() > 3 else features
            frame_lat = latents[i:i+1] if latents.dim() > 3 else latents
            frame_img = images[i:i+1] if images is not None and images.dim() > 3 else images

            if len(self.frame_features) > 0:
                motion = frame_feat - self.frame_features[-1]
                self.motion_vectors.append(motion)

            self.frame_features.append(frame_feat)
            self.frame_latents.append(frame_lat)
            if frame_img is not None:
                self.frame_images.append(frame_img)

            weight = 1.0 / (len(self.frame_features) + 1)
            self.temporal_weights.append(weight)

    def get_reference_frame(self) -> Optional[torch.Tensor]:
        """Get the most recent frame as reference for I2V"""
        if len(self.frame_images) > 0:
            return self.frame_images[-1]
        elif len(self.frame_latents) > 0:
            return self.frame_latents[-1]
        return None

    def get_temporal_context(self, num_context_frames: int = 4) -> Dict[str, torch.Tensor]:
        """Get weighted temporal context for next frame batch"""
        if len(self.frame_features) == 0:
            return {"has_context": False}

        context_size = min(num_context_frames, len(self.frame_features))
        recent_features = list(self.frame_features)[-context_size:]
        recent_latents = list(self.frame_latents)[-context_size:]
        recent_weights = list(self.temporal_weights)[-context_size:]

        stacked_features = torch.cat(recent_features, dim=0)
        stacked_latents = torch.cat(recent_latents, dim=0)
        weights = torch.tensor(recent_weights, device=stacked_features.device)

        predicted_motions = []
        if len(self.motion_vectors) >= 2:
            recent_motions = list(self.motion_vectors)[-3:]
            for step in range(1, 4):
                if len(recent_motions) >= 2:
                    motion_pred = (
                        recent_motions[-1] * 1.5 -
                        recent_motions[-2] * 0.5
                    )
                    if len(recent_motions) >= 3:
                        motion_pred += recent_motions[-3] * 0.1
                else:
                    motion_pred = recent_motions[-1] if recent_motions else None
                predicted_motions.append(motion_pred)

        return {
            "has_context": True,
            "frame_features": stacked_features,
            "frame_latents": stacked_latents,
            "temporal_weights": weights,
            "predicted_motions": predicted_motions,
            "sequence_length": len(self.frame_features),
            "reference_frame": self.get_reference_frame()
        }

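# Illustrative usage (not part of the original module): the buffer takes per-frame
# encoder feature maps and latents and returns a context dict for the next batch.
# The shapes below assume a 64x64 latent grid and feature_dim=512.
#
#   buf = TemporalConsistencyBuffer(buffer_size=8, feature_dim=512)
#   feats = torch.randn(2, 512, 64, 64)   # encoder features for two frames
#   lats = torch.randn(2, 4, 64, 64)      # matching latents
#   buf.add_frames(feats, lats, images=None, batch_size=2)
#   ctx = buf.get_temporal_context(num_context_frames=4)
#   # ctx["has_context"] -> True, ctx["frame_features"].shape -> [2, 512, 64, 64]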

class FlexibleTemporalAttention(nn.Module):
    """Flexible attention that handles variable batch sizes"""

    def __init__(self, dim: int, num_heads: int = 8, max_frames: int = 16):
        super().__init__()
        self.dim = dim
        self.num_heads = num_heads
        self.head_dim = dim // num_heads
        self.scale = self.head_dim ** -0.5
        self.max_frames = max_frames

        self.qkv = nn.Linear(dim, dim * 3, bias=False)
        self.proj = nn.Linear(dim, dim)

        self.temporal_pos_embed = nn.Parameter(torch.randn(1, max_frames, dim) * 0.02)
        self.frame_type_embed = nn.Parameter(torch.randn(3, dim) * 0.02)

        self.cross_frame_norm = nn.LayerNorm(dim)
        self.cross_frame_mlp = nn.Sequential(
            nn.Linear(dim, dim * 2),
            nn.GELU(),
            nn.Linear(dim * 2, dim)
        )

    def forward(self, current_frames: torch.Tensor, temporal_context: Dict[str, Any], num_current_frames: int = 1):
        """
        current_frames: [B*N, H*W, C] where N is the number of frames being generated
        temporal_context: dict with past frame information
        """
        B_times_N, HW, C = current_frames.shape
        B = B_times_N // num_current_frames

        if not temporal_context.get("has_context", False):
            return current_frames

        current = current_frames.view(B, num_current_frames, HW, C)

        past_features = temporal_context["frame_features"]
        T, _, H, W = past_features.shape
        past_features = past_features.view(T, C, H * W).permute(0, 2, 1)
        past_features = past_features.unsqueeze(0).expand(B, -1, -1, -1)

        all_frames = torch.cat([past_features, current], dim=1)
        total_frames = T + num_current_frames

        pos_embed = self.temporal_pos_embed[:, :total_frames]

        frame_type_ids = torch.cat([
            torch.zeros(T, device=current_frames.device),
            torch.ones(num_current_frames, device=current_frames.device)
        ]).long()
        type_embed = self.frame_type_embed[frame_type_ids]

        all_frames = all_frames + pos_embed.unsqueeze(2) + type_embed.unsqueeze(0).unsqueeze(2)

        all_frames_flat = all_frames.view(B, total_frames * HW, C)

        qkv = self.qkv(all_frames_flat).reshape(B, -1, 3, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4)
        q, k, v = qkv[0], qkv[1], qkv[2]

        attn = (q @ k.transpose(-2, -1)) * self.scale

        # Frame-level causal mask: queries may attend to every spatial position of
        # their own frame and of earlier frames, but not to later frames
        frame_ids = torch.arange(total_frames, device=current_frames.device).repeat_interleave(HW)
        mask = frame_ids.unsqueeze(0) > frame_ids.unsqueeze(1)
        attn = attn.masked_fill(mask.unsqueeze(0).unsqueeze(0), float('-inf'))

        attn = attn.softmax(dim=-1)
        out = (attn @ v).transpose(1, 2).reshape(B, total_frames * HW, C)

        current_start = T * HW
        enhanced_current = out[:, current_start:]
        enhanced_current = self.proj(enhanced_current)

        if num_current_frames > 1:
            enhanced_current = enhanced_current.view(B, num_current_frames, HW, C)
            for i in range(num_current_frames):
                frame_i = enhanced_current[:, i]

                other_frames = torch.cat([
                    enhanced_current[:, :i],
                    enhanced_current[:, i+1:]
                ], dim=1) if num_current_frames > 1 else None

                if other_frames is not None:
                    cross_context = other_frames.mean(dim=1)
                    frame_i_norm = self.cross_frame_norm(frame_i + cross_context)
                    frame_i = frame_i + self.cross_frame_mlp(frame_i_norm)
                enhanced_current[:, i] = frame_i

            enhanced_current = enhanced_current.view(B * num_current_frames, HW, C)

        return enhanced_current

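# Illustrative shapes (not part of the original module), assuming feature_dim=512,
# a 64x64 latent grid (HW = 4096), T = 4 buffered context frames and N = 2 frames
# being generated per step:
#
#   current_frames:                       [B*2, 4096, 512]
#   temporal_context["frame_features"]:   [4, 512, 64, 64]
#   attention sequence length per sample: (4 + 2) * 4096 tokens
#   returned enhanced tokens:             [B*2, 4096, 512]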

class FlexibleI2VDiffuser(nn.Module):
    """Flexible I2V diffusion model that generates N frames at a time"""

    def __init__(
        self,
        base_diffusion_model,
        feature_dim: int = 512,
        temporal_buffer_size: int = 8,
        num_attention_heads: int = 8,
        max_batch_frames: int = 3
    ):
        super().__init__()
        self.base_model = base_diffusion_model
        self.feature_dim = feature_dim
        self.temporal_buffer_size = temporal_buffer_size
        self.max_batch_frames = max_batch_frames

        self.image_encoder = nn.Sequential(
            nn.Conv2d(3, feature_dim // 4, 7, padding=3),
            nn.GroupNorm(8, feature_dim // 4),
            nn.SiLU(),
            nn.Conv2d(feature_dim // 4, feature_dim // 2, 3, padding=1, stride=2),
            nn.GroupNorm(8, feature_dim // 2),
            nn.SiLU(),
            nn.Conv2d(feature_dim // 2, feature_dim, 3, padding=1, stride=2),
            nn.GroupNorm(8, feature_dim),
            nn.SiLU()
        )

        self.latent_encoder = nn.Conv2d(
            base_diffusion_model.in_channels, feature_dim, 3, padding=1
        )

        self.temporal_attention = FlexibleTemporalAttention(
            feature_dim, num_attention_heads, max_batch_frames * 4
        )

        self.reference_adapter = nn.Sequential(
            nn.Conv2d(feature_dim * 2, feature_dim, 1),
            nn.GroupNorm(8, feature_dim),
            nn.SiLU()
        )

        self.motion_conditioner = nn.Sequential(
            nn.Linear(feature_dim, feature_dim * 2),
            nn.GELU(),
            nn.Linear(feature_dim * 2, feature_dim)
        )

        self.frame_consistency_net = nn.Sequential(
            nn.Conv3d(feature_dim, feature_dim, (3, 3, 3), padding=(1, 1, 1)),
            nn.GroupNorm(8, feature_dim),
            nn.SiLU(),
            nn.Conv3d(feature_dim, feature_dim, (1, 3, 3), padding=(0, 1, 1))
        )

        # Projects the conditioning features back to the latent channel count so
        # they can be added to the noisy latents in apply_i2v_conditioning()
        self.feature_to_latent = nn.Conv2d(
            feature_dim, base_diffusion_model.in_channels, 1
        )

        self.temporal_buffer = TemporalConsistencyBuffer(temporal_buffer_size, feature_dim)

    def encode_reference_image(self, image: torch.Tensor) -> torch.Tensor:
        """Encode reference image for I2V conditioning"""
        if image.shape[1] == 3:
            return self.image_encoder(image)
        else:
            return self.latent_encoder(image)

    def apply_i2v_conditioning(
        self,
        current_latents: torch.Tensor,
        temporal_context: Dict[str, Any],
        num_frames: int = 1
    ) -> torch.Tensor:
        """Apply I2V conditioning with flexible frame count"""

        B_times_N, C, H, W = current_latents.shape
        B = B_times_N // num_frames

        current_features = self.latent_encoder(current_latents)

        if not temporal_context.get("has_context", False):
            return current_latents

        current_flat = current_features.flatten(2).transpose(1, 2)
        enhanced_features = self.temporal_attention(current_flat, temporal_context, num_frames)
        enhanced_features = enhanced_features.transpose(1, 2).reshape(B_times_N, -1, H, W)

        if temporal_context.get("reference_frame") is not None:
            ref_frame = temporal_context["reference_frame"]
            ref_features = self.encode_reference_image(ref_frame)

            # The image encoder works at a different resolution than the latents,
            # so resize the reference features to the latent grid and tile them
            # across the full batch of frames
            if ref_features.shape[-2:] != (H, W):
                ref_features = F.interpolate(ref_features, size=(H, W), mode="bilinear", align_corners=False)
            ref_features = ref_features.repeat(B_times_N // ref_features.shape[0], 1, 1, 1)

            combined_features = torch.cat([enhanced_features, ref_features], dim=1)
            conditioned_features = self.reference_adapter(combined_features)
        else:
            conditioned_features = enhanced_features

        if num_frames > 1:
            batch_features = conditioned_features.view(B, num_frames, -1, H, W)
            batch_features = batch_features.permute(0, 2, 1, 3, 4)

            consistent_features = self.frame_consistency_net(batch_features)
            consistent_features = consistent_features.permute(0, 2, 1, 3, 4)
            conditioned_features = consistent_features.reshape(B_times_N, -1, H, W)

        if temporal_context.get("predicted_motions"):
            motions = temporal_context["predicted_motions"][:num_frames]
            for i, motion in enumerate(motions):
                if motion is not None:
                    # Latents are laid out as B contiguous groups of num_frames,
                    # so frame i of batch element b sits at index b * num_frames + i
                    frame_idx = torch.arange(B, device=current_latents.device) * num_frames + i
                    motion_flat = motion.flatten(2).transpose(1, 2).mean(dim=1)
                    motion_cond = self.motion_conditioner(motion_flat)
                    motion_cond = motion_cond.unsqueeze(-1).unsqueeze(-1)
                    conditioned_features[frame_idx] += motion_cond

        # Project the conditioning features back to latent channels before blending
        alpha = 0.4
        enhanced_latents = current_latents + alpha * self.feature_to_latent(conditioned_features)

        return enhanced_latents

    def forward(
        self,
        noisy_latents: torch.Tensor,
        timestep: torch.Tensor,
        text_embeddings: torch.Tensor,
        num_frames: int = 1,
        use_temporal_consistency: bool = True
    ) -> torch.Tensor:
        """Forward pass with flexible frame count"""

        if use_temporal_consistency:
            temporal_context = self.temporal_buffer.get_temporal_context()

            enhanced_latents = self.apply_i2v_conditioning(
                noisy_latents, temporal_context, num_frames
            )
        else:
            enhanced_latents = noisy_latents

        if text_embeddings.shape[0] != enhanced_latents.shape[0]:
            text_embeddings = text_embeddings.repeat(num_frames, 1, 1)

        noise_pred = self.base_model(enhanced_latents, timestep, text_embeddings)

        return noise_pred

    def update_temporal_buffer(self, latents: torch.Tensor, images: Optional[torch.Tensor] = None, num_frames: int = 1):
        """Update temporal buffer with generated frames"""
        with torch.no_grad():
            features = self.latent_encoder(latents)
            self.temporal_buffer.add_frames(features, latents, images, num_frames)

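# Illustrative call pattern (not part of the original module): the generator below
# stacks latents as [uncond frame 0..N-1, cond frame 0..N-1] for classifier-free
# guidance, so the diffuser sees an effective batch of 2*N with num_frames=N.
#
#   latent_model_input = torch.cat([batch_latents] * 2)          # [2*N, C, H, W]
#   noise_pred = i2v_model(latent_model_input, t,
#                          batch_text_embeddings,                 # [2*N, L, D]
#                          num_frames=current_batch_size)
#   uncond, cond = noise_pred.chunk(2)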

class FlexibleI2VGenerator:
    """High-level generator with configurable frame batch sizes"""

    def __init__(
        self,
        diffusion_model: FlexibleI2VDiffuser,
        scheduler,
        vae,
        device: str = "cuda"
    ):
        self.model = diffusion_model
        self.scheduler = scheduler
        self.vae = vae
        self.device = device

        self.image_transform = transforms.Compose([
            transforms.Resize((512, 512)),
            transforms.ToTensor(),
            transforms.Normalize([0.5], [0.5])
        ])

    def encode_image(self, image: Union[Image.Image, torch.Tensor]) -> torch.Tensor:
        """Encode PIL image or tensor to latent space"""
        if isinstance(image, Image.Image):
            image = self.image_transform(image).unsqueeze(0).to(self.device)

        with torch.no_grad():
            latent = self.vae.encode(image).latent_dist.sample()
            latent = latent * self.vae.config.scaling_factor

        return latent

    def decode_latents(self, latents: torch.Tensor) -> torch.Tensor:
        """Decode latents to images"""
        with torch.no_grad():
            latents = latents / self.vae.config.scaling_factor
            images = self.vae.decode(latents).sample
            images = (images + 1.0) / 2.0
            images = torch.clamp(images, 0.0, 1.0)
        return images

    @torch.no_grad()
    def generate_i2v_sequence(
        self,
        reference_image: Union[Image.Image, torch.Tensor],
        prompt: str,
        text_encoder,
        tokenizer,
        num_frames: int = 16,
        frames_per_batch: int = 2,
        num_inference_steps: int = 20,
        guidance_scale: float = 7.5,
        generator: Optional[torch.Generator] = None,
        callback=None
    ) -> List[torch.Tensor]:
        """Generate I2V sequence with configurable batch size"""

        print(f"Generating {num_frames} frames in batches of {frames_per_batch}")

        # Encode the reference frame
        ref_latent = self.encode_image(reference_image)
        ref_image_tensor = reference_image if isinstance(reference_image, torch.Tensor) else \
            self.image_transform(reference_image).unsqueeze(0).to(self.device)

        # Text conditioning
        text_inputs = tokenizer(
            prompt,
            padding="max_length",
            max_length=tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt"
        )
        text_embeddings = text_encoder(text_inputs.input_ids.to(self.device))[0]

        # Unconditional embeddings for classifier-free guidance
        uncond_tokens = [""]
        uncond_inputs = tokenizer(
            uncond_tokens,
            padding="max_length",
            max_length=tokenizer.model_max_length,
            return_tensors="pt"
        )
        uncond_embeddings = text_encoder(uncond_inputs.input_ids.to(self.device))[0]

        # Reset the temporal buffer and seed it with the reference frame
        self.model.temporal_buffer = TemporalConsistencyBuffer(
            self.model.temporal_buffer_size,
            self.model.feature_dim
        )
        self.model.update_temporal_buffer(ref_latent, ref_image_tensor, 1)

        generated_frames = [ref_latent]
        latent_shape = ref_latent.shape

        frames_generated = 1

        while frames_generated < num_frames:
            remaining_frames = num_frames - frames_generated
            current_batch_size = min(frames_per_batch, remaining_frames)

            print(f"Generating frames {frames_generated+1}-{frames_generated+current_batch_size}")

            batch_latents = torch.randn(
                (current_batch_size, *latent_shape[1:]),
                generator=generator,
                device=self.device,
                dtype=text_embeddings.dtype
            )

            # [uncond x N, cond x N] for classifier-free guidance
            batch_text_embeddings = torch.cat([
                uncond_embeddings.repeat(current_batch_size, 1, 1),
                text_embeddings.repeat(current_batch_size, 1, 1)
            ])

            self.scheduler.set_timesteps(num_inference_steps, device=self.device)
            timesteps = self.scheduler.timesteps

            for i, t in enumerate(timesteps):
                latent_model_input = torch.cat([batch_latents] * 2)
                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

                noise_pred = self.model(
                    latent_model_input,
                    t,
                    batch_text_embeddings,
                    num_frames=current_batch_size,
                    use_temporal_consistency=True
                )

                # Classifier-free guidance
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

                batch_latents = self.scheduler.step(noise_pred, t, batch_latents).prev_sample

                if callback:
                    callback(i, t, batch_latents)

            # Decode the new frames and push them into the temporal buffer
            batch_images = self.decode_latents(batch_latents)
            self.model.update_temporal_buffer(batch_latents, batch_images, current_batch_size)

            for j in range(current_batch_size):
                generated_frames.append(batch_latents[j:j+1])

            frames_generated += current_batch_size
            print(f"Generated {current_batch_size} frames")

        return generated_frames

    def generate_with_stepping_strategy(
        self,
        reference_image: Union[Image.Image, torch.Tensor],
        prompt: str,
        text_encoder,
        tokenizer,
        total_frames: int = 24,
        stepping_pattern: List[int] = [1, 2, 3, 2, 1],
        **kwargs
    ) -> List[torch.Tensor]:
        """Generate with dynamic stepping pattern"""

        all_frames = []
        frames_generated = 0
        step_idx = 0

        while frames_generated < total_frames:
            current_step = stepping_pattern[step_idx % len(stepping_pattern)]
            remaining = total_frames - frames_generated
            actual_step = min(current_step, remaining)

            print(f"Step {step_idx + 1}: Generating {actual_step} frames")

            if frames_generated == 0:
                # First step: start from the user-supplied reference image
                frames = self.generate_i2v_sequence(
                    reference_image=reference_image,
                    prompt=prompt,
                    text_encoder=text_encoder,
                    tokenizer=tokenizer,
                    num_frames=actual_step + 1,
                    frames_per_batch=actual_step,
                    **kwargs
                )
                all_frames.extend(frames)
                frames_generated += len(frames)
            else:
                # Subsequent steps: continue from the last generated frame.
                # decode_latents returns images in [0, 1]; rescale to [-1, 1]
                # before re-encoding with the VAE
                last_frame_latent = all_frames[-1]
                last_frame_image = self.decode_latents(last_frame_latent) * 2.0 - 1.0

                frames = self.generate_i2v_sequence(
                    reference_image=last_frame_image,
                    prompt=prompt,
                    text_encoder=text_encoder,
                    tokenizer=tokenizer,
                    num_frames=actual_step + 1,
                    frames_per_batch=actual_step,
                    **kwargs
                )
                # Drop the duplicated reference frame
                all_frames.extend(frames[1:])
                frames_generated += len(frames) - 1

            step_idx += 1

        return all_frames[:total_frames]

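# ---------------------------------------------------------------------------
# `example_usage()` below calls `load_models()`, which this module does not
# define. The sketch here is one possible implementation, assuming Stable
# Diffusion v1.5 components loaded via `diffusers`/`transformers`; the
# checkpoint id and the thin UNet wrapper are illustrative assumptions, not
# part of the original code. `FlexibleI2VDiffuser` expects the base model to
# expose `.in_channels` and to return the predicted noise as a plain tensor,
# which is what the wrapper provides.
# ---------------------------------------------------------------------------
def load_models(model_id: str = "runwayml/stable-diffusion-v1-5"):
    """Hypothetical loader for the base diffusion components (sketch)."""
    from diffusers import AutoencoderKL, DDIMScheduler, UNet2DConditionModel
    from transformers import CLIPTextModel, CLIPTokenizer

    unet = UNet2DConditionModel.from_pretrained(model_id, subfolder="unet")
    scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
    vae = AutoencoderKL.from_pretrained(model_id, subfolder="vae").to("cuda")
    text_encoder = CLIPTextModel.from_pretrained(model_id, subfolder="text_encoder").to("cuda")
    tokenizer = CLIPTokenizer.from_pretrained(model_id, subfolder="tokenizer")

    class UNetWrapper(nn.Module):
        """Adapts the diffusers UNet to the interface FlexibleI2VDiffuser expects."""

        def __init__(self, unet):
            super().__init__()
            self.unet = unet
            self.in_channels = unet.config.in_channels

        def forward(self, latents, timestep, encoder_hidden_states):
            return self.unet(latents, timestep, encoder_hidden_states).sample

    base_model = UNetWrapper(unet).to("cuda")

    return base_model, scheduler, vae, text_encoder, tokenizer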

def example_usage():
    """Example of flexible I2V generation"""

    # Load the base diffusion components (see the load_models sketch above)
    base_model, scheduler, vae, text_encoder, tokenizer = load_models()

    # Wrap the base model with temporal conditioning and move the added modules
    # to the same device as the generator
    i2v_model = FlexibleI2VDiffuser(
        base_diffusion_model=base_model,
        feature_dim=512,
        temporal_buffer_size=8,
        max_batch_frames=3
    ).to("cuda")

    generator = FlexibleI2VGenerator(
        diffusion_model=i2v_model,
        scheduler=scheduler,
        vae=vae,
        device="cuda"
    )

    reference_image = Image.open("reference.jpg")

    # Fixed batching: 2 frames per step
    frames_fixed = generator.generate_i2v_sequence(
        reference_image=reference_image,
        prompt="A cat walking in a garden",
        text_encoder=text_encoder,
        tokenizer=tokenizer,
        num_frames=16,
        frames_per_batch=2,
        num_inference_steps=20
    )

    # Variable stepping pattern
    frames_variable = generator.generate_with_stepping_strategy(
        reference_image=reference_image,
        prompt="A cat walking in a garden",
        text_encoder=text_encoder,
        tokenizer=tokenizer,
        total_frames=24,
        stepping_pattern=[1, 2, 3, 2, 1],
        num_inference_steps=20
    )

    print(f"Generated {len(frames_fixed)} frames with fixed batching")
    print(f"Generated {len(frames_variable)} frames with variable stepping")


if __name__ == "__main__":
    example_usage()