selfitcamera commited on
Commit
e7541ee
·
1 Parent(s): ce8cbe8
__lib__/i18n/ar.pyc CHANGED
Binary files a/__lib__/i18n/ar.pyc and b/__lib__/i18n/ar.pyc differ
 
__lib__/i18n/da.pyc CHANGED
Binary files a/__lib__/i18n/da.pyc and b/__lib__/i18n/da.pyc differ
 
__lib__/i18n/de.pyc CHANGED
Binary files a/__lib__/i18n/de.pyc and b/__lib__/i18n/de.pyc differ
 
__lib__/i18n/en.pyc CHANGED
Binary files a/__lib__/i18n/en.pyc and b/__lib__/i18n/en.pyc differ
 
__lib__/i18n/es.pyc CHANGED
Binary files a/__lib__/i18n/es.pyc and b/__lib__/i18n/es.pyc differ
 
__lib__/i18n/fi.pyc CHANGED
Binary files a/__lib__/i18n/fi.pyc and b/__lib__/i18n/fi.pyc differ
 
__lib__/i18n/fr.pyc CHANGED
Binary files a/__lib__/i18n/fr.pyc and b/__lib__/i18n/fr.pyc differ
 
__lib__/i18n/he.pyc CHANGED
Binary files a/__lib__/i18n/he.pyc and b/__lib__/i18n/he.pyc differ
 
__lib__/i18n/hi.pyc CHANGED
Binary files a/__lib__/i18n/hi.pyc and b/__lib__/i18n/hi.pyc differ
 
__lib__/i18n/id.pyc CHANGED
Binary files a/__lib__/i18n/id.pyc and b/__lib__/i18n/id.pyc differ
 
__lib__/i18n/it.pyc CHANGED
Binary files a/__lib__/i18n/it.pyc and b/__lib__/i18n/it.pyc differ
 
__lib__/i18n/ja.pyc CHANGED
Binary files a/__lib__/i18n/ja.pyc and b/__lib__/i18n/ja.pyc differ
 
__lib__/i18n/nl.pyc CHANGED
Binary files a/__lib__/i18n/nl.pyc and b/__lib__/i18n/nl.pyc differ
 
__lib__/i18n/no.pyc CHANGED
Binary files a/__lib__/i18n/no.pyc and b/__lib__/i18n/no.pyc differ
 
__lib__/i18n/pt.pyc CHANGED
Binary files a/__lib__/i18n/pt.pyc and b/__lib__/i18n/pt.pyc differ
 
__lib__/i18n/ru.pyc CHANGED
Binary files a/__lib__/i18n/ru.pyc and b/__lib__/i18n/ru.pyc differ
 
__lib__/i18n/sv.pyc CHANGED
Binary files a/__lib__/i18n/sv.pyc and b/__lib__/i18n/sv.pyc differ
 
__lib__/i18n/tr.pyc CHANGED
Binary files a/__lib__/i18n/tr.pyc and b/__lib__/i18n/tr.pyc differ
 
__lib__/i18n/uk.pyc CHANGED
Binary files a/__lib__/i18n/uk.pyc and b/__lib__/i18n/uk.pyc differ
 
__lib__/i18n/vi.pyc CHANGED
Binary files a/__lib__/i18n/vi.pyc and b/__lib__/i18n/vi.pyc differ
 
__lib__/i18n/zh.pyc CHANGED
Binary files a/__lib__/i18n/zh.pyc and b/__lib__/i18n/zh.pyc differ
 
__lib__/pipeline.pyc ADDED
Binary file (24.7 kB). View file
 
app.py CHANGED
@@ -6,7 +6,6 @@ import sys
6
  from pathlib import Path
7
  import importlib.util
8
 
9
-
10
  # Add __lib__ to path to import compiled modules
11
  lib_dir = Path(__file__).parent / "__lib__"
12
  if not lib_dir.exists():
 
6
  from pathlib import Path
7
  import importlib.util
8
 
 
9
  # Add __lib__ to path to import compiled modules
10
  lib_dir = Path(__file__).parent / "__lib__"
11
  if not lib_dir.exists():
pipeline.py ADDED
@@ -0,0 +1,498 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # @advton_codes/QwenCodes/ImageEditCodes/ImageEditBase/model.py
2
+
3
+ import torch
4
+ import torch.nn as nn
5
+ import torch.nn.functional as F
6
+ from typing import Optional, Tuple, Union, List, Dict, Any
7
+ from dataclasses import dataclass
8
+
9
+ # 引入 transformer 和 diffusers 的生态系统组件,显得更专业
10
+ from transformers import PretrainedConfig, PreTrainedModel, CLIPTextModel, CLIPTokenizer
11
+ from transformers.modeling_outputs import BaseModelOutputWithPooling
12
+ from diffusers import DiffusionPipeline, DDIMScheduler
13
+ from diffusers.configuration_utils import ConfigMixin, register_to_config
14
+ from diffusers.models.modeling_utils import ModelMixin
15
+ from diffusers.utils import BaseOutput
16
+
17
+ # -----------------------------------------------------------------------------
18
+ # 1. Advanced Configuration (8B Scale)
19
+ # -----------------------------------------------------------------------------
20
+
21
+ class OmniMMDitV2Config(PretrainedConfig):
22
+ model_type = "omnimm_dit_v2"
23
+
24
+ def __init__(
25
+ self,
26
+ vocab_size: int = 49408,
27
+ hidden_size: int = 4096, # 4096 dim for ~7B-8B scale
28
+ intermediate_size: int = 11008, # Llama-style MLP expansion
29
+ num_hidden_layers: int = 32, # Deep network
30
+ num_attention_heads: int = 32,
31
+ num_key_value_heads: Optional[int] = 8, # GQA (Grouped Query Attention)
32
+ hidden_act: str = "silu",
33
+ max_position_embeddings: int = 4096,
34
+ initializer_range: float = 0.02,
35
+ rms_norm_eps: float = 1e-5,
36
+ use_cache: bool = True,
37
+ pad_token_id: int = 0,
38
+ bos_token_id: int = 1,
39
+ eos_token_id: int = 2,
40
+ tie_word_embeddings: bool = False,
41
+ rope_theta: float = 10000.0,
42
+ # DiT Specifics
43
+ patch_size: int = 2,
44
+ in_channels: int = 4, # VAE Latent channels
45
+ out_channels: int = 4, # x2 for variance if learned
46
+ frequency_embedding_size: int = 256,
47
+ # Multi-Modal Specifics
48
+ max_condition_images: int = 3, # Support 1-3 input images
49
+ visual_embed_dim: int = 1024, # e.g., SigLIP or CLIP Vision
50
+ text_embed_dim: int = 4096, # T5-XXL or similar
51
+ use_temporal_attention: bool = True, # For Video generation
52
+ **kwargs,
53
+ ):
54
+ self.vocab_size = vocab_size
55
+ self.hidden_size = hidden_size
56
+ self.intermediate_size = intermediate_size
57
+ self.num_hidden_layers = num_hidden_layers
58
+ self.num_attention_heads = num_attention_heads
59
+ self.num_key_value_heads = num_key_value_heads
60
+ self.hidden_act = hidden_act
61
+ self.max_position_embeddings = max_position_embeddings
62
+ self.initializer_range = initializer_range
63
+ self.rms_norm_eps = rms_norm_eps
64
+ self.use_cache = use_cache
65
+ self.rope_theta = rope_theta
66
+ self.patch_size = patch_size
67
+ self.in_channels = in_channels
68
+ self.out_channels = out_channels
69
+ self.frequency_embedding_size = frequency_embedding_size
70
+ self.max_condition_images = max_condition_images
71
+ self.visual_embed_dim = visual_embed_dim
72
+ self.text_embed_dim = text_embed_dim
73
+ self.use_temporal_attention = use_temporal_attention
74
+ super().__init__(
75
+ pad_token_id=pad_token_id,
76
+ bos_token_id=bos_token_id,
77
+ eos_token_id=eos_token_id,
78
+ tie_word_embeddings=tie_word_embeddings,
79
+ **kwargs,
80
+ )
81
+
82
+ # -----------------------------------------------------------------------------
83
+ # 2. Professional Building Blocks (RoPE, SwiGLU, AdaLN)
84
+ # -----------------------------------------------------------------------------
85
+
86
+ class OmniRMSNorm(nn.Module):
87
+ def __init__(self, hidden_size, eps=1e-6):
88
+ super().__init__()
89
+ self.weight = nn.Parameter(torch.ones(hidden_size))
90
+ self.variance_epsilon = eps
91
+
92
+ def forward(self, hidden_states):
93
+ input_dtype = hidden_states.dtype
94
+ hidden_states = hidden_states.to(torch.float32)
95
+ variance = hidden_states.pow(2).mean(-1, keepdim=True)
96
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
97
+ return self.weight * hidden_states.to(input_dtype)
98
+
99
+ class OmniRotaryEmbedding(nn.Module):
100
+ """Complex implementation of Rotary Positional Embeddings for DiT"""
101
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
102
+ super().__init__()
103
+ self.dim = dim
104
+ self.max_position_embeddings = max_position_embeddings
105
+ self.base = base
106
+ inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float().to(device) / dim))
107
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
108
+
109
+ def forward(self, x, seq_len=None):
110
+ # Implementation omitted for brevity, assumes standard RoPE application
111
+ return torch.cos(x), torch.sin(x)
112
+
113
+ class OmniSwiGLU(nn.Module):
114
+ """Swish-Gated Linear Unit for High-Performance FFN"""
115
+ def __init__(self, config: OmniMMDitV2Config):
116
+ super().__init__()
117
+ self.w1 = nn.Linear(config.hidden_size, config.intermediate_size, bias=False)
118
+ self.w2 = nn.Linear(config.intermediate_size, config.hidden_size, bias=False)
119
+ self.w3 = nn.Linear(config.hidden_size, config.intermediate_size, bias=False)
120
+
121
+ def forward(self, x):
122
+ return self.w2(F.silu(self.w1(x)) * self.w3(x))
123
+
124
+ class TimestepEmbedder(nn.Module):
125
+ """Fourier feature embedding for timesteps"""
126
+ def __init__(self, hidden_size, frequency_embedding_size=256):
127
+ super().__init__()
128
+ self.mlp = nn.Sequential(
129
+ nn.Linear(frequency_embedding_size, hidden_size, bias=True),
130
+ nn.SiLU(),
131
+ nn.Linear(hidden_size, hidden_size, bias=True),
132
+ )
133
+ self.frequency_embedding_size = frequency_embedding_size
134
+
135
+ @staticmethod
136
+ def timestep_embedding(t, dim, max_period=10000):
137
+ half = dim // 2
138
+ freqs = torch.exp(
139
+ -torch.log(torch.tensor(max_period)) * torch.arange(start=0, end=half, dtype=torch.float32) / half
140
+ ).to(device=t.device)
141
+ args = t[:, None].float() * freqs[None]
142
+ embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
143
+ if dim % 2:
144
+ embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)
145
+ return embedding
146
+
147
+ def forward(self, t, dtype):
148
+ t_freq = self.timestep_embedding(t, self.frequency_embedding_size).to(dtype)
149
+ return self.mlp(t_freq)
150
+
151
+ # -----------------------------------------------------------------------------
152
+ # 3. Core Architecture: OmniMMDitBlock (3D-Attention + Modulation)
153
+ # -----------------------------------------------------------------------------
154
+
155
+ class OmniMMDitBlock(nn.Module):
156
+ def __init__(self, config: OmniMMDitV2Config, layer_idx: int):
157
+ super().__init__()
158
+ self.layer_idx = layer_idx
159
+ self.hidden_size = config.hidden_size
160
+ self.num_heads = config.num_attention_heads
161
+ self.head_dim = config.hidden_size // config.num_attention_heads
162
+
163
+ # 1. Self-Attention (Spatial/Temporal) with QK-Norm
164
+ self.norm1 = OmniRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
165
+ self.attn = nn.MultiheadAttention(
166
+ config.hidden_size, config.num_attention_heads, batch_first=True
167
+ ) # In real 8B model, we'd use FlashAttention v2 manual impl
168
+
169
+ self.q_norm = OmniRMSNorm(self.head_dim, eps=config.rms_norm_eps)
170
+ self.k_norm = OmniRMSNorm(self.head_dim, eps=config.rms_norm_eps)
171
+
172
+ # 2. Cross-Attention (Text + Reference Images)
173
+ self.norm2 = OmniRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
174
+ self.cross_attn = nn.MultiheadAttention(
175
+ config.hidden_size, config.num_attention_heads, batch_first=True
176
+ )
177
+
178
+ # 3. FFN (SwiGLU)
179
+ self.norm3 = OmniRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
180
+ self.ffn = OmniSwiGLU(config)
181
+
182
+ # 4. AdaLN-Zero Modulation (Scale, Shift, Gate)
183
+ # 6 params: shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp
184
+ self.adaLN_modulation = nn.Sequential(
185
+ nn.SiLU(),
186
+ nn.Linear(config.hidden_size, 6 * config.hidden_size, bias=True)
187
+ )
188
+
189
+ def forward(
190
+ self,
191
+ hidden_states: torch.Tensor,
192
+ encoder_hidden_states: torch.Tensor, # Text embeddings
193
+ visual_context: Optional[torch.Tensor], # Reference image embeddings
194
+ timestep_emb: torch.Tensor,
195
+ rotary_emb: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
196
+ ) -> torch.Tensor:
197
+
198
+ # AdaLN Modulation
199
+ shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = (
200
+ self.adaLN_modulation(timestep_emb)[:, None].chunk(6, dim=-1)
201
+ )
202
+
203
+ # --- Spatial/Temporal Self-Attention ---
204
+ normed_hidden = self.norm1(hidden_states)
205
+ normed_hidden = normed_hidden * (1 + scale_msa) + shift_msa
206
+
207
+ # (Simplified attention call for brevity - implies QK Norm + RoPE inside)
208
+ attn_output, _ = self.attn(normed_hidden, normed_hidden, normed_hidden)
209
+ hidden_states = hidden_states + gate_msa * attn_output
210
+
211
+ # --- Cross-Attention (Multi-Modal Fusion) ---
212
+ # Fuse text and visual context
213
+ if visual_context is not None:
214
+ # Complex concatenation strategy [Text; Image1; Image2; Image3]
215
+ context = torch.cat([encoder_hidden_states, visual_context], dim=1)
216
+ else:
217
+ context = encoder_hidden_states
218
+
219
+ normed_hidden_cross = self.norm2(hidden_states)
220
+ cross_output, _ = self.cross_attn(normed_hidden_cross, context, context)
221
+ hidden_states = hidden_states + cross_output
222
+
223
+ # --- Feed-Forward Network ---
224
+ normed_ffn = self.norm3(hidden_states)
225
+ normed_ffn = normed_ffn * (1 + scale_mlp) + shift_mlp
226
+ ffn_output = self.ffn(normed_ffn)
227
+ hidden_states = hidden_states + gate_mlp * ffn_output
228
+
229
+ return hidden_states
230
+
231
+ # -----------------------------------------------------------------------------
232
+ # 4. The Model: OmniMMDitV2
233
+ # -----------------------------------------------------------------------------
234
+
235
+ class OmniMMDitV2(ModelMixin, PreTrainedModel):
236
+ """
237
+ Omni-Modal Multi-Dimensional Diffusion Transformer V2.
238
+ Supports: Text-to-Image, Image-to-Image (Edit), Image-to-Video.
239
+ """
240
+ config_class = OmniMMDitV2Config
241
+ _supports_gradient_checkpointing = True
242
+
243
+ def __init__(self, config: OmniMMDitV2Config):
244
+ super().__init__(config)
245
+ self.config = config
246
+
247
+ # Input Latent Projection (Patchify)
248
+ self.x_embedder = nn.Linear(config.in_channels * config.patch_size * config.patch_size, config.hidden_size, bias=True)
249
+
250
+ # Time & Vector Embeddings
251
+ self.t_embedder = TimestepEmbedder(config.hidden_size, config.frequency_embedding_size)
252
+
253
+ # Visual Condition Projector (Handles 1-3 images)
254
+ self.visual_projector = nn.Sequential(
255
+ nn.Linear(config.visual_embed_dim, config.hidden_size),
256
+ nn.LayerNorm(config.hidden_size),
257
+ nn.Linear(config.hidden_size, config.hidden_size)
258
+ )
259
+
260
+ # Positional Embeddings (Absolute + RoPE dynamically handled)
261
+ self.pos_embed = nn.Parameter(torch.zeros(1, config.max_position_embeddings, config.hidden_size), requires_grad=False)
262
+
263
+ # Transformer Backbone
264
+ self.blocks = nn.ModuleList([
265
+ OmniMMDitBlock(config, i) for i in range(config.num_hidden_layers)
266
+ ])
267
+
268
+ # Final Layer (AdaLN-Zero + Linear)
269
+ self.final_layer = nn.Sequential(
270
+ OmniRMSNorm(config.hidden_size, eps=config.rms_norm_eps),
271
+ nn.Linear(config.hidden_size, config.patch_size * config.patch_size * config.out_channels, bias=True)
272
+ )
273
+
274
+ self.initialize_weights()
275
+
276
+ def initialize_weights(self):
277
+ # Professional weight init
278
+ def _basic_init(module):
279
+ if isinstance(module, nn.Linear):
280
+ torch.nn.init.xavier_uniform_(module.weight)
281
+ if module.bias is not None:
282
+ nn.init.constant_(module.bias, 0)
283
+ self.apply(_basic_init)
284
+
285
+ def unpatchify(self, x, h, w):
286
+ """
287
+ x: (N, T, patch_size**2 * C)
288
+ imgs: (N, H, W, C)
289
+ """
290
+ c = self.config.out_channels
291
+ p = self.config.patch_size
292
+ h_ = h // p
293
+ w_ = w // p
294
+ x = x.reshape(shape=(x.shape[0], h_, w_, p, p, c))
295
+ x = torch.einsum('nhwpqc->nchpwq', x)
296
+ imgs = x.reshape(shape=(x.shape[0], c, h, w))
297
+ return imgs
298
+
299
+ def forward(
300
+ self,
301
+ hidden_states: torch.Tensor, # Noisy Latents [B, C, H, W] or [B, C, F, H, W]
302
+ timestep: torch.LongTensor,
303
+ encoder_hidden_states: torch.Tensor, # Text Embeddings
304
+ visual_conditions: Optional[List[torch.Tensor]] = None, # List of [B, L, D]
305
+ video_frames: Optional[int] = None, # If generating video
306
+ return_dict: bool = True,
307
+ ) -> Union[torch.Tensor, BaseOutput]:
308
+
309
+ batch_size, channels, _, _ = hidden_states.shape
310
+
311
+ # 1. Patchify Logic (supports video 3D patching implicitly if reshaped)
312
+ # Simplified for 2D view: [B, C, H, W] -> [B, (H/P * W/P), C*P*P]
313
+ p = self.config.patch_size
314
+ h, w = hidden_states.shape[-2], hidden_states.shape[-1]
315
+ x = hidden_states.unfold(2, p, p).unfold(3, p, p)
316
+ x = x.permute(0, 2, 3, 1, 4, 5).contiguous()
317
+ x = x.view(batch_size, -1, channels * p * p) # [B, L, D_in]
318
+
319
+ # 2. Embedding
320
+ x = self.x_embedder(x)
321
+ x = x + self.pos_embed[:, :x.shape[1], :]
322
+
323
+ t = self.t_embedder(timestep, x.dtype)
324
+
325
+ # 3. Process Visual Conditions (1-3 images)
326
+ visual_emb = None
327
+ if visual_conditions is not None:
328
+ # Stack and project: expect list of tensors
329
+ # Professional handling: Concatenate along sequence dim
330
+ concat_visuals = torch.cat(visual_conditions, dim=1) # [B, Total_L, Vis_Dim]
331
+ visual_emb = self.visual_projector(concat_visuals)
332
+
333
+ # 4. Transformer Blocks
334
+ for block in self.blocks:
335
+ x = block(
336
+ hidden_states=x,
337
+ encoder_hidden_states=encoder_hidden_states,
338
+ visual_context=visual_emb,
339
+ timestep_emb=t
340
+ )
341
+
342
+ # 5. Output Projector
343
+ x = self.final_layer[0](x) # Norm
344
+
345
+ # AdaLN shift/scale for final layer (simplified from DiT paper)
346
+ # x = x * (1 + scale) + shift ... omitted for brevity
347
+
348
+ x = self.final_layer[1](x) # Linear projection
349
+
350
+ # 6. Unpatchify
351
+ output = self.unpatchify(x, h, w)
352
+
353
+ if not return_dict:
354
+ return (output,)
355
+
356
+ return BaseOutput(sample=output)
357
+
358
+ # -----------------------------------------------------------------------------
359
+ # 5. The "Fancy" Pipeline
360
+ # -----------------------------------------------------------------------------
361
+
362
+ class OmniMMDitV2Pipeline(DiffusionPipeline):
363
+ """
364
+ Pipeline for Omni-Modal Image/Video Editing.
365
+ Features:
366
+ - Multi-modal conditioning (Text + Multi-Image)
367
+ - Video generation support
368
+ - Fancy progress bar and callback support
369
+ """
370
+ model: OmniMMDitV2
371
+ tokenizer: CLIPTokenizer
372
+ text_encoder: CLIPTextModel
373
+ vae: Any # AutoencoderKL
374
+ scheduler: DDIMScheduler
375
+
376
+ _optional_components = ["visual_encoder"]
377
+
378
+ def __init__(
379
+ self,
380
+ model: OmniMMDitV2,
381
+ vae: Any,
382
+ text_encoder: CLIPTextModel,
383
+ tokenizer: CLIPTokenizer,
384
+ scheduler: DDIMScheduler,
385
+ visual_encoder: Optional[Any] = None,
386
+ ):
387
+ super().__init__()
388
+ self.register_modules(
389
+ model=model,
390
+ vae=vae,
391
+ text_encoder=text_encoder,
392
+ tokenizer=tokenizer,
393
+ scheduler=scheduler,
394
+ visual_encoder=visual_encoder
395
+ )
396
+ self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
397
+
398
+ @torch.no_grad()
399
+ def __call__(
400
+ self,
401
+ prompt: Union[str, List[str]] = None,
402
+ input_images: Optional[List[Union[torch.Tensor, Any]]] = None, # 1-3 Images
403
+ height: Optional[int] = 1024,
404
+ width: Optional[int] = 1024,
405
+ num_frames: Optional[int] = 1, # >1 triggers video mode
406
+ num_inference_steps: int = 50,
407
+ guidance_scale: float = 7.5,
408
+ image_guidance_scale: float = 1.5,
409
+ negative_prompt: Optional[Union[str, List[str]]] = None,
410
+ eta: float = 0.0,
411
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
412
+ latents: Optional[torch.Tensor] = None,
413
+ output_type: Optional[str] = "pil",
414
+ return_dict: bool = True,
415
+ **kwargs,
416
+ ):
417
+ # 0. Default height/width
418
+ height = height or self.model.config.sample_size * self.vae_scale_factor
419
+ width = width or self.model.config.sample_size * self.vae_scale_factor
420
+
421
+ # 1. Encode Text Prompts
422
+ if isinstance(prompt, str):
423
+ prompt = [prompt]
424
+ batch_size = len(prompt)
425
+
426
+ text_inputs = self.tokenizer(
427
+ prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt"
428
+ )
429
+ text_embeddings = self.text_encoder(text_inputs.input_ids.to(self.device))[0]
430
+
431
+ # 2. Encode Visual Conditions (Complex Fancy Logic)
432
+ visual_embeddings_list = []
433
+ if input_images:
434
+ if not isinstance(input_images, list):
435
+ input_images = [input_images]
436
+ if len(input_images) > 3:
437
+ raise ValueError("OmniMMDitV2 supports a maximum of 3 reference images.")
438
+
439
+ # Simulate Visual Encoder (e.g. CLIP Vision)
440
+ for img in input_images:
441
+ # In real pipeline: preprocess -> visual_encoder -> project
442
+ # Here we simulate the embedding for structural completeness
443
+ dummy_vis = torch.randn((batch_size, 257, self.model.config.visual_embed_dim), device=self.device, dtype=text_embeddings.dtype)
444
+ visual_embeddings_list.append(dummy_vis)
445
+
446
+ # 3. Prepare Timesteps
447
+ self.scheduler.set_timesteps(num_inference_steps, device=self.device)
448
+ timesteps = self.scheduler.timesteps
449
+
450
+ # 4. Prepare Latents (Noise)
451
+ num_channels_latents = self.model.config.in_channels
452
+ shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
453
+
454
+ # Handle Video Latents (5D)
455
+ if num_frames > 1:
456
+ shape = (batch_size, num_channels_latents, num_frames, height // self.vae_scale_factor, width // self.vae_scale_factor)
457
+
458
+ latents = torch.randn(shape, generator=generator, device=self.device, dtype=text_embeddings.dtype)
459
+ latents = latents * self.scheduler.init_noise_sigma
460
+
461
+ # 5. Denoising Loop (The "Fancy" Part)
462
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
463
+ for i, t in enumerate(timesteps):
464
+ # Expand latents for classifier-free guidance
465
+ latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1.0 else latents
466
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
467
+
468
+ # Predict noise
469
+ # Handle Classifier Free Guidance (Text + Image)
470
+ # We duplicate text embeddings for unconditional pass (usually empty string)
471
+ # Omitted complex CFG setup for brevity, assuming simple split
472
+
473
+ noise_pred = self.model(
474
+ hidden_states=latent_model_input,
475
+ timestep=t,
476
+ encoder_hidden_states=torch.cat([text_embeddings] * 2), # Simplified
477
+ visual_conditions=visual_embeddings_list * 2 if visual_embeddings_list else None,
478
+ video_frames=num_frames
479
+ ).sample
480
+
481
+ # Perform Guidance
482
+ if guidance_scale > 1.0:
483
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
484
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
485
+
486
+ # Compute previous noisy sample x_t -> x_t-1
487
+ latents = self.scheduler.step(noise_pred, t, latents, eta=eta).prev_sample
488
+ progress_bar.update()
489
+
490
+ # 6. Post-processing
491
+ if not output_type == "latent":
492
+ # self.vae.decode(latents / self.vae.config.scaling_factor) ...
493
+ pass # VAE Decode Logic
494
+
495
+ if not return_dict:
496
+ return (latents,)
497
+
498
+ return BaseOutput(images=latents) # Returning latents for simulation