Little-ECHO committed on
Commit 9b1b72d · verified · 1 Parent(s): 19de5fc

Upload 14 files

app.py ADDED
@@ -0,0 +1,187 @@
+ """
+ Minimal Gradio wrapper for the given Qwen-Image-Edit inference script.
+
+ Features:
+ - Loads the model once and reuses it.
+ - Inputs: image, edit prompt, cond_b, cond_delta.
+ - Matches the original settings (size 1024, steps=24, true_cfg_scale=4.0,
+   fixed seed=42, and the same GRAG scale structure repeated 60 times).
+
+ Run:
+     pip install gradio pillow torch
+     # plus your project deps providing hacked_models/* and model weights
+     python app.py
+
+ Then open the local URL printed by Gradio.
+ """
+
+ import os
+ import time
+ from typing import Optional
+
+ import gradio as gr
+ import torch
+ from PIL import Image
+ from huggingface_hub import snapshot_download
+ from requests.exceptions import ChunkedEncodingError
+ from urllib3.exceptions import ProtocolError
+
+ # --- your project imports (as in the original script) ---
+ from hacked_models.scheduler import FlowMatchEulerDiscreteScheduler
+ from hacked_models.pipeline import QwenImageEditPipeline
+ from hacked_models.models import QwenImageTransformer2DModel
+ from hacked_models.utils import seed_everything
+
+
+ def robust_snapshot_download(repo_id, local_dir, token=None, retries=5):
+     os.makedirs(local_dir, exist_ok=True)
+     os.environ.setdefault("HF_HUB_ENABLE_HF_TRANSFER", "1")  # optional: faster, more reliable transfers
+
+     last_err = None
+     for i in range(retries):
+         try:
+             return snapshot_download(
+                 repo_id=repo_id,
+                 local_dir=local_dir,
+                 local_dir_use_symlinks=False,
+                 resume_download=True,  # resume interrupted downloads
+                 token=token,
+                 max_workers=4,  # download shards in parallel
+             )
+         except (ChunkedEncodingError, ProtocolError) as e:
+             last_err = e
+             wait = min(2**i, 30)
+             print(f"[download] network error {i+1}/{retries}: {e}; retry in {wait}s", flush=True)
+             time.sleep(wait)
+     raise RuntimeError(f"Download failed after {retries} retries: {last_err}")
+
+
+ # -----------------------------
+ # Global state
+ # -----------------------------
+ _DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
+ _DTYPE = torch.bfloat16 if _DEVICE == "cuda" else torch.float32
+ _PIPELINE: Optional[QwenImageEditPipeline] = None
+ _LOADED_MODEL_PATH: Optional[str] = None
+
+
+ def _load_pipeline(model_path: str) -> QwenImageEditPipeline:
+     """Load (or reuse) the pipeline for the given model_path."""
+     global _PIPELINE, _LOADED_MODEL_PATH
+     if _PIPELINE is not None and _LOADED_MODEL_PATH == model_path:
+         return _PIPELINE
+
+     # Set seed once (matches the original script)
+     seed_everything(42)
+
+     # Load components
+     scheduler = FlowMatchEulerDiscreteScheduler.from_pretrained(
+         os.path.join(model_path, "scheduler"), torch_dtype=_DTYPE
+     )
+     transformer = QwenImageTransformer2DModel.from_pretrained(
+         os.path.join(model_path, "transformer"), torch_dtype=_DTYPE
+     )
+
+     pipe = QwenImageEditPipeline.from_pretrained(
+         model_path, torch_dtype=_DTYPE, scheduler=scheduler, transformer=transformer
+     )
+
+     pipe.set_progress_bar_config(disable=None)
+     pipe.to(_DTYPE)
+     pipe.to(_DEVICE)
+
+     _PIPELINE = pipe
+     _LOADED_MODEL_PATH = model_path
+     return pipe
+
+
+ def _build_grag_scale(cond_b: float, cond_delta: float, repeats: int = 60):
+     """Replicate the original GRAG schedule structure.
+
+     Each element is: ((512, 1.0, 1.0), (4096, cond_b, cond_delta))
+     """
+     return [((512, 1.0, 1.0), (4096, cond_b, cond_delta))] * repeats
+
+
+ def predict(
+     image: Image.Image,
+     edit_prompt: str,
+     cond_b: float,
+     cond_delta: float,
+ ):
+     if image is None or not edit_prompt:
+         return None
+
+     # Match original preprocessing
+     input_image = image.convert("RGB").resize((1024, 1024))
+
+     inputs = {
+         "image": input_image,
+         "prompt": edit_prompt,
+         "generator": torch.manual_seed(42),
+         "true_cfg_scale": 4.0,
+         "negative_prompt": " ",
+         "num_inference_steps": 24,
+         "return_dict": False,
+         "grag_scale": _build_grag_scale(cond_b, cond_delta, repeats=60),
+     }
+
+     with torch.inference_mode():
+         image_batch, x0_images, saved_outputs = pipe(**inputs)
+
+     # Return the first image (same as the original save behavior)
+     return image_batch[0]
+
+
+ model_dir = "/mmu-vcg/niuxuesong/zxp_echo/202509GRAG/Checkpoint/Qwen-Image-Edit"
+ repo_id = "Qwen/Qwen-Image-Edit"
+
+ if not os.path.exists(model_dir) or not os.listdir(model_dir):
+     robust_snapshot_download(repo_id, model_dir, token=os.getenv("HF_TOKEN"))
+     print(f"Model downloaded to {model_dir}")
+ else:
+     print(f"Model already exists at {model_dir}")
+
+
+ pipe = _load_pipeline(model_dir)
+
+
+ with gr.Blocks(title="Qwen Image Edit — Minimal GRAG Demo") as demo:
+     gr.Markdown("# Qwen Image Edit — Minimal GRAG Demo\nUpload an image, enter your edit instruction, and set the GRAG parameters.")
+
+     with gr.Row():
+         in_image = gr.Image(label="Input Image", type="pil")
+         out_image = gr.Image(label="Edited Output", type="pil")
+
+     edit_prompt = gr.Textbox(label="Edit Instruction", placeholder="e.g., Put a pair of black-framed glasses on him.")
+     with gr.Row():
+         cond_b = gr.Slider(label="cond_b", minimum=0.8, maximum=2.0, value=1.0, step=0.01)
+         cond_delta = gr.Slider(label="cond_delta", minimum=0.8, maximum=2.0, value=1.0, step=0.01)
+
+     run_btn = gr.Button("Run Edit")
+
+     run_btn.click(
+         fn=predict,
+         inputs=[in_image, edit_prompt, cond_b, cond_delta],
+         outputs=[out_image],
+         api_name="run_edit",
+     )
+
+     gr.Markdown(
+         """
+         **Notes**
+         - Uses a fixed seed=42 and num_inference_steps=24 to match the original script.
+         - Resizes the input to 1024×1024 before inference (as in the original code).
+         - `grag_scale` is built as a list of length 60 with the same tuples.
+         - Automatically chooses CUDA if available; otherwise runs on CPU.
+         """
+     )
+
+ if __name__ == "__main__":
+     demo.queue().launch(share=True)
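
Because the click handler registers api_name="run_edit", the running demo can also be driven programmatically. Below is a minimal sketch using gradio_client; the local URL and the input filename are placeholders, not part of this repo, and the argument order simply mirrors the click() inputs above.

from gradio_client import Client, handle_file

# Connect to the locally running demo (use the URL Gradio actually prints).
client = Client("http://127.0.0.1:7860/")

result = client.predict(
    handle_file("input.png"),                      # in_image (placeholder path)
    "Put a pair of black-framed glasses on him.",  # edit_prompt
    1.0,                                           # cond_b
    1.0,                                           # cond_delta
    api_name="/run_edit",
)
print(result)  # local path of the edited image written by gradio_client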
hacked_models/__pycache__/models.cpython-310.pyc ADDED
Binary file (20.4 kB).
 
hacked_models/__pycache__/models.cpython-311.pyc ADDED
Binary file (40.9 kB).
 
hacked_models/__pycache__/pipeline.cpython-310.pyc ADDED
Binary file (32.3 kB).
 
hacked_models/__pycache__/pipeline.cpython-311.pyc ADDED
Binary file (57.3 kB).
 
hacked_models/__pycache__/scheduler.cpython-310.pyc ADDED
Binary file (17.7 kB).
 
hacked_models/__pycache__/scheduler.cpython-311.pyc ADDED
Binary file (27.1 kB).
 
hacked_models/__pycache__/utils.cpython-310.pyc ADDED
Binary file (1.31 kB).
 
hacked_models/attention.py ADDED
The diff for this file is too large to render. See raw diff
 
hacked_models/models.py ADDED
@@ -0,0 +1,687 @@
1
+ # Copyright 2025 Qwen-Image Team, The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import functools
16
+ import math
17
+ from typing import Any, Dict, List, Optional, Tuple, Union
18
+
19
+ import numpy as np
20
+ import torch
21
+ import torch.nn as nn
22
+ import torch.nn.functional as F
23
+
24
+ from diffusers.configuration_utils import ConfigMixin, register_to_config
25
+ from diffusers.loaders import FromOriginalModelMixin, PeftAdapterMixin
26
+ from diffusers.utils import USE_PEFT_BACKEND, logging, scale_lora_layers, unscale_lora_layers
27
+ from diffusers.utils.torch_utils import maybe_allow_in_graph
28
+ from diffusers.models.attention import AttentionMixin, FeedForward
29
+ from diffusers.models.attention_dispatch import dispatch_attention_fn
30
+ from diffusers.models.attention_processor import Attention
31
+ from diffusers.models.cache_utils import CacheMixin
32
+ from diffusers.models.embeddings import TimestepEmbedding, Timesteps
33
+ from diffusers.models.modeling_outputs import Transformer2DModelOutput
34
+ from diffusers.models.modeling_utils import ModelMixin
35
+ from diffusers.models.normalization import AdaLayerNormContinuous, RMSNorm
36
+
37
+
38
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
39
+
40
+
41
+
42
+ def _lastdim_l2norm(x: torch.Tensor, eps: float = 1e-12):
43
+ # Return the L2 norm over the last dimension, keeping that dimension.
44
+ return x.norm(p=2, dim=-1, keepdim=True).clamp_min(eps)
45
+
46
+
47
+ def get_timestep_embedding(
48
+ timesteps: torch.Tensor,
49
+ embedding_dim: int,
50
+ flip_sin_to_cos: bool = False,
51
+ downscale_freq_shift: float = 1,
52
+ scale: float = 1,
53
+ max_period: int = 10000,
54
+ ) -> torch.Tensor:
55
+ """
56
+ This matches the implementation in Denoising Diffusion Probabilistic Models: Create sinusoidal timestep embeddings.
57
+
58
+ Args
59
+ timesteps (torch.Tensor):
60
+ a 1-D Tensor of N indices, one per batch element. These may be fractional.
61
+ embedding_dim (int):
62
+ the dimension of the output.
63
+ flip_sin_to_cos (bool):
64
+ Whether the embedding order should be `cos, sin` (if True) or `sin, cos` (if False)
65
+ downscale_freq_shift (float):
66
+ Controls the delta between frequencies between dimensions
67
+ scale (float):
68
+ Scaling factor applied to the embeddings.
69
+ max_period (int):
70
+ Controls the maximum frequency of the embeddings
71
+ Returns
72
+ torch.Tensor: an [N x dim] Tensor of positional embeddings.
73
+ """
74
+ assert len(timesteps.shape) == 1, "Timesteps should be a 1d-array"
75
+
76
+ half_dim = embedding_dim // 2
77
+ exponent = -math.log(max_period) * torch.arange(
78
+ start=0, end=half_dim, dtype=torch.float32, device=timesteps.device
79
+ )
80
+ exponent = exponent / (half_dim - downscale_freq_shift)
81
+
82
+ emb = torch.exp(exponent).to(timesteps.dtype)
83
+ emb = timesteps[:, None].float() * emb[None, :]
84
+
85
+ # scale embeddings
86
+ emb = scale * emb
87
+
88
+ # concat sine and cosine embeddings
89
+ emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=-1)
90
+
91
+ # flip sine and cosine embeddings
92
+ if flip_sin_to_cos:
93
+ emb = torch.cat([emb[:, half_dim:], emb[:, :half_dim]], dim=-1)
94
+
95
+ # zero pad
96
+ if embedding_dim % 2 == 1:
97
+ emb = torch.nn.functional.pad(emb, (0, 1, 0, 0))
98
+ return emb
99
+
100
+
101
+ def apply_rotary_emb_qwen(
102
+ x: torch.Tensor,
103
+ freqs_cis: Union[torch.Tensor, Tuple[torch.Tensor]],
104
+ use_real: bool = True,
105
+ use_real_unbind_dim: int = -1,
106
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
107
+ """
108
+ Apply rotary embeddings to input tensors using the given frequency tensor. This function applies rotary embeddings
109
+ to the given query or key 'x' tensors using the provided frequency tensor 'freqs_cis'. The input tensors are
110
+ reshaped as complex numbers, and the frequency tensor is reshaped for broadcasting compatibility. The resulting
111
+ tensors contain rotary embeddings and are returned as real tensors.
112
+
113
+ Args:
114
+ x (`torch.Tensor`):
115
+ Query or key tensor to apply rotary embeddings. [B, S, H, D] xk (torch.Tensor): Key tensor to apply
116
+ freqs_cis (`Tuple[torch.Tensor]`): Precomputed frequency tensor for complex exponentials. ([S, D], [S, D],)
117
+
118
+ Returns:
119
+ Tuple[torch.Tensor, torch.Tensor]: Tuple of modified query tensor and key tensor with rotary embeddings.
120
+ """
121
+ if use_real:
122
+ cos, sin = freqs_cis # [S, D]
123
+ cos = cos[None, None]
124
+ sin = sin[None, None]
125
+ cos, sin = cos.to(x.device), sin.to(x.device)
126
+
127
+ if use_real_unbind_dim == -1:
128
+ # Used for flux, cogvideox, hunyuan-dit
129
+ x_real, x_imag = x.reshape(*x.shape[:-1], -1, 2).unbind(-1) # [B, S, H, D//2]
130
+ x_rotated = torch.stack([-x_imag, x_real], dim=-1).flatten(3)
131
+ elif use_real_unbind_dim == -2:
132
+ # Used for Stable Audio, OmniGen, CogView4 and Cosmos
133
+ x_real, x_imag = x.reshape(*x.shape[:-1], 2, -1).unbind(-2) # [B, S, H, D//2]
134
+ x_rotated = torch.cat([-x_imag, x_real], dim=-1)
135
+ else:
136
+ raise ValueError(f"`use_real_unbind_dim={use_real_unbind_dim}` but should be -1 or -2.")
137
+
138
+ out = (x.float() * cos + x_rotated.float() * sin).to(x.dtype)
139
+
140
+ return out
141
+ else:
142
+ x_rotated = torch.view_as_complex(x.float().reshape(*x.shape[:-1], -1, 2))
143
+ freqs_cis = freqs_cis.unsqueeze(1)
144
+ x_out = torch.view_as_real(x_rotated * freqs_cis).flatten(3)
145
+
146
+ return x_out.type_as(x)
147
+
148
+
149
+ class QwenTimestepProjEmbeddings(nn.Module):
150
+ def __init__(self, embedding_dim):
151
+ super().__init__()
152
+
153
+ self.time_proj = Timesteps(num_channels=256, flip_sin_to_cos=True, downscale_freq_shift=0, scale=1000)
154
+ self.timestep_embedder = TimestepEmbedding(in_channels=256, time_embed_dim=embedding_dim)
155
+
156
+ def forward(self, timestep, hidden_states):
157
+ timesteps_proj = self.time_proj(timestep)
158
+ timesteps_emb = self.timestep_embedder(timesteps_proj.to(dtype=hidden_states.dtype)) # (N, D)
159
+
160
+ conditioning = timesteps_emb
161
+
162
+ return conditioning
163
+
164
+
165
+ class QwenEmbedRope(nn.Module):
166
+ def __init__(self, theta: int, axes_dim: List[int], scale_rope=False):
167
+ super().__init__()
168
+ self.theta = theta
169
+ self.axes_dim = axes_dim
170
+ pos_index = torch.arange(4096)
171
+ neg_index = torch.arange(4096).flip(0) * -1 - 1
172
+ self.pos_freqs = torch.cat(
173
+ [
174
+ self.rope_params(pos_index, self.axes_dim[0], self.theta),
175
+ self.rope_params(pos_index, self.axes_dim[1], self.theta),
176
+ self.rope_params(pos_index, self.axes_dim[2], self.theta),
177
+ ],
178
+ dim=1,
179
+ )
180
+ self.neg_freqs = torch.cat(
181
+ [
182
+ self.rope_params(neg_index, self.axes_dim[0], self.theta),
183
+ self.rope_params(neg_index, self.axes_dim[1], self.theta),
184
+ self.rope_params(neg_index, self.axes_dim[2], self.theta),
185
+ ],
186
+ dim=1,
187
+ )
188
+ self.rope_cache = {}
189
+
190
+ # DO NOT USE register_buffer HERE; IT WILL CAUSE COMPLEX NUMBERS TO LOSE THEIR IMAGINARY PART
191
+ self.scale_rope = scale_rope
192
+
193
+ def rope_params(self, index, dim, theta=10000):
194
+ """
195
+ Args:
196
+ index: [0, 1, 2, 3] 1D Tensor representing the position index of the token
197
+ """
198
+ assert dim % 2 == 0
199
+ freqs = torch.outer(index, 1.0 / torch.pow(theta, torch.arange(0, dim, 2).to(torch.float32).div(dim)))
200
+ freqs = torch.polar(torch.ones_like(freqs), freqs)
201
+ return freqs
202
+
203
+ def forward(self, video_fhw, txt_seq_lens, device):
204
+ """
205
+ Args: video_fhw: [frame, height, width] a list of 3 integers representing the shape of the video Args:
206
+ txt_length: [bs] a list of 1 integers representing the length of the text
207
+ """
208
+ if self.pos_freqs.device != device:
209
+ self.pos_freqs = self.pos_freqs.to(device)
210
+ self.neg_freqs = self.neg_freqs.to(device)
211
+
212
+ if isinstance(video_fhw, list):
213
+ video_fhw = video_fhw[0]
214
+ if not isinstance(video_fhw, list):
215
+ video_fhw = [video_fhw]
216
+
217
+ vid_freqs = []
218
+ max_vid_index = 0
219
+ for idx, fhw in enumerate(video_fhw):
220
+ frame, height, width = fhw
221
+ rope_key = f"{idx}_{height}_{width}"
222
+
223
+ if not torch.compiler.is_compiling():
224
+ if rope_key not in self.rope_cache:
225
+ self.rope_cache[rope_key] = self._compute_video_freqs(frame, height, width, idx)
226
+ video_freq = self.rope_cache[rope_key]
227
+ else:
228
+ video_freq = self._compute_video_freqs(frame, height, width, idx)
229
+ video_freq = video_freq.to(device)
230
+ vid_freqs.append(video_freq)
231
+
232
+ if self.scale_rope:
233
+ max_vid_index = max(height // 2, width // 2, max_vid_index)
234
+ else:
235
+ max_vid_index = max(height, width, max_vid_index)
236
+
237
+ max_len = max(txt_seq_lens)
238
+ txt_freqs = self.pos_freqs[max_vid_index : max_vid_index + max_len, ...]
239
+ vid_freqs = torch.cat(vid_freqs, dim=0)
240
+
241
+ return vid_freqs, txt_freqs
242
+
243
+ @functools.lru_cache(maxsize=None)
244
+ def _compute_video_freqs(self, frame, height, width, idx=0):
245
+ seq_lens = frame * height * width
246
+ freqs_pos = self.pos_freqs.split([x // 2 for x in self.axes_dim], dim=1)
247
+ freqs_neg = self.neg_freqs.split([x // 2 for x in self.axes_dim], dim=1)
248
+
249
+ freqs_frame = freqs_pos[0][idx : idx + frame].view(frame, 1, 1, -1).expand(frame, height, width, -1)
250
+ if self.scale_rope:
251
+ freqs_height = torch.cat([freqs_neg[1][-(height - height // 2) :], freqs_pos[1][: height // 2]], dim=0)
252
+ freqs_height = freqs_height.view(1, height, 1, -1).expand(frame, height, width, -1)
253
+ freqs_width = torch.cat([freqs_neg[2][-(width - width // 2) :], freqs_pos[2][: width // 2]], dim=0)
254
+ freqs_width = freqs_width.view(1, 1, width, -1).expand(frame, height, width, -1)
255
+ else:
256
+ freqs_height = freqs_pos[1][:height].view(1, height, 1, -1).expand(frame, height, width, -1)
257
+ freqs_width = freqs_pos[2][:width].view(1, 1, width, -1).expand(frame, height, width, -1)
258
+
259
+ freqs = torch.cat([freqs_frame, freqs_height, freqs_width], dim=-1).reshape(seq_lens, -1)
260
+ return freqs.clone().contiguous()
261
+
262
+
263
+ class QwenDoubleStreamAttnProcessor2_0:
264
+ """
265
+ Attention processor for Qwen double-stream architecture, matching DoubleStreamLayerMegatron logic. This processor
266
+ implements joint attention computation where text and image streams are processed together.
267
+ """
268
+
269
+ _attention_backend = None
270
+
271
+ def __init__(self):
272
+ if not hasattr(F, "scaled_dot_product_attention"):
273
+ raise ImportError(
274
+ "QwenDoubleStreamAttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0."
275
+ )
276
+
277
+ def __call__(
278
+ self,
279
+ attn: Attention,
280
+ hidden_states: torch.FloatTensor, # Image stream
281
+ encoder_hidden_states: torch.FloatTensor = None, # Text stream
282
+ encoder_hidden_states_mask: torch.FloatTensor = None,
283
+ attention_mask: Optional[torch.FloatTensor] = None,
284
+ image_rotary_emb: Optional[torch.Tensor] = None,
285
+ grag_scale = ((512,1.0,1.0),(4096,1.0,1.0)),
286
+ ) -> torch.FloatTensor:
287
+ if encoder_hidden_states is None:
288
+ raise ValueError("QwenDoubleStreamAttnProcessor2_0 requires encoder_hidden_states (text stream)")
289
+
290
+ seq_txt = encoder_hidden_states.shape[1]
291
+
292
+ # Compute QKV for image stream (sample projections)
293
+ img_query = attn.to_q(hidden_states)
294
+ img_key = attn.to_k(hidden_states)
295
+ img_value = attn.to_v(hidden_states)
296
+
297
+ # Compute QKV for text stream (context projections)
298
+ txt_query = attn.add_q_proj(encoder_hidden_states)
299
+ txt_key = attn.add_k_proj(encoder_hidden_states)
300
+ txt_value = attn.add_v_proj(encoder_hidden_states)
301
+
302
+ # Reshape for multi-head attention
303
+ img_query = img_query.unflatten(-1, (attn.heads, -1))
304
+ img_key = img_key.unflatten(-1, (attn.heads, -1))
305
+ img_value = img_value.unflatten(-1, (attn.heads, -1))
306
+
307
+ txt_query = txt_query.unflatten(-1, (attn.heads, -1))
308
+ txt_key = txt_key.unflatten(-1, (attn.heads, -1))
309
+ txt_value = txt_value.unflatten(-1, (attn.heads, -1))
310
+
311
+ # Apply QK normalization
312
+ if attn.norm_q is not None:
313
+ img_query = attn.norm_q(img_query)
314
+ if attn.norm_k is not None:
315
+ img_key = attn.norm_k(img_key)
316
+ if attn.norm_added_q is not None:
317
+ txt_query = attn.norm_added_q(txt_query)
318
+ if attn.norm_added_k is not None:
319
+ txt_key = attn.norm_added_k(txt_key)
320
+
321
+
322
+ # Apply RoPE
323
+ if image_rotary_emb is not None:
324
+ img_freqs, txt_freqs = image_rotary_emb
325
+ img_query = apply_rotary_emb_qwen(img_query, img_freqs, use_real=False)
326
+ img_key = apply_rotary_emb_qwen(img_key, img_freqs, use_real=False)
327
+ txt_query = apply_rotary_emb_qwen(txt_query, txt_freqs, use_real=False)
328
+ txt_key = apply_rotary_emb_qwen(txt_key, txt_freqs, use_real=False)
329
+
330
+ if grag_scale != (0,0) :
331
+ txt_srag_scale , cond_srag_scale = grag_scale
332
+ txt_len, txt_bias_scale, txt_delta_scale = txt_srag_scale
333
+ img_len, img_bias_scale, img_delta_scale = cond_srag_scale
334
+
335
+
336
+ txt_key_mean = txt_key[:,:txt_len,:,:].mean(dim=1)
337
+ cond_key_mean = img_key[:,-1*img_len:,:,:].mean(dim=1)
338
+
339
+
340
+ txt_key[:,:txt_len,:,:] = txt_bias_scale * txt_key_mean + (txt_key[:,:txt_len,:,:] - txt_key_mean) * txt_delta_scale
341
+ img_key[:,-1*img_len:,:,:] = img_bias_scale * cond_key_mean + (img_key[:,-1*img_len:,:,:] - cond_key_mean) * img_delta_scale
342
+
343
+ # print(f"Implemented GRAG: txt_srag_scale:{txt_srag_scale}, cond_grag_scale: {cond_srag_scale}\n")
344
+
345
+ joint_query = torch.cat([txt_query, img_query], dim=1)
346
+ joint_key = torch.cat([txt_key, img_key], dim=1)
347
+ joint_value = torch.cat([txt_value, img_value], dim=1)
348
+ # print(img_query.size())
349
+ hook_out = {"query":joint_query.clone().detach(),
350
+ "key":joint_key.clone().detach(),
351
+ "value":joint_value.clone().detach()}
352
+
353
+ # Compute joint attention
354
+ joint_hidden_states = dispatch_attention_fn(
355
+ joint_query,
356
+ joint_key,
357
+ joint_value,
358
+ attn_mask=attention_mask,
359
+ dropout_p=0.0,
360
+ is_causal=False,
361
+ backend=self._attention_backend,
362
+ )
363
+
364
+ # Reshape back
365
+ joint_hidden_states = joint_hidden_states.flatten(2, 3)
366
+ joint_hidden_states = joint_hidden_states.to(joint_query.dtype)
367
+
368
+ # Split attention outputs back
369
+ txt_attn_output = joint_hidden_states[:, :seq_txt, :] # Text part
370
+ img_attn_output = joint_hidden_states[:, seq_txt:, :] # Image part
371
+
372
+ # Apply output projections
373
+ img_attn_output = attn.to_out[0](img_attn_output)
374
+ if len(attn.to_out) > 1:
375
+ img_attn_output = attn.to_out[1](img_attn_output) # dropout
376
+
377
+ txt_attn_output = attn.to_add_out(txt_attn_output)
378
+
379
+ return (img_attn_output, txt_attn_output), hook_out
380
+
381
+
382
+ @maybe_allow_in_graph
383
+ class QwenImageTransformerBlock(nn.Module):
384
+ def __init__(
385
+ self, dim: int, num_attention_heads: int, attention_head_dim: int, qk_norm: str = "rms_norm", eps: float = 1e-6
386
+ ):
387
+ super().__init__()
388
+
389
+ self.dim = dim
390
+ self.num_attention_heads = num_attention_heads
391
+ self.attention_head_dim = attention_head_dim
392
+
393
+ # Image processing modules
394
+ self.img_mod = nn.Sequential(
395
+ nn.SiLU(),
396
+ nn.Linear(dim, 6 * dim, bias=True), # For scale, shift, gate for norm1 and norm2
397
+ )
398
+ self.img_norm1 = nn.LayerNorm(dim, elementwise_affine=False, eps=eps)
399
+ self.attn = Attention(
400
+ query_dim=dim,
401
+ cross_attention_dim=None, # Enable cross attention for joint computation
402
+ added_kv_proj_dim=dim, # Enable added KV projections for text stream
403
+ dim_head=attention_head_dim,
404
+ heads=num_attention_heads,
405
+ out_dim=dim,
406
+ context_pre_only=False,
407
+ bias=True,
408
+ processor=QwenDoubleStreamAttnProcessor2_0(),
409
+ qk_norm=qk_norm,
410
+ eps=eps,
411
+ )
412
+ self.img_norm2 = nn.LayerNorm(dim, elementwise_affine=False, eps=eps)
413
+ self.img_mlp = FeedForward(dim=dim, dim_out=dim, activation_fn="gelu-approximate")
414
+
415
+ # Text processing modules
416
+ self.txt_mod = nn.Sequential(
417
+ nn.SiLU(),
418
+ nn.Linear(dim, 6 * dim, bias=True), # For scale, shift, gate for norm1 and norm2
419
+ )
420
+ self.txt_norm1 = nn.LayerNorm(dim, elementwise_affine=False, eps=eps)
421
+ # Text doesn't need separate attention - it's handled by img_attn joint computation
422
+ self.txt_norm2 = nn.LayerNorm(dim, elementwise_affine=False, eps=eps)
423
+ self.txt_mlp = FeedForward(dim=dim, dim_out=dim, activation_fn="gelu-approximate")
424
+
425
+ def _modulate(self, x, mod_params):
426
+ """Apply modulation to input tensor"""
427
+ shift, scale, gate = mod_params.chunk(3, dim=-1)
428
+ return x * (1 + scale.unsqueeze(1)) + shift.unsqueeze(1), gate.unsqueeze(1)
429
+
430
+ def forward(
431
+ self,
432
+ hidden_states: torch.Tensor,
433
+ encoder_hidden_states: torch.Tensor,
434
+ encoder_hidden_states_mask: torch.Tensor,
435
+ temb: torch.Tensor,
436
+ image_rotary_emb: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
437
+ joint_attention_kwargs: Optional[Dict[str, Any]] = None,
438
+ grag_scale = ((512,1.0,1.0),(512,1.0,1.0)),
439
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
440
+ # Get modulation parameters for both streams
441
+ img_mod_params = self.img_mod(temb) # [B, 6*dim]
442
+ txt_mod_params = self.txt_mod(temb) # [B, 6*dim]
443
+
444
+ # Split modulation parameters for norm1 and norm2
445
+ img_mod1, img_mod2 = img_mod_params.chunk(2, dim=-1) # Each [B, 3*dim]
446
+ txt_mod1, txt_mod2 = txt_mod_params.chunk(2, dim=-1) # Each [B, 3*dim]
447
+
448
+ # Process image stream - norm1 + modulation
449
+ img_normed = self.img_norm1(hidden_states)
450
+ img_modulated, img_gate1 = self._modulate(img_normed, img_mod1)
451
+
452
+ # Process text stream - norm1 + modulation
453
+ txt_normed = self.txt_norm1(encoder_hidden_states)
454
+ txt_modulated, txt_gate1 = self._modulate(txt_normed, txt_mod1)
455
+
456
+ # Use QwenAttnProcessor2_0 for joint attention computation
457
+ # This directly implements the DoubleStreamLayerMegatron logic:
458
+ # 1. Computes QKV for both streams
459
+ # 2. Applies QK normalization and RoPE
460
+ # 3. Concatenates and runs joint attention
461
+ # 4. Splits results back to separate streams
462
+ joint_attention_kwargs = joint_attention_kwargs or {}
463
+ attn_output, hook_out = self.attn(
464
+ hidden_states=img_modulated, # Image stream (will be processed as "sample")
465
+ encoder_hidden_states=txt_modulated, # Text stream (will be processed as "context")
466
+ encoder_hidden_states_mask=encoder_hidden_states_mask,
467
+ image_rotary_emb=image_rotary_emb,
468
+ grag_scale = grag_scale,
469
+ **joint_attention_kwargs,
470
+ )
471
+
472
+ hook_out = None
473
+ del hook_out
474
+
475
+ # QwenAttnProcessor2_0 returns (img_output, txt_output) when encoder_hidden_states is provided
476
+ img_attn_output, txt_attn_output = attn_output
477
+
478
+ # Apply attention gates and add residual (like in Megatron)
479
+ hidden_states = hidden_states + img_gate1 * img_attn_output
480
+ encoder_hidden_states = encoder_hidden_states + txt_gate1 * txt_attn_output
481
+
482
+ # Process image stream - norm2 + MLP
483
+ img_normed2 = self.img_norm2(hidden_states)
484
+ img_modulated2, img_gate2 = self._modulate(img_normed2, img_mod2)
485
+ img_mlp_output = self.img_mlp(img_modulated2)
486
+ hidden_states = hidden_states + img_gate2 * img_mlp_output
487
+
488
+ # Process text stream - norm2 + MLP
489
+ txt_normed2 = self.txt_norm2(encoder_hidden_states)
490
+ txt_modulated2, txt_gate2 = self._modulate(txt_normed2, txt_mod2)
491
+ txt_mlp_output = self.txt_mlp(txt_modulated2)
492
+ encoder_hidden_states = encoder_hidden_states + txt_gate2 * txt_mlp_output
493
+
494
+ # Clip to prevent overflow for fp16
495
+ if encoder_hidden_states.dtype == torch.float16:
496
+ encoder_hidden_states = encoder_hidden_states.clip(-65504, 65504)
497
+ if hidden_states.dtype == torch.float16:
498
+ hidden_states = hidden_states.clip(-65504, 65504)
499
+
500
+ return encoder_hidden_states, hidden_states
501
+
502
+
503
+ class QwenImageTransformer2DModel(
504
+ ModelMixin, ConfigMixin, PeftAdapterMixin, FromOriginalModelMixin, CacheMixin, AttentionMixin
505
+ ):
506
+ """
507
+ The Transformer model introduced in Qwen.
508
+
509
+ Args:
510
+ patch_size (`int`, defaults to `2`):
511
+ Patch size to turn the input data into small patches.
512
+ in_channels (`int`, defaults to `64`):
513
+ The number of channels in the input.
514
+ out_channels (`int`, *optional*, defaults to `None`):
515
+ The number of channels in the output. If not specified, it defaults to `in_channels`.
516
+ num_layers (`int`, defaults to `60`):
517
+ The number of layers of dual stream DiT blocks to use.
518
+ attention_head_dim (`int`, defaults to `128`):
519
+ The number of dimensions to use for each attention head.
520
+ num_attention_heads (`int`, defaults to `24`):
521
+ The number of attention heads to use.
522
+ joint_attention_dim (`int`, defaults to `3584`):
523
+ The number of dimensions to use for the joint attention (embedding/channel dimension of
524
+ `encoder_hidden_states`).
525
+ guidance_embeds (`bool`, defaults to `False`):
526
+ Whether to use guidance embeddings for guidance-distilled variant of the model.
527
+ axes_dims_rope (`Tuple[int]`, defaults to `(16, 56, 56)`):
528
+ The dimensions to use for the rotary positional embeddings.
529
+ """
530
+
531
+ _supports_gradient_checkpointing = True
532
+ _no_split_modules = ["QwenImageTransformerBlock"]
533
+ _skip_layerwise_casting_patterns = ["pos_embed", "norm"]
534
+ _repeated_blocks = ["QwenImageTransformerBlock"]
535
+
536
+ @register_to_config
537
+ def __init__(
538
+ self,
539
+ patch_size: int = 2,
540
+ in_channels: int = 64,
541
+ out_channels: Optional[int] = 16,
542
+ num_layers: int = 60,
543
+ attention_head_dim: int = 128,
544
+ num_attention_heads: int = 24,
545
+ joint_attention_dim: int = 3584,
546
+ guidance_embeds: bool = False, # TODO: this should probably be removed
547
+ axes_dims_rope: Tuple[int, int, int] = (16, 56, 56),
548
+ ):
549
+ super().__init__()
550
+ self.out_channels = out_channels or in_channels
551
+ self.inner_dim = num_attention_heads * attention_head_dim
552
+
553
+ self.pos_embed = QwenEmbedRope(theta=10000, axes_dim=list(axes_dims_rope), scale_rope=True)
554
+
555
+ self.time_text_embed = QwenTimestepProjEmbeddings(embedding_dim=self.inner_dim)
556
+
557
+ self.txt_norm = RMSNorm(joint_attention_dim, eps=1e-6)
558
+
559
+ self.img_in = nn.Linear(in_channels, self.inner_dim)
560
+ self.txt_in = nn.Linear(joint_attention_dim, self.inner_dim)
561
+
562
+ self.transformer_blocks = nn.ModuleList(
563
+ [
564
+ QwenImageTransformerBlock(
565
+ dim=self.inner_dim,
566
+ num_attention_heads=num_attention_heads,
567
+ attention_head_dim=attention_head_dim,
568
+ )
569
+ for _ in range(num_layers)
570
+ ]
571
+ )
572
+
573
+ self.norm_out = AdaLayerNormContinuous(self.inner_dim, self.inner_dim, elementwise_affine=False, eps=1e-6)
574
+ self.proj_out = nn.Linear(self.inner_dim, patch_size * patch_size * self.out_channels, bias=True)
575
+
576
+ self.gradient_checkpointing = False
577
+
578
+ def forward(
579
+ self,
580
+ hidden_states: torch.Tensor,
581
+ encoder_hidden_states: torch.Tensor = None,
582
+ encoder_hidden_states_mask: torch.Tensor = None,
583
+ timestep: torch.LongTensor = None,
584
+ img_shapes: Optional[List[Tuple[int, int, int]]] = None,
585
+ txt_seq_lens: Optional[List[int]] = None,
586
+ guidance: torch.Tensor = None, # TODO: this should probably be removed
587
+ attention_kwargs: Optional[Dict[str, Any]] = None,
588
+ controlnet_block_samples=None,
589
+ return_dict: bool = True,
590
+ grag_scale = [((512,1.0,1.0),(512,1.0,1.0))] * 60,
591
+ ) -> Union[torch.Tensor, Transformer2DModelOutput]:
592
+ """
593
+ The [`QwenTransformer2DModel`] forward method.
594
+
595
+ Args:
596
+ hidden_states (`torch.Tensor` of shape `(batch_size, image_sequence_length, in_channels)`):
597
+ Input `hidden_states`.
598
+ encoder_hidden_states (`torch.Tensor` of shape `(batch_size, text_sequence_length, joint_attention_dim)`):
599
+ Conditional embeddings (embeddings computed from the input conditions such as prompts) to use.
600
+ encoder_hidden_states_mask (`torch.Tensor` of shape `(batch_size, text_sequence_length)`):
601
+ Mask of the input conditions.
602
+ timestep ( `torch.LongTensor`):
603
+ Used to indicate denoising step.
604
+ attention_kwargs (`dict`, *optional*):
605
+ A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
606
+ `self.processor` in
607
+ [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
608
+ return_dict (`bool`, *optional*, defaults to `True`):
609
+ Whether or not to return a [`~models.transformer_2d.Transformer2DModelOutput`] instead of a plain
610
+ tuple.
611
+
612
+ Returns:
613
+ If `return_dict` is True, an [`~models.transformer_2d.Transformer2DModelOutput`] is returned, otherwise a
614
+ `tuple` where the first element is the sample tensor.
615
+ """
616
+ if attention_kwargs is not None:
617
+ attention_kwargs = attention_kwargs.copy()
618
+ lora_scale = attention_kwargs.pop("scale", 1.0)
619
+ else:
620
+ lora_scale = 1.0
621
+
622
+ if USE_PEFT_BACKEND:
623
+ # weight the lora layers by setting `lora_scale` for each PEFT layer
624
+ scale_lora_layers(self, lora_scale)
625
+ else:
626
+ if attention_kwargs is not None and attention_kwargs.get("scale", None) is not None:
627
+ logger.warning(
628
+ "Passing `scale` via `joint_attention_kwargs` when not using the PEFT backend is ineffective."
629
+ )
630
+
631
+ hidden_states = self.img_in(hidden_states)
632
+
633
+ timestep = timestep.to(hidden_states.dtype)
634
+ encoder_hidden_states = self.txt_norm(encoder_hidden_states)
635
+ encoder_hidden_states = self.txt_in(encoder_hidden_states)
636
+
637
+ if guidance is not None:
638
+ guidance = guidance.to(hidden_states.dtype) * 1000
639
+
640
+ temb = (
641
+ self.time_text_embed(timestep, hidden_states)
642
+ if guidance is None
643
+ else self.time_text_embed(timestep, guidance, hidden_states)
644
+ )
645
+
646
+ image_rotary_emb = self.pos_embed(img_shapes, txt_seq_lens, device=hidden_states.device)
647
+
648
+ for index_block, block in enumerate(self.transformer_blocks):
649
+ if torch.is_grad_enabled() and self.gradient_checkpointing:
650
+ encoder_hidden_states, hidden_states = self._gradient_checkpointing_func(
651
+ block,
652
+ hidden_states,
653
+ encoder_hidden_states,
654
+ encoder_hidden_states_mask,
655
+ temb,
656
+ image_rotary_emb,
657
+ )
658
+
659
+ else:
660
+ encoder_hidden_states, hidden_states = block(
661
+ hidden_states=hidden_states,
662
+ encoder_hidden_states=encoder_hidden_states,
663
+ encoder_hidden_states_mask=encoder_hidden_states_mask,
664
+ temb=temb,
665
+ image_rotary_emb=image_rotary_emb,
666
+ joint_attention_kwargs=attention_kwargs,
667
+ grag_scale = grag_scale.pop() ,
668
+ )
669
+
670
+ # controlnet residual
671
+ if controlnet_block_samples is not None:
672
+ interval_control = len(self.transformer_blocks) / len(controlnet_block_samples)
673
+ interval_control = int(np.ceil(interval_control))
674
+ hidden_states = hidden_states + controlnet_block_samples[index_block // interval_control]
675
+
676
+ # Use only the image part (hidden_states) from the dual-stream blocks
677
+ hidden_states = self.norm_out(hidden_states, temb)
678
+ output = self.proj_out(hidden_states)
679
+
680
+ if USE_PEFT_BACKEND:
681
+ # remove `lora_scale` from each PEFT layer
682
+ unscale_lora_layers(self, lora_scale)
683
+
684
+ if not return_dict:
685
+ return (output,)
686
+
687
+ return Transformer2DModelOutput(sample=output)
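
For reference, each grag_scale tuple consumed by QwenDoubleStreamAttnProcessor2_0 rescales a segment of the attention keys around its mean, i.e. k' = bias_scale * mean(k) + delta_scale * (k - mean(k)). The following is a small self-contained sketch of that transform; the tensor shapes are illustrative only, not the model's actual sizes.

import torch

def grag_rescale_keys(key, length, bias_scale, delta_scale):
    # key: [batch, seq, heads, head_dim]; rescale the last `length` tokens
    # (e.g. the conditioning-image tokens) around their per-sequence mean.
    segment = key[:, -length:, :, :]
    mean = segment.mean(dim=1, keepdim=True)
    out = key.clone()
    out[:, -length:, :, :] = bias_scale * mean + delta_scale * (segment - mean)
    return out

# bias_scale = delta_scale = 1.0 is the identity, matching the default schedule.
k = torch.randn(1, 8, 2, 4)
assert torch.allclose(grag_rescale_keys(k, 4, 1.0, 1.0), k)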
hacked_models/pipeline.py ADDED
@@ -0,0 +1,938 @@
1
+ # Copyright 2025 Qwen-Image Team and The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import inspect
16
+ import math
17
+ from typing import Any, Callable, Dict, List, Optional, Union
18
+
19
+ import numpy as np
20
+ import torch
21
+ from transformers import Qwen2_5_VLForConditionalGeneration, Qwen2Tokenizer, Qwen2VLProcessor
22
+
23
+ from diffusers.image_processor import PipelineImageInput, VaeImageProcessor
24
+ from diffusers.loaders import QwenImageLoraLoaderMixin
25
+ from diffusers.models import AutoencoderKLQwenImage, QwenImageTransformer2DModel
26
+ from diffusers.schedulers import FlowMatchEulerDiscreteScheduler
27
+ from diffusers.utils import is_torch_xla_available, logging, replace_example_docstring
28
+ from diffusers.utils.torch_utils import randn_tensor
29
+ from diffusers.pipelines.pipeline_utils import DiffusionPipeline
30
+ from diffusers.pipelines.qwenimage.pipeline_output import QwenImagePipelineOutput
31
+
32
+
33
+ if is_torch_xla_available():
34
+ import torch_xla.core.xla_model as xm
35
+
36
+ XLA_AVAILABLE = True
37
+ else:
38
+ XLA_AVAILABLE = False
39
+
40
+
41
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
42
+
43
+ EXAMPLE_DOC_STRING = """
44
+ Examples:
45
+ ```py
46
+ >>> import torch
47
+ >>> from PIL import Image
48
+ >>> from diffusers import QwenImageEditPipeline
49
+ >>> from diffusers.utils import load_image
50
+
51
+ >>> pipe = QwenImageEditPipeline.from_pretrained("Qwen/Qwen-Image-Edit", torch_dtype=torch.bfloat16)
52
+ >>> pipe.to("cuda")
53
+ >>> image = load_image(
54
+ ... "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/yarn-art-pikachu.png"
55
+ ... ).convert("RGB")
56
+ >>> prompt = (
57
+ ... "Make Pikachu hold a sign that says 'Qwen Edit is awesome', yarn art style, detailed, vibrant colors"
58
+ ... )
59
+ >>> # Depending on the variant being used, the pipeline call will slightly vary.
60
+ >>> # Refer to the pipeline documentation for more details.
61
+ >>> image = pipe(image, prompt, num_inference_steps=50).images[0]
62
+ >>> image.save("qwenimage_edit.png")
63
+ ```
64
+ """
65
+
66
+
67
+ # Copied from diffusers.pipelines.qwenimage.pipeline_qwenimage.calculate_shift
68
+ def calculate_shift(
69
+ image_seq_len,
70
+ base_seq_len: int = 256,
71
+ max_seq_len: int = 4096,
72
+ base_shift: float = 0.5,
73
+ max_shift: float = 1.15,
74
+ ):
75
+ m = (max_shift - base_shift) / (max_seq_len - base_seq_len)
76
+ b = base_shift - m * base_seq_len
77
+ mu = image_seq_len * m + b
78
+ return mu
79
+
80
+
81
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps
82
+ def retrieve_timesteps(
83
+ scheduler,
84
+ num_inference_steps: Optional[int] = None,
85
+ device: Optional[Union[str, torch.device]] = None,
86
+ timesteps: Optional[List[int]] = None,
87
+ sigmas: Optional[List[float]] = None,
88
+ **kwargs,
89
+ ):
90
+ r"""
91
+ Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
92
+ custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.
93
+
94
+ Args:
95
+ scheduler (`SchedulerMixin`):
96
+ The scheduler to get timesteps from.
97
+ num_inference_steps (`int`):
98
+ The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps`
99
+ must be `None`.
100
+ device (`str` or `torch.device`, *optional*):
101
+ The device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
102
+ timesteps (`List[int]`, *optional*):
103
+ Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed,
104
+ `num_inference_steps` and `sigmas` must be `None`.
105
+ sigmas (`List[float]`, *optional*):
106
+ Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed,
107
+ `num_inference_steps` and `timesteps` must be `None`.
108
+
109
+ Returns:
110
+ `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
111
+ second element is the number of inference steps.
112
+ """
113
+ if timesteps is not None and sigmas is not None:
114
+ raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values")
115
+ if timesteps is not None:
116
+ accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
117
+ if not accepts_timesteps:
118
+ raise ValueError(
119
+ f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
120
+ f" timestep schedules. Please check whether you are using the correct scheduler."
121
+ )
122
+ scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
123
+ timesteps = scheduler.timesteps
124
+ num_inference_steps = len(timesteps)
125
+ elif sigmas is not None:
126
+ accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
127
+ if not accept_sigmas:
128
+ raise ValueError(
129
+ f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
130
+ f" sigmas schedules. Please check whether you are using the correct scheduler."
131
+ )
132
+ scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs)
133
+ timesteps = scheduler.timesteps
134
+ num_inference_steps = len(timesteps)
135
+ else:
136
+ scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
137
+ timesteps = scheduler.timesteps
138
+ return timesteps, num_inference_steps
139
+
140
+
141
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents
142
+ def retrieve_latents(
143
+ encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample"
144
+ ):
145
+ if hasattr(encoder_output, "latent_dist") and sample_mode == "sample":
146
+ return encoder_output.latent_dist.sample(generator)
147
+ elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax":
148
+ return encoder_output.latent_dist.mode()
149
+ elif hasattr(encoder_output, "latents"):
150
+ return encoder_output.latents
151
+ else:
152
+ raise AttributeError("Could not access latents of provided encoder_output")
153
+
154
+
155
+ def calculate_dimensions(target_area, ratio):
156
+ width = math.sqrt(target_area * ratio)
157
+ height = width / ratio
158
+
159
+ width = round(width / 32) * 32
160
+ height = round(height / 32) * 32
161
+
162
+ return width, height, None
163
+
164
+
165
+ class QwenImageEditPipeline(DiffusionPipeline, QwenImageLoraLoaderMixin):
166
+ r"""
167
+ The Qwen-Image-Edit pipeline for image editing.
168
+
169
+ Args:
170
+ transformer ([`QwenImageTransformer2DModel`]):
171
+ Conditional Transformer (MMDiT) architecture to denoise the encoded image latents.
172
+ scheduler ([`FlowMatchEulerDiscreteScheduler`]):
173
+ A scheduler to be used in combination with `transformer` to denoise the encoded image latents.
174
+ vae ([`AutoencoderKL`]):
175
+ Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
176
+ text_encoder ([`Qwen2.5-VL-7B-Instruct`]):
177
+ [Qwen2.5-VL-7B-Instruct](https://huggingface.co/Qwen/Qwen2.5-VL-7B-Instruct), specifically the
178
+ [Qwen2.5-VL-7B-Instruct](https://huggingface.co/Qwen/Qwen2.5-VL-7B-Instruct) variant.
179
+ tokenizer (`QwenTokenizer`):
180
+ Tokenizer of class
181
+ [CLIPTokenizer](https://huggingface.co/docs/transformers/en/model_doc/clip#transformers.CLIPTokenizer).
182
+ """
183
+
184
+ model_cpu_offload_seq = "text_encoder->transformer->vae"
185
+ _callback_tensor_inputs = ["latents", "prompt_embeds"]
186
+
187
+ def __init__(
188
+ self,
189
+ scheduler: FlowMatchEulerDiscreteScheduler,
190
+ vae: AutoencoderKLQwenImage,
191
+ text_encoder: Qwen2_5_VLForConditionalGeneration,
192
+ tokenizer: Qwen2Tokenizer,
193
+ processor: Qwen2VLProcessor,
194
+ transformer: QwenImageTransformer2DModel,
195
+ ):
196
+ super().__init__()
197
+
198
+ self.register_modules(
199
+ vae=vae,
200
+ text_encoder=text_encoder,
201
+ tokenizer=tokenizer,
202
+ processor=processor,
203
+ transformer=transformer,
204
+ scheduler=scheduler,
205
+ )
206
+ self.vae_scale_factor = 2 ** len(self.vae.temperal_downsample) if getattr(self, "vae", None) else 8
207
+ self.latent_channels = self.vae.config.z_dim if getattr(self, "vae", None) else 16
208
+ # QwenImage latents are turned into 2x2 patches and packed. This means the latent width and height has to be divisible
209
+ # by the patch size. So the vae scale factor is multiplied by the patch size to account for this
210
+ self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor * 2)
211
+ self.vl_processor = processor
212
+ self.tokenizer_max_length = 1024
213
+
214
+ self.prompt_template_encode = "<|im_start|>system\nDescribe the key features of the input image (color, shape, size, texture, objects, background), then explain how the user's text instruction should alter or modify the image. Generate a new image that meets the user's requirements while maintaining consistency with the original input where appropriate.<|im_end|>\n<|im_start|>user\n<|vision_start|><|image_pad|><|vision_end|>{}<|im_end|>\n<|im_start|>assistant\n"
215
+ self.prompt_template_encode_start_idx = 64
216
+ self.default_sample_size = 128
217
+
218
+ # Copied from diffusers.pipelines.qwenimage.pipeline_qwenimage.QwenImagePipeline._extract_masked_hidden
219
+ def _extract_masked_hidden(self, hidden_states: torch.Tensor, mask: torch.Tensor):
220
+ bool_mask = mask.bool()
221
+ valid_lengths = bool_mask.sum(dim=1)
222
+ selected = hidden_states[bool_mask]
223
+ split_result = torch.split(selected, valid_lengths.tolist(), dim=0)
224
+
225
+ return split_result
226
+
227
+ def _get_qwen_prompt_embeds(
228
+ self,
229
+ prompt: Union[str, List[str]] = None,
230
+ image: Optional[torch.Tensor] = None,
231
+ device: Optional[torch.device] = None,
232
+ dtype: Optional[torch.dtype] = None,
233
+ ):
234
+ device = device or self._execution_device
235
+ dtype = dtype or self.text_encoder.dtype
236
+
237
+ prompt = [prompt] if isinstance(prompt, str) else prompt
238
+
239
+ template = self.prompt_template_encode
240
+ drop_idx = self.prompt_template_encode_start_idx
241
+ txt = [template.format(e) for e in prompt]
242
+
243
+ model_inputs = self.processor(
244
+ text=txt,
245
+ images=image,
246
+ padding=True,
247
+ return_tensors="pt",
248
+ ).to(device)
249
+
250
+ outputs = self.text_encoder(
251
+ input_ids=model_inputs.input_ids,
252
+ attention_mask=model_inputs.attention_mask,
253
+ pixel_values=model_inputs.pixel_values,
254
+ image_grid_thw=model_inputs.image_grid_thw,
255
+ output_hidden_states=True,
256
+ )
257
+
258
+ hidden_states = outputs.hidden_states[-1]
259
+ split_hidden_states = self._extract_masked_hidden(hidden_states, model_inputs.attention_mask)
260
+ split_hidden_states = [e[drop_idx:] for e in split_hidden_states]
261
+ attn_mask_list = [torch.ones(e.size(0), dtype=torch.long, device=e.device) for e in split_hidden_states]
262
+ max_seq_len = max([e.size(0) for e in split_hidden_states])
263
+ prompt_embeds = torch.stack(
264
+ [torch.cat([u, u.new_zeros(max_seq_len - u.size(0), u.size(1))]) for u in split_hidden_states]
265
+ )
266
+ encoder_attention_mask = torch.stack(
267
+ [torch.cat([u, u.new_zeros(max_seq_len - u.size(0))]) for u in attn_mask_list]
268
+ )
269
+
270
+ prompt_embeds = prompt_embeds.to(dtype=dtype, device=device)
271
+
272
+ return prompt_embeds, encoder_attention_mask
273
+
274
+ def encode_prompt(
275
+ self,
276
+ prompt: Union[str, List[str]],
277
+ image: Optional[torch.Tensor] = None,
278
+ device: Optional[torch.device] = None,
279
+ num_images_per_prompt: int = 1,
280
+ prompt_embeds: Optional[torch.Tensor] = None,
281
+ prompt_embeds_mask: Optional[torch.Tensor] = None,
282
+ max_sequence_length: int = 1024,
283
+ ):
284
+ r"""
285
+
286
+ Args:
287
+ prompt (`str` or `List[str]`, *optional*):
288
+ prompt to be encoded
289
+ image (`torch.Tensor`, *optional*):
290
+ image to be encoded
291
+ device: (`torch.device`):
292
+ torch device
293
+ num_images_per_prompt (`int`):
294
+ number of images that should be generated per prompt
295
+ prompt_embeds (`torch.Tensor`, *optional*):
296
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
297
+ provided, text embeddings will be generated from `prompt` input argument.
298
+ """
299
+ device = device or self._execution_device
300
+
301
+ prompt = [prompt] if isinstance(prompt, str) else prompt
302
+ batch_size = len(prompt) if prompt_embeds is None else prompt_embeds.shape[0]
303
+
304
+ if prompt_embeds is None:
305
+ prompt_embeds, prompt_embeds_mask = self._get_qwen_prompt_embeds(prompt, image, device)
306
+
307
+ _, seq_len, _ = prompt_embeds.shape
308
+ prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
309
+ prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
310
+ prompt_embeds_mask = prompt_embeds_mask.repeat(1, num_images_per_prompt, 1)
311
+ prompt_embeds_mask = prompt_embeds_mask.view(batch_size * num_images_per_prompt, seq_len)
312
+
313
+ return prompt_embeds, prompt_embeds_mask
314
+
315
+ def check_inputs(
316
+ self,
317
+ prompt,
318
+ height,
319
+ width,
320
+ negative_prompt=None,
321
+ prompt_embeds=None,
322
+ negative_prompt_embeds=None,
323
+ prompt_embeds_mask=None,
324
+ negative_prompt_embeds_mask=None,
325
+ callback_on_step_end_tensor_inputs=None,
326
+ max_sequence_length=None,
327
+ ):
328
+ if height % (self.vae_scale_factor * 2) != 0 or width % (self.vae_scale_factor * 2) != 0:
329
+ logger.warning(
330
+ f"`height` and `width` have to be divisible by {self.vae_scale_factor * 2} but are {height} and {width}. Dimensions will be resized accordingly"
331
+ )
332
+
333
+ if callback_on_step_end_tensor_inputs is not None and not all(
334
+ k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
335
+ ):
336
+ raise ValueError(
337
+ f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
338
+ )
339
+
340
+ if prompt is not None and prompt_embeds is not None:
341
+ raise ValueError(
342
+ f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
343
+ " only forward one of the two."
344
+ )
345
+ elif prompt is None and prompt_embeds is None:
346
+ raise ValueError(
347
+ "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
348
+ )
349
+ elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
350
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
351
+
352
+ if negative_prompt is not None and negative_prompt_embeds is not None:
353
+ raise ValueError(
354
+ f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
355
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
356
+ )
357
+
358
+ if prompt_embeds is not None and prompt_embeds_mask is None:
359
+ raise ValueError(
360
+ "If `prompt_embeds` are provided, `prompt_embeds_mask` also have to be passed. Make sure to generate `prompt_embeds_mask` from the same text encoder that was used to generate `prompt_embeds`."
361
+ )
362
+ if negative_prompt_embeds is not None and negative_prompt_embeds_mask is None:
363
+ raise ValueError(
364
+ "If `negative_prompt_embeds` are provided, `negative_prompt_embeds_mask` also have to be passed. Make sure to generate `negative_prompt_embeds_mask` from the same text encoder that was used to generate `negative_prompt_embeds`."
365
+ )
366
+
367
+ if max_sequence_length is not None and max_sequence_length > 1024:
368
+ raise ValueError(f"`max_sequence_length` cannot be greater than 1024 but is {max_sequence_length}")
369
+
370
+ @staticmethod
371
+ # Copied from diffusers.pipelines.qwenimage.pipeline_qwenimage.QwenImagePipeline._pack_latents
372
+ def _pack_latents(latents, batch_size, num_channels_latents, height, width):
373
+ latents = latents.view(batch_size, num_channels_latents, height // 2, 2, width // 2, 2)
374
+ latents = latents.permute(0, 2, 4, 1, 3, 5)
375
+ latents = latents.reshape(batch_size, (height // 2) * (width // 2), num_channels_latents * 4)
376
+
377
+ return latents
378
+
379
+ @staticmethod
380
+ # Copied from diffusers.pipelines.qwenimage.pipeline_qwenimage.QwenImagePipeline._unpack_latents
381
+ def _unpack_latents(latents, height, width, vae_scale_factor):
382
+ batch_size, num_patches, channels = latents.shape
383
+
384
+ # VAE applies 8x compression on images but we must also account for packing which requires
385
+ # latent height and width to be divisible by 2.
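+ # Worked example (assuming vae_scale_factor == 8): a 1024x1024 target gives a 128x128 latent grid,
+ # grouped into 2x2 patches, so num_patches = (128 // 2) * (128 // 2) = 4096 packed tokens.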
386
+ height = 2 * (int(height) // (vae_scale_factor * 2))
387
+ width = 2 * (int(width) // (vae_scale_factor * 2))
388
+
389
+ latents = latents.view(batch_size, height // 2, width // 2, channels // 4, 2, 2)
390
+ latents = latents.permute(0, 3, 1, 4, 2, 5)
391
+
392
+ latents = latents.reshape(batch_size, channels // (2 * 2), 1, height, width)
393
+
394
+ return latents
395
+
396
+ def _encode_vae_image(self, image: torch.Tensor, generator: torch.Generator):
397
+ if isinstance(generator, list):
398
+ image_latents = [
399
+ retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i], sample_mode="argmax")
400
+ for i in range(image.shape[0])
401
+ ]
402
+ image_latents = torch.cat(image_latents, dim=0)
403
+ else:
404
+ image_latents = retrieve_latents(self.vae.encode(image), generator=generator, sample_mode="argmax")
405
+ latents_mean = (
406
+ torch.tensor(self.vae.config.latents_mean)
407
+ .view(1, self.latent_channels, 1, 1, 1)
408
+ .to(image_latents.device, image_latents.dtype)
409
+ )
410
+ latents_std = (
411
+ torch.tensor(self.vae.config.latents_std)
412
+ .view(1, self.latent_channels, 1, 1, 1)
413
+ .to(image_latents.device, image_latents.dtype)
414
+ )
415
+ image_latents = (image_latents - latents_mean) / latents_std
416
+
417
+ return image_latents
418
+
419
+ def enable_vae_slicing(self):
420
+ r"""
421
+ Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
422
+ compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
423
+ """
424
+ self.vae.enable_slicing()
425
+
426
+ def disable_vae_slicing(self):
427
+ r"""
428
+ Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to
429
+ computing decoding in one step.
430
+ """
431
+ self.vae.disable_slicing()
432
+
433
+ def enable_vae_tiling(self):
434
+ r"""
435
+ Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
436
+ compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
437
+ processing larger images.
438
+ """
439
+ self.vae.enable_tiling()
440
+
441
+ def disable_vae_tiling(self):
442
+ r"""
443
+ Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to
444
+ computing decoding in one step.
445
+ """
446
+ self.vae.disable_tiling()
447
+
448
+ def prepare_latents(
449
+ self,
450
+ image,
451
+ batch_size,
452
+ num_channels_latents,
453
+ height,
454
+ width,
455
+ dtype,
456
+ device,
457
+ generator,
458
+ latents=None,
459
+ ):
460
+ # VAE applies 8x compression on images but we must also account for packing which requires
461
+ # latent height and width to be divisible by 2.
462
+ height = 2 * (int(height) // (self.vae_scale_factor * 2))
463
+ width = 2 * (int(width) // (self.vae_scale_factor * 2))
464
+
465
+ shape = (batch_size, 1, num_channels_latents, height, width)
466
+
467
+ image_latents = None
468
+ if image is not None:
469
+ image = image.to(device=device, dtype=dtype)
470
+ if image.shape[1] != self.latent_channels:
471
+ image_latents = self._encode_vae_image(image=image, generator=generator)
472
+ else:
473
+ image_latents = image
474
+ if batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] == 0:
475
+ # expand init_latents for batch_size
476
+ additional_image_per_prompt = batch_size // image_latents.shape[0]
477
+ image_latents = torch.cat([image_latents] * additional_image_per_prompt, dim=0)
478
+ elif batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] != 0:
479
+ raise ValueError(
480
+ f"Cannot duplicate `image` of batch size {image_latents.shape[0]} to {batch_size} text prompts."
481
+ )
482
+ else:
483
+ image_latents = torch.cat([image_latents], dim=0)
484
+
485
+ image_latent_height, image_latent_width = image_latents.shape[3:]
486
+ image_latents = self._pack_latents(
487
+ image_latents, batch_size, num_channels_latents, image_latent_height, image_latent_width
488
+ )
489
+
490
+ if isinstance(generator, list) and len(generator) != batch_size:
491
+ raise ValueError(
492
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
493
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
494
+ )
495
+ if latents is None:
496
+ latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
497
+ latents = self._pack_latents(latents, batch_size, num_channels_latents, height, width)
498
+ else:
499
+ latents = latents.to(device=device, dtype=dtype)
500
+
501
+ return latents, image_latents
502
+
503
+ @property
504
+ def guidance_scale(self):
505
+ return self._guidance_scale
506
+
507
+ @property
508
+ def attention_kwargs(self):
509
+ return self._attention_kwargs
510
+
511
+ @property
512
+ def num_timesteps(self):
513
+ return self._num_timesteps
514
+
515
+ @property
516
+ def current_timestep(self):
517
+ return self._current_timestep
518
+
519
+ @property
520
+ def interrupt(self):
521
+ return self._interrupt
522
+
523
+ @torch.no_grad()
524
+ @replace_example_docstring(EXAMPLE_DOC_STRING)
525
+ def __call__(
526
+ self,
527
+ image: Optional[PipelineImageInput] = None,
528
+ prompt: Union[str, List[str]] = None,
529
+ negative_prompt: Union[str, List[str]] = None,
530
+ true_cfg_scale: float = 4.0,
531
+ height: Optional[int] = None,
532
+ width: Optional[int] = None,
533
+ num_inference_steps: int = 50,
534
+ sigmas: Optional[List[float]] = None,
535
+ guidance_scale: Optional[float] = None,
536
+ num_images_per_prompt: int = 1,
537
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
538
+ latents: Optional[torch.Tensor] = None,
539
+ prompt_embeds: Optional[torch.Tensor] = None,
540
+ prompt_embeds_mask: Optional[torch.Tensor] = None,
541
+ negative_prompt_embeds: Optional[torch.Tensor] = None,
542
+ negative_prompt_embeds_mask: Optional[torch.Tensor] = None,
543
+ output_type: Optional[str] = "pil",
544
+ return_dict: bool = True,
545
+ attention_kwargs: Optional[Dict[str, Any]] = None,
546
+ callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
547
+ callback_on_step_end_tensor_inputs: List[str] = ["latents"],
548
+ max_sequence_length: int = 512,
549
+ grag_scale = [((512,1.0,1.0),(512,1.0,1.0))] * 57,
550
+ ):
551
+ r"""
552
+ Function invoked when calling the pipeline for generation.
553
+
554
+ Args:
555
+ image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`):
556
+ `Image`, numpy array or tensor representing an image batch to be used as the starting point. For both
557
+ numpy array and pytorch tensor, the expected value range is between `[0, 1]` If it's a tensor or a list
558
+ or tensors, the expected shape should be `(B, C, H, W)` or `(C, H, W)`. If it is a numpy array or a
559
+ list of arrays, the expected shape should be `(B, H, W, C)` or `(H, W, C)` It can also accept image
560
+ latents as `image`, but if passing latents directly it is not encoded again.
561
+ prompt (`str` or `List[str]`, *optional*):
562
+ The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`.
563
+ instead.
564
+ negative_prompt (`str` or `List[str]`, *optional*):
565
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
566
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `true_cfg_scale` is
567
+ not greater than `1`).
568
+ true_cfg_scale (`float`, *optional*, defaults to 4.0): Guidance scale as defined in [Classifier-Free
+ Diffusion Guidance](https://huggingface.co/papers/2207.12598). `true_cfg_scale` is defined as `w` of
571
+ equation 2. of [Imagen Paper](https://huggingface.co/papers/2205.11487). Classifier-free guidance is
572
+ enabled by setting `true_cfg_scale > 1` and a provided `negative_prompt`. Higher guidance scale
573
+ encourages to generate images that are closely linked to the text `prompt`, usually at the expense of
574
+ lower image quality.
575
+ height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
576
+ The height in pixels of the generated image. This is set to 1024 by default for the best results.
577
+ width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
578
+ The width in pixels of the generated image. This is set to 1024 by default for the best results.
579
+ num_inference_steps (`int`, *optional*, defaults to 50):
580
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
581
+ expense of slower inference.
582
+ sigmas (`List[float]`, *optional*):
583
+ Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in
584
+ their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed
585
+ will be used.
586
+ guidance_scale (`float`, *optional*, defaults to None):
587
+ A guidance scale value for guidance distilled models. Unlike the traditional classifier-free guidance
588
+ where the guidance scale is applied during inference through noise prediction rescaling, guidance
589
+ distilled models take the guidance scale directly as an input parameter during forward pass. Guidance
590
+ scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images
591
+ that are closely linked to the text `prompt`, usually at the expense of lower image quality. This
592
+ parameter in the pipeline is there to support future guidance-distilled models when they come up. It is
593
+ ignored when not using guidance distilled models. To enable traditional classifier-free guidance,
594
+ please pass `true_cfg_scale > 1.0` and `negative_prompt` (even an empty negative prompt like " " should
595
+ enable classifier-free guidance computations).
596
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
597
+ The number of images to generate per prompt.
598
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
599
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
600
+ to make generation deterministic.
601
+ latents (`torch.Tensor`, *optional*):
602
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
603
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
604
+ tensor will be generated by sampling using the supplied random `generator`.
605
+ prompt_embeds (`torch.Tensor`, *optional*):
606
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
607
+ provided, text embeddings will be generated from `prompt` input argument.
608
+ negative_prompt_embeds (`torch.Tensor`, *optional*):
609
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
610
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
611
+ argument.
612
+ output_type (`str`, *optional*, defaults to `"pil"`):
613
+ The output format of the generated image. Choose between
614
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
615
+ return_dict (`bool`, *optional*, defaults to `True`):
616
+ Whether or not to return a [`~pipelines.qwenimage.QwenImagePipelineOutput`] instead of a plain tuple.
617
+ attention_kwargs (`dict`, *optional*):
618
+ A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
619
+ `self.processor` in
620
+ [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
621
+ callback_on_step_end (`Callable`, *optional*):
622
+ A function that is called at the end of each denoising step during inference. The function is called
623
+ with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
624
+ callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
625
+ `callback_on_step_end_tensor_inputs`.
626
+ callback_on_step_end_tensor_inputs (`List`, *optional*):
627
+ The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
628
+ will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
629
+ `._callback_tensor_inputs` attribute of your pipeline class.
630
+ max_sequence_length (`int` defaults to 512): Maximum sequence length to use with the `prompt`.
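+ grag_scale (`list`, *optional*, defaults to `[((512, 1.0, 1.0), (512, 1.0, 1.0))] * 57`):
+ List of GRAG scale tuples forwarded unchanged (as a copy) to the hacked transformer on every
+ denoising step; see the hacked transformer implementation for how each tuple is interpreted.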
631
+
632
+ Examples:
633
+
634
+ Returns:
635
+ [`~pipelines.qwenimage.QwenImagePipelineOutput`] or `tuple`:
636
+ [`~pipelines.qwenimage.QwenImagePipelineOutput`] if `return_dict` is True, otherwise a `tuple`. When
637
+ returning a tuple, the first element is a list with the generated images.
638
+ """
639
+ image_size = image[0].size if isinstance(image, list) else image.size
640
+ calculated_width, calculated_height, _ = calculate_dimensions(1024 * 1024, image_size[0] / image_size[1])
641
+ height = height or calculated_height
642
+ width = width or calculated_width
643
+
644
+ multiple_of = self.vae_scale_factor * 2
645
+ width = width // multiple_of * multiple_of
646
+ height = height // multiple_of * multiple_of
647
+
648
+ # 1. Check inputs. Raise error if not correct
649
+ self.check_inputs(
650
+ prompt,
651
+ height,
652
+ width,
653
+ negative_prompt=negative_prompt,
654
+ prompt_embeds=prompt_embeds,
655
+ negative_prompt_embeds=negative_prompt_embeds,
656
+ prompt_embeds_mask=prompt_embeds_mask,
657
+ negative_prompt_embeds_mask=negative_prompt_embeds_mask,
658
+ callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs,
659
+ max_sequence_length=max_sequence_length,
660
+ )
661
+
662
+ self._guidance_scale = guidance_scale
663
+ self._attention_kwargs = attention_kwargs
664
+ self._current_timestep = None
665
+ self._interrupt = False
666
+
667
+ # 2. Define call parameters
668
+ if prompt is not None and isinstance(prompt, str):
669
+ batch_size = 1
670
+ elif prompt is not None and isinstance(prompt, list):
671
+ batch_size = len(prompt)
672
+ else:
673
+ batch_size = prompt_embeds.shape[0]
674
+
675
+ device = self._execution_device
676
+ # 3. Preprocess image
677
+ if image is not None and not (isinstance(image, torch.Tensor) and image.size(1) == self.latent_channels):
678
+ image = self.image_processor.resize(image, calculated_height, calculated_width)
679
+ prompt_image = image
680
+ image = self.image_processor.preprocess(image, calculated_height, calculated_width)
681
+ image = image.unsqueeze(2)
682
+
683
+ has_neg_prompt = negative_prompt is not None or (
684
+ negative_prompt_embeds is not None and negative_prompt_embeds_mask is not None
685
+ )
686
+
687
+ if true_cfg_scale > 1 and not has_neg_prompt:
688
+ logger.warning(
689
+ f"true_cfg_scale is passed as {true_cfg_scale}, but classifier-free guidance is not enabled since no negative_prompt is provided."
690
+ )
691
+ elif true_cfg_scale <= 1 and has_neg_prompt:
692
+ logger.warning(
693
+ " negative_prompt is passed but classifier-free guidance is not enabled since true_cfg_scale <= 1"
694
+ )
695
+
696
+ do_true_cfg = true_cfg_scale > 1 and has_neg_prompt
697
+ prompt_embeds, prompt_embeds_mask = self.encode_prompt(
698
+ image=prompt_image,
699
+ prompt=prompt,
700
+ prompt_embeds=prompt_embeds,
701
+ prompt_embeds_mask=prompt_embeds_mask,
702
+ device=device,
703
+ num_images_per_prompt=num_images_per_prompt,
704
+ max_sequence_length=max_sequence_length,
705
+ )
706
+ if do_true_cfg:
707
+ negative_prompt_embeds, negative_prompt_embeds_mask = self.encode_prompt(
708
+ image=prompt_image,
709
+ prompt=negative_prompt,
710
+ prompt_embeds=negative_prompt_embeds,
711
+ prompt_embeds_mask=negative_prompt_embeds_mask,
712
+ device=device,
713
+ num_images_per_prompt=num_images_per_prompt,
714
+ max_sequence_length=max_sequence_length,
715
+ )
716
+
717
+ # 4. Prepare latent variables
718
+ num_channels_latents = self.transformer.config.in_channels // 4
719
+ latents, image_latents = self.prepare_latents(
720
+ image,
721
+ batch_size * num_images_per_prompt,
722
+ num_channels_latents,
723
+ height,
724
+ width,
725
+ prompt_embeds.dtype,
726
+ device,
727
+ generator,
728
+ latents,
729
+ )
730
+ img_shapes = [
731
+ [
732
+ (1, height // self.vae_scale_factor // 2, width // self.vae_scale_factor // 2),
733
+ (1, calculated_height // self.vae_scale_factor // 2, calculated_width // self.vae_scale_factor // 2),
734
+ ]
735
+ ] * batch_size
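+ # img_shapes above: per-sample token-grid shapes for [target latents, conditioning-image latents],
+ # each given as (frames, height_tokens, width_tokens).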
736
+
737
+ # 5. Prepare timesteps
738
+ sigmas = np.linspace(1.0, 1 / num_inference_steps, num_inference_steps) if sigmas is None else sigmas
739
+ image_seq_len = latents.shape[1]
740
+ mu = calculate_shift(
741
+ image_seq_len,
742
+ self.scheduler.config.get("base_image_seq_len", 256),
743
+ self.scheduler.config.get("max_image_seq_len", 4096),
744
+ self.scheduler.config.get("base_shift", 0.5),
745
+ self.scheduler.config.get("max_shift", 1.15),
746
+ )
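+ # mu is the resolution-dependent timestep shift used by the scheduler's dynamic shifting,
+ # interpolated between base_shift and max_shift from the packed image sequence length.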
747
+ timesteps, num_inference_steps = retrieve_timesteps(
748
+ self.scheduler,
749
+ num_inference_steps,
750
+ device,
751
+ sigmas=sigmas,
752
+ mu=mu,
753
+ )
754
+ num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
755
+ self._num_timesteps = len(timesteps)
756
+
757
+ # handle guidance
758
+ if self.transformer.config.guidance_embeds and guidance_scale is None:
759
+ raise ValueError("guidance_scale is required for guidance-distilled model.")
760
+ elif self.transformer.config.guidance_embeds:
761
+ guidance = torch.full([1], guidance_scale, device=device, dtype=torch.float32)
762
+ guidance = guidance.expand(latents.shape[0])
763
+ elif not self.transformer.config.guidance_embeds and guidance_scale is not None:
764
+ logger.warning(
765
+ f"guidance_scale is passed as {guidance_scale}, but ignored since the model is not guidance-distilled."
766
+ )
767
+ guidance = None
768
+ elif not self.transformer.config.guidance_embeds and guidance_scale is None:
769
+ guidance = None
770
+
771
+ if self.attention_kwargs is None:
772
+ self._attention_kwargs = {}
773
+
774
+ txt_seq_lens = prompt_embeds_mask.sum(dim=1).tolist() if prompt_embeds_mask is not None else None
775
+ negative_txt_seq_lens = (
776
+ negative_prompt_embeds_mask.sum(dim=1).tolist() if negative_prompt_embeds_mask is not None else None
777
+ )
778
+
779
+ # 6. Denoising loop
780
+ self.scheduler.set_begin_index(0)
781
+
782
+ x0_previews = []
783
+
784
+ saved_outputs = {}
785
+ handles = []
786
+ initial_latents = latents.clone()
787
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
788
+ for i, t in enumerate(timesteps):
789
+ if self.interrupt:
790
+ continue
791
+
792
+ hook_outs = []
793
+
794
+ self._current_timestep = t
795
+
796
+ latent_model_input = latents
797
+ if image_latents is not None:
798
+
799
+ latent_model_input = torch.cat([latents, image_latents], dim=1)
800
+
801
+
802
+ def save_output_hook(module, input, output):
803
+ qkv = {}
804
+ hook_outs.append(qkv)
805
+
806
+ def register_hook(model):
807
+ handles = []
808
+ for name, module in model.named_modules():
809
+ # print(name)
810
+ if name.split('.')[-1] == "attn":
811
+ handle = module.register_forward_hook(save_output_hook)
812
+ handles.append(handle)
813
+ return handles
814
+
815
+ if i in []:
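+ # NOTE: debug instrumentation: the step list above is empty, so the attention hooks are never
+ # registered and `saved_outputs` stays empty.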
816
+ handles = register_hook(self.transformer)
817
+
818
+
819
+
820
+ # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
821
+ timestep = t.expand(latents.shape[0]).to(latents.dtype)
822
+ with self.transformer.cache_context("cond"):
823
+ noise_pred = self.transformer(
824
+ hidden_states=latent_model_input,
825
+ timestep=timestep / 1000,
826
+ guidance=guidance,
827
+ encoder_hidden_states_mask=prompt_embeds_mask,
828
+ encoder_hidden_states=prompt_embeds,
829
+ img_shapes=img_shapes,
830
+ txt_seq_lens=txt_seq_lens,
831
+ attention_kwargs=self.attention_kwargs,
832
+ return_dict=False,
833
+ grag_scale=grag_scale.copy(),
834
+ )[0]
835
+ noise_pred = noise_pred[:, : latents.size(1)]
836
+
837
+ if do_true_cfg:
838
+ with self.transformer.cache_context("uncond"):
839
+ neg_noise_pred = self.transformer(
840
+ hidden_states=latent_model_input,
841
+ timestep=timestep / 1000,
842
+ guidance=guidance,
843
+ encoder_hidden_states_mask=negative_prompt_embeds_mask,
844
+ encoder_hidden_states=negative_prompt_embeds,
845
+ img_shapes=img_shapes,
846
+ txt_seq_lens=negative_txt_seq_lens,
847
+ attention_kwargs=self.attention_kwargs,
848
+ return_dict=False,
849
+ grag_scale=grag_scale.copy(),
850
+ )[0]
851
+ neg_noise_pred = neg_noise_pred[:, : latents.size(1)]
852
+ comb_pred = neg_noise_pred + true_cfg_scale * (noise_pred - neg_noise_pred)
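+ # Rescale the combined prediction so its per-token norm matches the conditional branch,
+ # which keeps true-CFG from inflating the prediction magnitude.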
853
+
854
+ cond_norm = torch.norm(noise_pred, dim=-1, keepdim=True)
855
+ noise_norm = torch.norm(comb_pred, dim=-1, keepdim=True)
856
+ noise_pred = comb_pred * (cond_norm / noise_norm)
857
+
858
+
859
+ for handle in handles:
860
+ handle.remove()
861
+ handles = []
862
+ if len(hook_outs) > 0 :
863
+ saved_outputs[i] = hook_outs
864
+ hook_outs = None
865
+ del hook_outs
866
+
867
+ # compute the previous noisy sample x_t -> x_t-1
868
+ latents_dtype = latents.dtype
869
+ # latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0]
870
+
871
+ latents, x0_latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)
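+ # The hacked scheduler's step() returns (prev_sample, pred_original_sample) when return_dict=False;
+ # the x0 prediction is kept for per-step previews.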
872
+ x0_previews.append(x0_latents)
873
+
874
+ if latents.dtype != latents_dtype:
875
+ if torch.backends.mps.is_available():
876
+ # some platforms (eg. apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272
877
+ latents = latents.to(latents_dtype)
878
+
879
+ if callback_on_step_end is not None:
880
+ callback_kwargs = {}
881
+ for k in callback_on_step_end_tensor_inputs:
882
+ callback_kwargs[k] = locals()[k]
883
+ callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
884
+
885
+ latents = callback_outputs.pop("latents", latents)
886
+ prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
887
+
888
+ # call the callback, if provided
889
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
890
+ progress_bar.update()
891
+
892
+ if XLA_AVAILABLE:
893
+ xm.mark_step()
894
+
895
+ self._current_timestep = None
896
+ if output_type == "latent":
897
+ image = latents
898
+ else:
899
+ latents = self._unpack_latents(latents, height, width, self.vae_scale_factor)
900
+ latents = latents.to(self.vae.dtype)
901
+ latents_mean = (
902
+ torch.tensor(self.vae.config.latents_mean)
903
+ .view(1, self.vae.config.z_dim, 1, 1, 1)
904
+ .to(latents.device, latents.dtype)
905
+ )
906
+ latents_std = 1.0 / torch.tensor(self.vae.config.latents_std).view(1, self.vae.config.z_dim, 1, 1, 1).to(
907
+ latents.device, latents.dtype
908
+ )
909
+ latents = latents / latents_std + latents_mean
910
+ image = self.vae.decode(latents, return_dict=False)[0][:, :, 0]
911
+ image = self.image_processor.postprocess(image, output_type=output_type)
912
+
913
+ x0_images = []
914
+ for x0_latents in x0_previews:
915
+ x0_latents = self._unpack_latents(x0_latents, height, width, self.vae_scale_factor)
916
+ x0_latents = x0_latents.to(self.vae.dtype)
917
+ x0_latents_mean = (
918
+ torch.tensor(self.vae.config.latents_mean)
919
+ .view(1, self.vae.config.z_dim, 1, 1, 1)
920
+ .to(x0_latents.device, x0_latents.dtype)
921
+ )
922
+ x0_latents_std = 1.0 / torch.tensor(self.vae.config.latents_std).view(1, self.vae.config.z_dim, 1, 1, 1).to(
923
+ x0_latents.device, x0_latents.dtype
924
+ )
925
+ x0_latents = x0_latents / x0_latents_std + x0_latents_mean
926
+ x0_image = self.vae.decode(x0_latents, return_dict=False)[0][:, :, 0]
927
+ x0_image = self.image_processor.postprocess(x0_image, output_type=output_type)
928
+ x0_images.append(x0_image[0])
929
+
930
+ x0_previews = None
931
+ del x0_previews
932
+ # Offload all models
933
+ self.maybe_free_model_hooks()
934
+
935
+ if not return_dict:
936
+ return (image,x0_images,saved_outputs)
937
+
938
+ return QwenImagePipelineOutput(images=image)
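
A minimal call sketch against the `__call__` signature above (the `pipe` object, input path, and prompt are illustrative assumptions; only the keyword arguments come from the signature):

    import torch
    from PIL import Image

    # `pipe` is assumed to be an already constructed QwenImageEditPipeline instance.
    image = Image.open("input.png").convert("RGB")
    out = pipe(
        image=image,
        prompt="replace the sky with a sunset",
        negative_prompt=" ",          # with true_cfg_scale > 1 this enables true CFG
        true_cfg_scale=4.0,
        num_inference_steps=50,
        generator=torch.Generator("cuda").manual_seed(42),
        grag_scale=[((512, 1.0, 1.0), (512, 1.0, 1.0))] * 57,
    )
    out.images[0].save("edited.png")  # QwenImagePipelineOutput.images holds the edited image(s)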
hacked_models/scheduler.py ADDED
@@ -0,0 +1,564 @@
1
+ # Copyright 2025 Stability AI, Katherine Crowson and The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import math
16
+ from dataclasses import dataclass
17
+ from typing import List, Optional, Tuple, Union
18
+
19
+ import numpy as np
20
+ import torch
21
+
22
+ from diffusers.configuration_utils import ConfigMixin, register_to_config
23
+ from diffusers.utils import BaseOutput, is_scipy_available, logging
24
+ from diffusers.schedulers.scheduling_utils import SchedulerMixin
25
+
26
+ if is_scipy_available():
27
+ import scipy.stats
28
+
29
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
30
+
31
+
32
+ @dataclass
33
+ class FlowMatchEulerDiscreteSchedulerOutput(BaseOutput):
34
+ """
35
+ Output class for the scheduler's `step` function output.
36
+
37
+ Args:
38
+ prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
39
+ Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the
40
+ denoising loop.
41
+ """
42
+
43
+ prev_sample: torch.FloatTensor
44
+
45
+
46
+ class FlowMatchEulerDiscreteScheduler(SchedulerMixin, ConfigMixin):
47
+ """
48
+ Euler scheduler.
49
+
50
+ This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic
51
+ methods the library implements for all schedulers such as loading and saving.
52
+
53
+ Args:
54
+ num_train_timesteps (`int`, defaults to 1000):
55
+ The number of diffusion steps to train the model.
56
+ shift (`float`, defaults to 1.0):
57
+ The shift value for the timestep schedule.
58
+ use_dynamic_shifting (`bool`, defaults to False):
59
+ Whether to apply timestep shifting on-the-fly based on the image resolution.
60
+ base_shift (`float`, defaults to 0.5):
61
+ Value to stabilize image generation. Increasing `base_shift` reduces variation and image is more consistent
62
+ with desired output.
63
+ max_shift (`float`, defaults to 1.15):
64
+ Value change allowed to latent vectors. Increasing `max_shift` encourages more variation and image may be
65
+ more exaggerated or stylized.
66
+ base_image_seq_len (`int`, defaults to 256):
67
+ The base image sequence length.
68
+ max_image_seq_len (`int`, defaults to 4096):
69
+ The maximum image sequence length.
70
+ invert_sigmas (`bool`, defaults to False):
71
+ Whether to invert the sigmas.
72
+ shift_terminal (`float`, defaults to None):
73
+ The end value of the shifted timestep schedule.
74
+ use_karras_sigmas (`bool`, defaults to False):
75
+ Whether to use Karras sigmas for step sizes in the noise schedule during sampling.
76
+ use_exponential_sigmas (`bool`, defaults to False):
77
+ Whether to use exponential sigmas for step sizes in the noise schedule during sampling.
78
+ use_beta_sigmas (`bool`, defaults to False):
79
+ Whether to use beta sigmas for step sizes in the noise schedule during sampling.
80
+ time_shift_type (`str`, defaults to "exponential"):
81
+ The type of dynamic resolution-dependent timestep shifting to apply. Either "exponential" or "linear".
82
+ stochastic_sampling (`bool`, defaults to False):
83
+ Whether to use stochastic sampling.
84
+ """
85
+
86
+ _compatibles = []
87
+ order = 1
88
+
89
+ @register_to_config
90
+ def __init__(
91
+ self,
92
+ num_train_timesteps: int = 1000,
93
+ shift: float = 1.0,
94
+ use_dynamic_shifting: bool = False,
95
+ base_shift: Optional[float] = 0.5,
96
+ max_shift: Optional[float] = 1.15,
97
+ base_image_seq_len: Optional[int] = 256,
98
+ max_image_seq_len: Optional[int] = 4096,
99
+ invert_sigmas: bool = False,
100
+ shift_terminal: Optional[float] = None,
101
+ use_karras_sigmas: Optional[bool] = False,
102
+ use_exponential_sigmas: Optional[bool] = False,
103
+ use_beta_sigmas: Optional[bool] = False,
104
+ time_shift_type: str = "exponential",
105
+ stochastic_sampling: bool = False,
106
+ ):
107
+ if self.config.use_beta_sigmas and not is_scipy_available():
108
+ raise ImportError("Make sure to install scipy if you want to use beta sigmas.")
109
+ if sum([self.config.use_beta_sigmas, self.config.use_exponential_sigmas, self.config.use_karras_sigmas]) > 1:
110
+ raise ValueError(
111
+ "Only one of `config.use_beta_sigmas`, `config.use_exponential_sigmas`, `config.use_karras_sigmas` can be used."
112
+ )
113
+ if time_shift_type not in {"exponential", "linear"}:
114
+ raise ValueError("`time_shift_type` must either be 'exponential' or 'linear'.")
115
+
116
+ timesteps = np.linspace(1, num_train_timesteps, num_train_timesteps, dtype=np.float32)[::-1].copy()
117
+ timesteps = torch.from_numpy(timesteps).to(dtype=torch.float32)
118
+
119
+ sigmas = timesteps / num_train_timesteps
120
+ if not use_dynamic_shifting:
121
+ # when use_dynamic_shifting is True, we apply the timestep shifting on the fly based on the image resolution
122
+ sigmas = shift * sigmas / (1 + (shift - 1) * sigmas)
123
+
124
+ self.timesteps = sigmas * num_train_timesteps
125
+
126
+ self._step_index = None
127
+ self._begin_index = None
128
+
129
+ self._shift = shift
130
+
131
+ self.sigmas = sigmas.to("cpu") # to avoid too much CPU/GPU communication
132
+ self.sigma_min = self.sigmas[-1].item()
133
+ self.sigma_max = self.sigmas[0].item()
134
+
135
+ @property
136
+ def shift(self):
137
+ """
138
+ The value used for shifting.
139
+ """
140
+ return self._shift
141
+
142
+ @property
143
+ def step_index(self):
144
+ """
145
+ The index counter for current timestep. It will increase 1 after each scheduler step.
146
+ """
147
+ return self._step_index
148
+
149
+ @property
150
+ def begin_index(self):
151
+ """
152
+ The index for the first timestep. It should be set from pipeline with `set_begin_index` method.
153
+ """
154
+ return self._begin_index
155
+
156
+ # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.set_begin_index
157
+ def set_begin_index(self, begin_index: int = 0):
158
+ """
159
+ Sets the begin index for the scheduler. This function should be run from pipeline before the inference.
160
+
161
+ Args:
162
+ begin_index (`int`):
163
+ The begin index for the scheduler.
164
+ """
165
+ self._begin_index = begin_index
166
+
167
+ def set_shift(self, shift: float):
168
+ self._shift = shift
169
+
170
+ def scale_noise(
171
+ self,
172
+ sample: torch.FloatTensor,
173
+ timestep: Union[float, torch.FloatTensor],
174
+ noise: Optional[torch.FloatTensor] = None,
175
+ ) -> torch.FloatTensor:
176
+ """
177
+ Forward process in flow-matching
178
+
179
+ Args:
180
+ sample (`torch.FloatTensor`):
181
+ The input sample.
182
+ timestep (`int`, *optional*):
183
+ The current timestep in the diffusion chain.
184
+
185
+ Returns:
186
+ `torch.FloatTensor`:
187
+ A scaled input sample.
188
+ """
189
+ # Make sure sigmas and timesteps have the same device and dtype as original_samples
190
+ sigmas = self.sigmas.to(device=sample.device, dtype=sample.dtype)
191
+
192
+ if sample.device.type == "mps" and torch.is_floating_point(timestep):
193
+ # mps does not support float64
194
+ schedule_timesteps = self.timesteps.to(sample.device, dtype=torch.float32)
195
+ timestep = timestep.to(sample.device, dtype=torch.float32)
196
+ else:
197
+ schedule_timesteps = self.timesteps.to(sample.device)
198
+ timestep = timestep.to(sample.device)
199
+
200
+ # self.begin_index is None when scheduler is used for training, or pipeline does not implement set_begin_index
201
+ if self.begin_index is None:
202
+ step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timestep]
203
+ elif self.step_index is not None:
204
+ # add_noise is called after first denoising step (for inpainting)
205
+ step_indices = [self.step_index] * timestep.shape[0]
206
+ else:
207
+ # add noise is called before first denoising step to create initial latent(img2img)
208
+ step_indices = [self.begin_index] * timestep.shape[0]
209
+
210
+ sigma = sigmas[step_indices].flatten()
211
+ while len(sigma.shape) < len(sample.shape):
212
+ sigma = sigma.unsqueeze(-1)
213
+
214
+ sample = sigma * noise + (1.0 - sigma) * sample
215
+
216
+ return sample
217
+
218
+ def _sigma_to_t(self, sigma):
219
+ return sigma * self.config.num_train_timesteps
220
+
221
+ def time_shift(self, mu: float, sigma: float, t: torch.Tensor):
222
+ if self.config.time_shift_type == "exponential":
223
+ return self._time_shift_exponential(mu, sigma, t)
224
+ elif self.config.time_shift_type == "linear":
225
+ return self._time_shift_linear(mu, sigma, t)
226
+
227
+ def stretch_shift_to_terminal(self, t: torch.Tensor) -> torch.Tensor:
228
+ r"""
229
+ Stretches and shifts the timestep schedule to ensure it terminates at the configured `shift_terminal` config
230
+ value.
231
+
232
+ Reference:
233
+ https://github.com/Lightricks/LTX-Video/blob/a01a171f8fe3d99dce2728d60a73fecf4d4238ae/ltx_video/schedulers/rf.py#L51
234
+
235
+ Args:
236
+ t (`torch.Tensor`):
237
+ A tensor of timesteps to be stretched and shifted.
238
+
239
+ Returns:
240
+ `torch.Tensor`:
241
+ A tensor of adjusted timesteps such that the final value equals `self.config.shift_terminal`.
242
+ """
243
+ one_minus_z = 1 - t
244
+ scale_factor = one_minus_z[-1] / (1 - self.config.shift_terminal)
245
+ stretched_t = 1 - (one_minus_z / scale_factor)
246
+ return stretched_t
247
+
248
+ def set_timesteps(
249
+ self,
250
+ num_inference_steps: Optional[int] = None,
251
+ device: Union[str, torch.device] = None,
252
+ sigmas: Optional[List[float]] = None,
253
+ mu: Optional[float] = None,
254
+ timesteps: Optional[List[float]] = None,
255
+ ):
256
+ """
257
+ Sets the discrete timesteps used for the diffusion chain (to be run before inference).
258
+
259
+ Args:
260
+ num_inference_steps (`int`, *optional*):
261
+ The number of diffusion steps used when generating samples with a pre-trained model.
262
+ device (`str` or `torch.device`, *optional*):
263
+ The device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
264
+ sigmas (`List[float]`, *optional*):
265
+ Custom values for sigmas to be used for each diffusion step. If `None`, the sigmas are computed
266
+ automatically.
267
+ mu (`float`, *optional*):
268
+ Determines the amount of shifting applied to sigmas when performing resolution-dependent timestep
269
+ shifting.
270
+ timesteps (`List[float]`, *optional*):
271
+ Custom values for timesteps to be used for each diffusion step. If `None`, the timesteps are computed
272
+ automatically.
273
+ """
274
+ if self.config.use_dynamic_shifting and mu is None:
275
+ raise ValueError("`mu` must be passed when `use_dynamic_shifting` is set to be `True`")
276
+
277
+ if sigmas is not None and timesteps is not None:
278
+ if len(sigmas) != len(timesteps):
279
+ raise ValueError("`sigmas` and `timesteps` should have the same length")
280
+
281
+ if num_inference_steps is not None:
282
+ if (sigmas is not None and len(sigmas) != num_inference_steps) or (
283
+ timesteps is not None and len(timesteps) != num_inference_steps
284
+ ):
285
+ raise ValueError(
286
+ "`sigmas` and `timesteps` should have the same length as num_inference_steps, if `num_inference_steps` is provided"
287
+ )
288
+ else:
289
+ num_inference_steps = len(sigmas) if sigmas is not None else len(timesteps)
290
+
291
+ self.num_inference_steps = num_inference_steps
292
+
293
+ # 1. Prepare default sigmas
294
+ is_timesteps_provided = timesteps is not None
295
+
296
+ if is_timesteps_provided:
297
+ timesteps = np.array(timesteps).astype(np.float32)
298
+
299
+ if sigmas is None:
300
+ if timesteps is None:
301
+ timesteps = np.linspace(
302
+ self._sigma_to_t(self.sigma_max), self._sigma_to_t(self.sigma_min), num_inference_steps
303
+ )
304
+ sigmas = timesteps / self.config.num_train_timesteps
305
+ else:
306
+ sigmas = np.array(sigmas).astype(np.float32)
307
+ num_inference_steps = len(sigmas)
308
+
309
+ # 2. Perform timestep shifting. Either no shifting is applied, or resolution-dependent shifting of
310
+ # "exponential" or "linear" type is applied
311
+ if self.config.use_dynamic_shifting:
312
+ sigmas = self.time_shift(mu, 1.0, sigmas)
313
+ else:
314
+ sigmas = self.shift * sigmas / (1 + (self.shift - 1) * sigmas)
315
+
316
+ # 3. If required, stretch the sigmas schedule to terminate at the configured `shift_terminal` value
317
+ if self.config.shift_terminal:
318
+ sigmas = self.stretch_shift_to_terminal(sigmas)
319
+
320
+ # 4. If required, convert sigmas to one of karras, exponential, or beta sigma schedules
321
+ if self.config.use_karras_sigmas:
322
+ sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=num_inference_steps)
323
+ elif self.config.use_exponential_sigmas:
324
+ sigmas = self._convert_to_exponential(in_sigmas=sigmas, num_inference_steps=num_inference_steps)
325
+ elif self.config.use_beta_sigmas:
326
+ sigmas = self._convert_to_beta(in_sigmas=sigmas, num_inference_steps=num_inference_steps)
327
+
328
+ # 5. Convert sigmas and timesteps to tensors and move to specified device
329
+ sigmas = torch.from_numpy(sigmas).to(dtype=torch.float32, device=device)
330
+ if not is_timesteps_provided:
331
+ timesteps = sigmas * self.config.num_train_timesteps
332
+ else:
333
+ timesteps = torch.from_numpy(timesteps).to(dtype=torch.float32, device=device)
334
+
335
+ # 6. Append the terminal sigma value.
336
+ # If a model requires inverted sigma schedule for denoising but timesteps without inversion, the
337
+ # `invert_sigmas` flag can be set to `True`. This case is only required in Mochi
338
+ if self.config.invert_sigmas:
339
+ sigmas = 1.0 - sigmas
340
+ timesteps = sigmas * self.config.num_train_timesteps
341
+ sigmas = torch.cat([sigmas, torch.ones(1, device=sigmas.device)])
342
+ else:
343
+ sigmas = torch.cat([sigmas, torch.zeros(1, device=sigmas.device)])
344
+
345
+ self.timesteps = timesteps
346
+ self.sigmas = sigmas
347
+ self._step_index = None
348
+ self._begin_index = None
349
+
350
+ def index_for_timestep(self, timestep, schedule_timesteps=None):
351
+ if schedule_timesteps is None:
352
+ schedule_timesteps = self.timesteps
353
+
354
+ indices = (schedule_timesteps == timestep).nonzero()
355
+
356
+ # The sigma index that is taken for the **very** first `step`
357
+ # is always the second index (or the last index if there is only 1)
358
+ # This way we can ensure we don't accidentally skip a sigma in
359
+ # case we start in the middle of the denoising schedule (e.g. for image-to-image)
360
+ pos = 1 if len(indices) > 1 else 0
361
+
362
+ return indices[pos].item()
363
+
364
+ def _init_step_index(self, timestep):
365
+ if self.begin_index is None:
366
+ if isinstance(timestep, torch.Tensor):
367
+ timestep = timestep.to(self.timesteps.device)
368
+ self._step_index = self.index_for_timestep(timestep)
369
+ else:
370
+ self._step_index = self._begin_index
371
+
372
+ def step(
373
+ self,
374
+ model_output: torch.FloatTensor,
375
+ timestep: Union[float, torch.FloatTensor],
376
+ sample: torch.FloatTensor,
377
+ s_churn: float = 0.0,
378
+ s_tmin: float = 0.0,
379
+ s_tmax: float = float("inf"),
380
+ s_noise: float = 1.0,
381
+ generator: Optional[torch.Generator] = None,
382
+ per_token_timesteps: Optional[torch.Tensor] = None,
383
+ return_dict: bool = True,
384
+ ) -> Union[FlowMatchEulerDiscreteSchedulerOutput, Tuple]:
385
+ """
386
+ Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion
387
+ process from the learned model outputs (most often the predicted noise).
388
+
389
+ Args:
390
+ model_output (`torch.FloatTensor`):
391
+ The direct output from learned diffusion model.
392
+ timestep (`float`):
393
+ The current discrete timestep in the diffusion chain.
394
+ sample (`torch.FloatTensor`):
395
+ A current instance of a sample created by the diffusion process.
396
+ s_churn (`float`):
397
+ s_tmin (`float`):
398
+ s_tmax (`float`):
399
+ s_noise (`float`, defaults to 1.0):
400
+ Scaling factor for noise added to the sample.
401
+ generator (`torch.Generator`, *optional*):
402
+ A random number generator.
403
+ per_token_timesteps (`torch.Tensor`, *optional*):
404
+ The timesteps for each token in the sample.
405
+ return_dict (`bool`):
406
+ Whether or not to return a
407
+ [`~schedulers.scheduling_flow_match_euler_discrete.FlowMatchEulerDiscreteSchedulerOutput`] or tuple.
408
+
409
+ Returns:
410
+ [`~schedulers.scheduling_flow_match_euler_discrete.FlowMatchEulerDiscreteSchedulerOutput`] or `tuple`:
411
+ If return_dict is `True`,
412
+ [`~schedulers.scheduling_flow_match_euler_discrete.FlowMatchEulerDiscreteSchedulerOutput`] is returned,
413
+ otherwise a tuple is returned where the first element is the sample tensor.
414
+ """
415
+
416
+ if (
417
+ isinstance(timestep, int)
418
+ or isinstance(timestep, torch.IntTensor)
419
+ or isinstance(timestep, torch.LongTensor)
420
+ ):
421
+ raise ValueError(
422
+ (
423
+ "Passing integer indices (e.g. from `enumerate(timesteps)`) as timesteps to"
424
+ " `FlowMatchEulerDiscreteScheduler.step()` is not supported. Make sure to pass"
425
+ " one of the `scheduler.timesteps` as a timestep."
426
+ ),
427
+ )
428
+
429
+ if self.step_index is None:
430
+ self._init_step_index(timestep)
431
+
432
+ # Upcast to avoid precision issues when computing prev_sample
433
+ sample = sample.to(torch.float32)
434
+
435
+ if per_token_timesteps is not None:
436
+ per_token_sigmas = per_token_timesteps / self.config.num_train_timesteps
437
+
438
+ sigmas = self.sigmas[:, None, None]
439
+ lower_mask = sigmas < per_token_sigmas[None] - 1e-6
440
+ lower_sigmas = lower_mask * sigmas
441
+ lower_sigmas, _ = lower_sigmas.max(dim=0)
442
+
443
+ current_sigma = per_token_sigmas[..., None]
444
+ next_sigma = lower_sigmas[..., None]
445
+ dt = current_sigma - next_sigma
446
+ else:
447
+ sigma_idx = self.step_index
448
+ sigma = self.sigmas[sigma_idx]
449
+ sigma_next = self.sigmas[sigma_idx + 1]
450
+
451
+ current_sigma = sigma
452
+ next_sigma = sigma_next
453
+ dt = sigma_next - sigma
454
+
455
+ if self.config.stochastic_sampling:
456
+ x0 = sample - current_sigma * model_output
457
+ noise = torch.randn_like(sample)
458
+ prev_sample = (1.0 - next_sigma) * x0 + next_sigma * noise
459
+
460
+ else:
461
+ prev_sample = sample + dt * model_output
462
+
463
+ pred_original_sample = sample + (self.sigmas[-1] - sigma) * model_output
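+ # Flow-matching x0 estimate: self.sigmas[-1] is the terminal sigma (0.0 unless invert_sigmas is set),
+ # so this is effectively sample - sigma * model_output.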
464
+
465
+ # upon completion increase step index by one
466
+ self._step_index += 1
467
+ if per_token_timesteps is None:
468
+ # Cast sample back to model compatible dtype
469
+ prev_sample = prev_sample.to(model_output.dtype)
470
+ pred_original_sample = pred_original_sample.to(model_output.dtype)
471
+
472
+ if not return_dict:
473
+ return (prev_sample,pred_original_sample)
474
+
475
+ return FlowMatchEulerDiscreteSchedulerOutput(prev_sample=prev_sample)
476
+
477
+ # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._convert_to_karras
478
+ def _convert_to_karras(self, in_sigmas: torch.Tensor, num_inference_steps) -> torch.Tensor:
479
+ """Constructs the noise schedule of Karras et al. (2022)."""
480
+
481
+ # Hack to make sure that other schedulers which copy this function don't break
482
+ # TODO: Add this logic to the other schedulers
483
+ if hasattr(self.config, "sigma_min"):
484
+ sigma_min = self.config.sigma_min
485
+ else:
486
+ sigma_min = None
487
+
488
+ if hasattr(self.config, "sigma_max"):
489
+ sigma_max = self.config.sigma_max
490
+ else:
491
+ sigma_max = None
492
+
493
+ sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item()
494
+ sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item()
495
+
496
+ rho = 7.0 # 7.0 is the value used in the paper
497
+ ramp = np.linspace(0, 1, num_inference_steps)
498
+ min_inv_rho = sigma_min ** (1 / rho)
499
+ max_inv_rho = sigma_max ** (1 / rho)
500
+ sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
501
+ return sigmas
502
+
503
+ # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._convert_to_exponential
504
+ def _convert_to_exponential(self, in_sigmas: torch.Tensor, num_inference_steps: int) -> torch.Tensor:
505
+ """Constructs an exponential noise schedule."""
506
+
507
+ # Hack to make sure that other schedulers which copy this function don't break
508
+ # TODO: Add this logic to the other schedulers
509
+ if hasattr(self.config, "sigma_min"):
510
+ sigma_min = self.config.sigma_min
511
+ else:
512
+ sigma_min = None
513
+
514
+ if hasattr(self.config, "sigma_max"):
515
+ sigma_max = self.config.sigma_max
516
+ else:
517
+ sigma_max = None
518
+
519
+ sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item()
520
+ sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item()
521
+
522
+ sigmas = np.exp(np.linspace(math.log(sigma_max), math.log(sigma_min), num_inference_steps))
523
+ return sigmas
524
+
525
+ # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._convert_to_beta
526
+ def _convert_to_beta(
527
+ self, in_sigmas: torch.Tensor, num_inference_steps: int, alpha: float = 0.6, beta: float = 0.6
528
+ ) -> torch.Tensor:
529
+ """From "Beta Sampling is All You Need" [arXiv:2407.12173] (Lee et. al, 2024)"""
530
+
531
+ # Hack to make sure that other schedulers which copy this function don't break
532
+ # TODO: Add this logic to the other schedulers
533
+ if hasattr(self.config, "sigma_min"):
534
+ sigma_min = self.config.sigma_min
535
+ else:
536
+ sigma_min = None
537
+
538
+ if hasattr(self.config, "sigma_max"):
539
+ sigma_max = self.config.sigma_max
540
+ else:
541
+ sigma_max = None
542
+
543
+ sigma_min = sigma_min if sigma_min is not None else in_sigmas[-1].item()
544
+ sigma_max = sigma_max if sigma_max is not None else in_sigmas[0].item()
545
+
546
+ sigmas = np.array(
547
+ [
548
+ sigma_min + (ppf * (sigma_max - sigma_min))
549
+ for ppf in [
550
+ scipy.stats.beta.ppf(timestep, alpha, beta)
551
+ for timestep in 1 - np.linspace(0, 1, num_inference_steps)
552
+ ]
553
+ ]
554
+ )
555
+ return sigmas
556
+
557
+ def _time_shift_exponential(self, mu, sigma, t):
558
+ return math.exp(mu) / (math.exp(mu) + (1 / t - 1) ** sigma)
559
+
560
+ def _time_shift_linear(self, mu, sigma, t):
561
+ return mu / (mu + (1 / t - 1) ** sigma)
562
+
563
+ def __len__(self):
564
+ return self.config.num_train_timesteps
hacked_models/utils.py ADDED
@@ -0,0 +1,35 @@
+ import math
+ from PIL import Image, ImageDraw, ImageFont
+ import torch
+ import torch.nn.functional as F
+ import matplotlib.pyplot as plt
+ import numpy as np
+ from typing import Dict, List, Literal
+ import matplotlib.cm as cm
+ import matplotlib as mpl
+ from mpl_toolkits.mplot3d import Axes3D
+ import os
+ from pathlib import Path
+ from datetime import datetime
+ from base64 import b64encode
+ from PIL import Image
+ import random
+ import io
+ from io import BytesIO
+
+ def seed_everything(seed: int = 42, deterministic: bool = False):
+     random.seed(seed)
+     os.environ["PYTHONHASHSEED"] = str(seed)
+     np.random.seed(seed)
+     torch.manual_seed(seed)
+     torch.cuda.manual_seed(seed)
+     torch.cuda.manual_seed_all(seed)
+     if deterministic:
+         torch.backends.cudnn.deterministic = True
+         torch.backends.cudnn.benchmark = False
+     else:
+         torch.backends.cudnn.deterministic = False
+         torch.backends.cudnn.benchmark = True
+
+     print(f"✅ Random seed set to {seed}, deterministic={deterministic}")
requirements.txt ADDED
@@ -0,0 +1,15 @@
+ accelerate
+ diffusers
+ gradio
+ ipykernel
+ jaraco.collections
+ matplotlib
+ pathlib
+ pickleshare
+ pip-chill
+ termcolor
+ tomli
+ torch
+ torchvision
+ transformers
+ huggingface_hub