Commit ee7e9f5 (verified) · parent 1ad7ccd
diffusers-benchmarking-bot committed: Upload folder using huggingface_hub
main/pipeline_z_image_differential_img2img.py ADDED
# Copyright 2025 Alibaba Z-Image Team and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import inspect
from typing import Any, Callable, Dict, List, Optional, Union

import torch
from transformers import AutoTokenizer, PreTrainedModel

from diffusers.image_processor import PipelineImageInput, VaeImageProcessor
from diffusers.loaders import FromSingleFileMixin, ZImageLoraLoaderMixin
from diffusers.models.autoencoders import AutoencoderKL
from diffusers.models.transformers import ZImageTransformer2DModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.z_image.pipeline_output import ZImagePipelineOutput
from diffusers.schedulers import FlowMatchEulerDiscreteScheduler
from diffusers.utils import logging, replace_example_docstring
from diffusers.utils.torch_utils import randn_tensor


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> import torch
        >>> from pipeline_z_image_differential_img2img import ZImageDifferentialImg2ImgPipeline
        >>> from diffusers.utils import load_image

        >>> pipe = ZImageDifferentialImg2ImgPipeline.from_pretrained("Z-a-o/Z-Image-Turbo", torch_dtype=torch.bfloat16)
        >>> pipe.to("cuda")

        >>> init_image = load_image(
        ...     "https://github.com/exx8/differential-diffusion/blob/main/assets/input.jpg?raw=true",
        ... )

        >>> mask = load_image(
        ...     "https://github.com/exx8/differential-diffusion/blob/main/assets/map.jpg?raw=true",
        ... )

        >>> prompt = "painting of a mountain landscape with a meadow and a forest, meadow background, anime countryside landscape, anime nature wallpap, anime landscape wallpaper, studio ghibli landscape, anime landscape, mountain behind meadow, anime background art, studio ghibli environment, background of flowery hill, anime beautiful peace scene, forrest background, anime scenery, landscape background, background art, anime scenery concept art"

        >>> image = pipe(
        ...     prompt,
        ...     image=init_image,
        ...     mask_image=mask,
        ...     strength=0.75,
        ...     num_inference_steps=9,
        ...     guidance_scale=0.0,
        ...     generator=torch.Generator("cuda").manual_seed(41),
        ... ).images[0]
        >>> image.save("image.png")
        ```
"""


# Copied from diffusers.pipelines.flux.pipeline_flux.calculate_shift
def calculate_shift(
    image_seq_len,
    base_seq_len: int = 256,
    max_seq_len: int = 4096,
    base_shift: float = 0.5,
    max_shift: float = 1.15,
):
    m = (max_shift - base_shift) / (max_seq_len - base_seq_len)
    b = base_shift - m * base_seq_len
    mu = image_seq_len * m + b
    return mu
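    # Worked example (illustrative, using the default values above): with an 8x VAE and 2x2
    # patchification, a 1024x1024 image gives image_seq_len = (1024 / 16) ** 2 = 4096, so
    # mu = max_shift = 1.15; a 512x512 image gives image_seq_len = 1024 and mu is roughly 0.63.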


# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents
def retrieve_latents(
    encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample"
):
    if hasattr(encoder_output, "latent_dist") and sample_mode == "sample":
        return encoder_output.latent_dist.sample(generator)
    elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax":
        return encoder_output.latent_dist.mode()
    elif hasattr(encoder_output, "latents"):
        return encoder_output.latents
    else:
        raise AttributeError("Could not access latents of provided encoder_output")


# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps
def retrieve_timesteps(
    scheduler,
    num_inference_steps: Optional[int] = None,
    device: Optional[Union[str, torch.device]] = None,
    timesteps: Optional[List[int]] = None,
    sigmas: Optional[List[float]] = None,
    **kwargs,
):
    r"""
    Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
    custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.

    Args:
        scheduler (`SchedulerMixin`):
            The scheduler to get timesteps from.
        num_inference_steps (`int`):
            The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps`
            must be `None`.
        device (`str` or `torch.device`, *optional*):
            The device to which the timesteps should be moved. If `None`, the timesteps are not moved.
        timesteps (`List[int]`, *optional*):
            Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed,
            `num_inference_steps` and `sigmas` must be `None`.
        sigmas (`List[float]`, *optional*):
            Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed,
            `num_inference_steps` and `timesteps` must be `None`.

    Returns:
        `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
        second element is the number of inference steps.
    """
    if timesteps is not None and sigmas is not None:
        raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values")
    if timesteps is not None:
        accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
        if not accepts_timesteps:
            raise ValueError(
                f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
                f" timestep schedules. Please check whether you are using the correct scheduler."
            )
        scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
        timesteps = scheduler.timesteps
        num_inference_steps = len(timesteps)
    elif sigmas is not None:
        accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
        if not accept_sigmas:
            raise ValueError(
                f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
                f" sigma schedules. Please check whether you are using the correct scheduler."
            )
        scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs)
        timesteps = scheduler.timesteps
        num_inference_steps = len(timesteps)
    else:
        scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
        timesteps = scheduler.timesteps
    return timesteps, num_inference_steps
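    # Illustrative usage (a sketch, not part of the pipeline): with a FlowMatchEulerDiscreteScheduler,
    #     retrieve_timesteps(scheduler, num_inference_steps=9, device="cuda", mu=1.15)
    # uses the scheduler's own spacing, while passing a custom `sigmas=[...]` list (together with the
    # same `mu` keyword, which is forwarded through **kwargs) overrides that spacing.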


class ZImageDifferentialImg2ImgPipeline(DiffusionPipeline, ZImageLoraLoaderMixin, FromSingleFileMixin):
    r"""
    The ZImage pipeline for differential image-to-image generation, where a grayscale change map controls how
    strongly each region of the input image is allowed to change.

    Args:
        scheduler ([`FlowMatchEulerDiscreteScheduler`]):
            A scheduler to be used in combination with `transformer` to denoise the encoded image latents.
        vae ([`AutoencoderKL`]):
            Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
        text_encoder ([`PreTrainedModel`]):
            A text encoder model to encode text prompts.
        tokenizer ([`AutoTokenizer`]):
            A tokenizer to tokenize text prompts.
        transformer ([`ZImageTransformer2DModel`]):
            A ZImage transformer model to denoise the encoded image latents.
    """

    model_cpu_offload_seq = "text_encoder->transformer->vae"
    _optional_components = []
    _callback_tensor_inputs = ["latents", "prompt_embeds"]

    def __init__(
        self,
        scheduler: FlowMatchEulerDiscreteScheduler,
        vae: AutoencoderKL,
        text_encoder: PreTrainedModel,
        tokenizer: AutoTokenizer,
        transformer: ZImageTransformer2DModel,
    ):
        super().__init__()

        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            scheduler=scheduler,
            transformer=transformer,
        )
        self.vae_scale_factor = (
            2 ** (len(self.vae.config.block_out_channels) - 1) if hasattr(self, "vae") and self.vae is not None else 8
        )
        latent_channels = self.vae.config.latent_channels if getattr(self, "vae", None) else 16
        self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor * 2)

        self.mask_processor = VaeImageProcessor(
            vae_scale_factor=self.vae_scale_factor,
            vae_latent_channels=latent_channels,
            do_normalize=False,
            do_binarize=False,
            do_convert_grayscale=True,
        )

    # Copied from diffusers.pipelines.z_image.pipeline_z_image.ZImagePipeline.encode_prompt
    def encode_prompt(
        self,
        prompt: Union[str, List[str]],
        device: Optional[torch.device] = None,
        do_classifier_free_guidance: bool = True,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        prompt_embeds: Optional[List[torch.FloatTensor]] = None,
        negative_prompt_embeds: Optional[torch.FloatTensor] = None,
        max_sequence_length: int = 512,
    ):
        prompt = [prompt] if isinstance(prompt, str) else prompt
        prompt_embeds = self._encode_prompt(
            prompt=prompt,
            device=device,
            prompt_embeds=prompt_embeds,
            max_sequence_length=max_sequence_length,
        )

        if do_classifier_free_guidance:
            if negative_prompt is None:
                negative_prompt = ["" for _ in prompt]
            else:
                negative_prompt = [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt
                assert len(prompt) == len(negative_prompt)
            negative_prompt_embeds = self._encode_prompt(
                prompt=negative_prompt,
                device=device,
                prompt_embeds=negative_prompt_embeds,
                max_sequence_length=max_sequence_length,
            )
        else:
            negative_prompt_embeds = []
        return prompt_embeds, negative_prompt_embeds

    # Copied from diffusers.pipelines.z_image.pipeline_z_image.ZImagePipeline._encode_prompt
    def _encode_prompt(
        self,
        prompt: Union[str, List[str]],
        device: Optional[torch.device] = None,
        prompt_embeds: Optional[List[torch.FloatTensor]] = None,
        max_sequence_length: int = 512,
    ) -> List[torch.FloatTensor]:
        device = device or self._execution_device

        if prompt_embeds is not None:
            return prompt_embeds

        if isinstance(prompt, str):
            prompt = [prompt]

        for i, prompt_item in enumerate(prompt):
            messages = [
                {"role": "user", "content": prompt_item},
            ]
            prompt_item = self.tokenizer.apply_chat_template(
                messages,
                tokenize=False,
                add_generation_prompt=True,
                enable_thinking=True,
            )
            prompt[i] = prompt_item

        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=max_sequence_length,
            truncation=True,
            return_tensors="pt",
        )

        text_input_ids = text_inputs.input_ids.to(device)
        prompt_masks = text_inputs.attention_mask.to(device).bool()

        prompt_embeds = self.text_encoder(
            input_ids=text_input_ids,
            attention_mask=prompt_masks,
            output_hidden_states=True,
        ).hidden_states[-2]

        embeddings_list = []

        for i in range(len(prompt_embeds)):
            embeddings_list.append(prompt_embeds[i][prompt_masks[i]])

        return embeddings_list

    # Copied from diffusers.pipelines.stable_diffusion_3.pipeline_stable_diffusion_3_img2img.StableDiffusion3Img2ImgPipeline.get_timesteps
    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(num_inference_steps * strength, num_inference_steps)

        t_start = int(max(num_inference_steps - init_timestep, 0))
        timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]
        if hasattr(self.scheduler, "set_begin_index"):
            self.scheduler.set_begin_index(t_start * self.scheduler.order)

        return timesteps, num_inference_steps - t_start
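        # Example (illustrative): with num_inference_steps=9 and strength=0.75, init_timestep = 6.75
        # and t_start = int(max(9 - 6.75, 0)) = 2, so the last 7 of the 9 scheduled timesteps are
        # used and 7 is returned as the effective step count.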

    @staticmethod
    def _prepare_latent_image_ids(batch_size, height, width, device, dtype):
        latent_image_ids = torch.zeros(height // 2, width // 2, 3)
        latent_image_ids[..., 1] = latent_image_ids[..., 1] + torch.arange(height // 2)[:, None]
        latent_image_ids[..., 2] = latent_image_ids[..., 2] + torch.arange(width // 2)[None, :]

        latent_image_id_height, latent_image_id_width, latent_image_id_channels = latent_image_ids.shape

        latent_image_ids = latent_image_ids.reshape(
            latent_image_id_height * latent_image_id_width, latent_image_id_channels
        )

        return latent_image_ids.to(device=device, dtype=dtype)
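        # Shape note (illustrative): for 128x128 latents this returns a (64 * 64, 3) tensor whose
        # rows hold (0, row_index, col_index) positional ids, one per 2x2 latent patch.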

    def prepare_latents(
        self,
        image,
        timestep,
        batch_size,
        num_channels_latents,
        height,
        width,
        dtype,
        device,
        generator,
        latents=None,
    ):
        height = 2 * (int(height) // (self.vae_scale_factor * 2))
        width = 2 * (int(width) // (self.vae_scale_factor * 2))

        shape = (batch_size, num_channels_latents, height, width)
        latent_image_ids = self._prepare_latent_image_ids(batch_size, height, width, device, dtype)

        # Encode the input image. The original image latents are always needed, even when `latents`
        # is provided, because the differential masking step re-noises them at every timestep.
        image = image.to(device=device, dtype=dtype)
        if image.shape[1] != num_channels_latents:
            if isinstance(generator, list):
                image_latents = [
                    retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i])
                    for i in range(image.shape[0])
                ]
                image_latents = torch.cat(image_latents, dim=0)
            else:
                image_latents = retrieve_latents(self.vae.encode(image), generator=generator)

            # Apply scaling (inverse of decoding: decode does latents/scaling_factor + shift_factor)
            image_latents = (image_latents - self.vae.config.shift_factor) * self.vae.config.scaling_factor
        else:
            image_latents = image

        # Handle batch size expansion
        if batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] == 0:
            additional_image_per_prompt = batch_size // image_latents.shape[0]
            image_latents = torch.cat([image_latents] * additional_image_per_prompt, dim=0)
        elif batch_size > image_latents.shape[0] and batch_size % image_latents.shape[0] != 0:
            raise ValueError(
                f"Cannot duplicate `image` of batch size {image_latents.shape[0]} to {batch_size} text prompts."
            )

        # Add noise using flow matching scale_noise; if pre-made latents were passed, use them directly
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        if latents is not None:
            latents = latents.to(device=device, dtype=dtype)
        else:
            latents = self.scheduler.scale_noise(image_latents, timestep, noise)

        return latents, noise, image_latents, latent_image_ids
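        # Shape note (illustrative, assuming 16 latent channels as in the default config): with the
        # 8x VAE, a 1024x1024 input gives 128x128 latents, so `latents`, `noise`, and
        # `image_latents` are all (batch_size, 16, 128, 128) tensors.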

    def prepare_mask_latents(
        self,
        mask,
        masked_image,
        batch_size,
        num_images_per_prompt,
        height,
        width,
        dtype,
        device,
        generator,
    ):
        height = 2 * (int(height) // (self.vae_scale_factor * 2))
        width = 2 * (int(width) // (self.vae_scale_factor * 2))
        # resize the mask to latents shape as we concatenate the mask to the latents
        # we do that before converting to dtype to avoid breaking in case we're using cpu_offload
        # and half precision
        mask = torch.nn.functional.interpolate(mask, size=(height, width))
        mask = mask.to(device=device, dtype=dtype)

        batch_size = batch_size * num_images_per_prompt

        masked_image = masked_image.to(device=device, dtype=dtype)

        if masked_image.shape[1] == 16:
            masked_image_latents = masked_image
        else:
            masked_image_latents = retrieve_latents(self.vae.encode(masked_image), generator=generator)

        masked_image_latents = (masked_image_latents - self.vae.config.shift_factor) * self.vae.config.scaling_factor

        # duplicate mask and masked_image_latents for each generation per prompt, using mps friendly method
        if mask.shape[0] < batch_size:
            if not batch_size % mask.shape[0] == 0:
                raise ValueError(
                    "The passed mask and the required batch size don't match. Masks are supposed to be duplicated to"
                    f" a total batch size of {batch_size}, but {mask.shape[0]} masks were passed. Make sure the number"
                    " of masks that you pass is divisible by the total requested batch size."
                )
            mask = mask.repeat(batch_size // mask.shape[0], 1, 1, 1)
        if masked_image_latents.shape[0] < batch_size:
            if not batch_size % masked_image_latents.shape[0] == 0:
                raise ValueError(
                    "The passed images and the required batch size don't match. Images are supposed to be duplicated"
                    f" to a total batch size of {batch_size}, but {masked_image_latents.shape[0]} images were passed."
                    " Make sure the number of images that you pass is divisible by the total requested batch size."
                )
            masked_image_latents = masked_image_latents.repeat(batch_size // masked_image_latents.shape[0], 1, 1, 1)

        # aligning device to prevent device errors when concating it with the latent model input
        masked_image_latents = masked_image_latents.to(device=device, dtype=dtype)

        return mask, masked_image_latents
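        # Shape note (illustrative): a 1024x1024 grayscale change map becomes a
        # (batch_size, 1, 128, 128) tensor in [0, 1] here; only this resized mask is consumed by the
        # denoising loop in this pipeline, the masked-image latents are discarded by the caller.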

    @property
    def guidance_scale(self):
        return self._guidance_scale

    @property
    def do_classifier_free_guidance(self):
        return self._guidance_scale > 1

    @property
    def joint_attention_kwargs(self):
        return self._joint_attention_kwargs

    @property
    def num_timesteps(self):
        return self._num_timesteps

    @property
    def interrupt(self):
        return self._interrupt

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        prompt: Union[str, List[str]] = None,
        image: PipelineImageInput = None,
        mask_image: PipelineImageInput = None,
        strength: float = 0.6,
        height: Optional[int] = None,
        width: Optional[int] = None,
        num_inference_steps: int = 50,
        sigmas: Optional[List[float]] = None,
        guidance_scale: float = 5.0,
        cfg_normalization: bool = False,
        cfg_truncation: float = 1.0,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        prompt_embeds: Optional[List[torch.FloatTensor]] = None,
        negative_prompt_embeds: Optional[List[torch.FloatTensor]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        joint_attention_kwargs: Optional[Dict[str, Any]] = None,
        callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
        callback_on_step_end_tensor_inputs: List[str] = ["latents"],
        max_sequence_length: int = 512,
    ):
        r"""
        Function invoked when calling the pipeline for image-to-image generation.

        Args:
            prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
                instead.
            image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`):
                `Image`, numpy array or tensor representing an image batch to be used as the starting point. For both
                numpy array and pytorch tensor, the expected value range is between `[0, 1]`. If it's a tensor or a
                list of tensors, the expected shape should be `(B, C, H, W)` or `(C, H, W)`. If it is a numpy array or
                a list of arrays, the expected shape should be `(B, H, W, C)` or `(H, W, C)`.
            mask_image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`):
                `Image`, numpy array or tensor representing an image batch to mask `image`. Black pixels in the mask
                are repainted while white pixels are preserved. If `mask_image` is a PIL image, it is converted to a
                single channel (luminance) before use. If it's a numpy array or pytorch tensor, it should contain one
                color channel (L) instead of 3, so the expected shape for pytorch tensor would be `(B, 1, H, W)`, `(B,
                H, W)`, `(1, H, W)`, `(H, W)`. And for numpy array would be for `(B, H, W, 1)`, `(B, H, W)`, `(H, W,
                1)`, or `(H, W)`.
            strength (`float`, *optional*, defaults to 0.6):
                Indicates extent to transform the reference `image`. Must be between 0 and 1. `image` is used as a
                starting point and more noise is added the higher the `strength`. The number of denoising steps depends
                on the amount of noise initially added. When `strength` is 1, added noise is maximum and the denoising
                process runs for the full number of iterations specified in `num_inference_steps`. A value of 1
                essentially ignores `image`.
            height (`int`, *optional*):
                The height in pixels of the generated image. If not provided, the height of the input image is used.
            width (`int`, *optional*):
                The width in pixels of the generated image. If not provided, the width of the input image is used.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            sigmas (`List[float]`, *optional*):
                Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in
                their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed
                will be used.
            guidance_scale (`float`, *optional*, defaults to 5.0):
                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
                `guidance_scale` is defined as `w` of equation 2. of [Imagen
                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
                1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
                usually at the expense of lower image quality.
            cfg_normalization (`bool`, *optional*, defaults to False):
                Whether to rescale the classifier-free-guided prediction so that its norm does not exceed the norm of
                the conditional (positive) prediction.
            cfg_truncation (`float`, *optional*, defaults to 1.0):
                Fraction of the denoising trajectory (in normalized time) during which classifier-free guidance is
                applied; once a step's normalized time exceeds this value, guidance is skipped for the remaining
                steps. `1.0` disables truncation.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. If not defined, one has to pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
                less than or equal to `1`).
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
                to make generation deterministic.
            latents (`torch.FloatTensor`, *optional*):
                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
                tensor will be generated by sampling using the supplied random `generator`.
            prompt_embeds (`List[torch.FloatTensor]`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                provided, text embeddings will be generated from `prompt` input argument.
            negative_prompt_embeds (`List[torch.FloatTensor]`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
                argument.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.z_image.ZImagePipelineOutput`] instead of a plain tuple.
            joint_attention_kwargs (`dict`, *optional*):
                A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
                `self.processor` in
                [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
            callback_on_step_end (`Callable`, *optional*):
                A function that is called at the end of each denoising step during inference. The function is called
                with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
                callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
                `callback_on_step_end_tensor_inputs`.
            callback_on_step_end_tensor_inputs (`List`, *optional*):
                The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
                will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
                `._callback_tensor_inputs` attribute of your pipeline class.
            max_sequence_length (`int`, *optional*, defaults to 512):
                Maximum sequence length to use with the `prompt`.

        Examples:

        Returns:
            [`~pipelines.z_image.ZImagePipelineOutput`] or `tuple`: [`~pipelines.z_image.ZImagePipelineOutput`] if
            `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is a list with the
            generated images.
        """
        # 1. Check inputs and validate strength
        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should be in [0.0, 1.0] but is {strength}")

        # 2. Preprocess image
        init_image = self.image_processor.preprocess(image)
        init_image = init_image.to(dtype=torch.float32)

        # Get dimensions from the preprocessed image if not specified
        if height is None:
            height = init_image.shape[-2]
        if width is None:
            width = init_image.shape[-1]

        vae_scale = self.vae_scale_factor * 2
        if height % vae_scale != 0:
            raise ValueError(
                f"Height must be divisible by {vae_scale} (got {height}). "
                f"Please adjust the height to a multiple of {vae_scale}."
            )
        if width % vae_scale != 0:
            raise ValueError(
                f"Width must be divisible by {vae_scale} (got {width}). "
                f"Please adjust the width to a multiple of {vae_scale}."
            )

        device = self._execution_device

        self._guidance_scale = guidance_scale
        self._joint_attention_kwargs = joint_attention_kwargs
        self._interrupt = False
        self._cfg_normalization = cfg_normalization
        self._cfg_truncation = cfg_truncation

        # 3. Define call parameters
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = len(prompt_embeds)

        # If prompt_embeds is provided and prompt is None, skip encoding
        if prompt_embeds is not None and prompt is None:
            if self.do_classifier_free_guidance and negative_prompt_embeds is None:
                raise ValueError(
                    "When `prompt_embeds` is provided without `prompt`, "
                    "`negative_prompt_embeds` must also be provided for classifier-free guidance."
                )
        else:
            (
                prompt_embeds,
                negative_prompt_embeds,
            ) = self.encode_prompt(
                prompt=prompt,
                negative_prompt=negative_prompt,
                do_classifier_free_guidance=self.do_classifier_free_guidance,
                prompt_embeds=prompt_embeds,
                negative_prompt_embeds=negative_prompt_embeds,
                device=device,
                max_sequence_length=max_sequence_length,
            )

        # 4. Prepare latent variables
        num_channels_latents = self.transformer.in_channels

        # Repeat prompt_embeds for num_images_per_prompt
        if num_images_per_prompt > 1:
            prompt_embeds = [pe for pe in prompt_embeds for _ in range(num_images_per_prompt)]
            if self.do_classifier_free_guidance and negative_prompt_embeds:
                negative_prompt_embeds = [npe for npe in negative_prompt_embeds for _ in range(num_images_per_prompt)]

        actual_batch_size = batch_size * num_images_per_prompt

        # Calculate latent dimensions for image_seq_len
        latent_height = 2 * (int(height) // (self.vae_scale_factor * 2))
        latent_width = 2 * (int(width) // (self.vae_scale_factor * 2))
        image_seq_len = (latent_height // 2) * (latent_width // 2)

        # 5. Prepare timesteps
        mu = calculate_shift(
            image_seq_len,
            self.scheduler.config.get("base_image_seq_len", 256),
            self.scheduler.config.get("max_image_seq_len", 4096),
            self.scheduler.config.get("base_shift", 0.5),
            self.scheduler.config.get("max_shift", 1.15),
        )
        self.scheduler.sigma_min = 0.0
        scheduler_kwargs = {"mu": mu}
        timesteps, num_inference_steps = retrieve_timesteps(
            self.scheduler,
            num_inference_steps,
            device,
            sigmas=sigmas,
            **scheduler_kwargs,
        )

        # 6. Adjust timesteps based on strength
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
        if num_inference_steps < 1:
            raise ValueError(
                f"After adjusting the num_inference_steps by strength parameter: {strength}, the number of pipeline "
                f"steps is {num_inference_steps} which is < 1 and not appropriate for this pipeline."
            )
        latent_timestep = timesteps[:1].repeat(actual_batch_size)

        # 7. Prepare latents from image
        latents, noise, original_image_latents, latent_image_ids = self.prepare_latents(
            init_image,
            latent_timestep,
            actual_batch_size,
            num_channels_latents,
            height,
            width,
            prompt_embeds[0].dtype,
            device,
            generator,
            latents,
        )
        resize_mode = "default"
        crops_coords = None

        # start diff diff preparation
        original_mask = self.mask_processor.preprocess(
            mask_image, height=height, width=width, resize_mode=resize_mode, crops_coords=crops_coords
        )

        masked_image = init_image * original_mask
        original_mask, _ = self.prepare_mask_latents(
            original_mask,
            masked_image,
            batch_size,
            num_images_per_prompt,
            height,
            width,
            prompt_embeds[0].dtype,
            device,
            generator,
        )
        mask_thresholds = torch.arange(num_inference_steps, dtype=original_mask.dtype) / num_inference_steps
        mask_thresholds = mask_thresholds.reshape(-1, 1, 1, 1).to(device)
        masks = original_mask > mask_thresholds
        # end diff diff preparation
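        # How the per-step masks behave (illustrative): with 7 effective steps the thresholds are
        # 0, 1/7, ..., 6/7. A region whose map value is 0.9 stays above every threshold, so it is
        # reset to the (re-noised) original latents at each step and is essentially preserved; a
        # region with value 0.1 only exceeds the first threshold, so it is repainted from the
        # second step onward. This matches the differential-diffusion change-map convention:
        # white preserves, black repaints.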

        num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
        self._num_timesteps = len(timesteps)

        # 8. Denoising loop
        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                if self.interrupt:
                    continue

                # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
                timestep = t.expand(latents.shape[0])
                timestep = (1000 - timestep) / 1000
                # Normalized time for time-aware config (0 at start, 1 at end)
                t_norm = timestep[0].item()

                # Handle cfg truncation
                current_guidance_scale = self.guidance_scale
                if (
                    self.do_classifier_free_guidance
                    and self._cfg_truncation is not None
                    and float(self._cfg_truncation) <= 1
                ):
                    if t_norm > self._cfg_truncation:
                        current_guidance_scale = 0.0

                # Run CFG only if configured AND scale is non-zero
                apply_cfg = self.do_classifier_free_guidance and current_guidance_scale > 0

                if apply_cfg:
                    latents_typed = latents.to(self.transformer.dtype)
                    latent_model_input = latents_typed.repeat(2, 1, 1, 1)
                    prompt_embeds_model_input = prompt_embeds + negative_prompt_embeds
                    timestep_model_input = timestep.repeat(2)
                else:
                    latent_model_input = latents.to(self.transformer.dtype)
                    prompt_embeds_model_input = prompt_embeds
                    timestep_model_input = timestep

                latent_model_input = latent_model_input.unsqueeze(2)
                latent_model_input_list = list(latent_model_input.unbind(dim=0))

                model_out_list = self.transformer(
                    latent_model_input_list,
                    timestep_model_input,
                    prompt_embeds_model_input,
                )[0]

                if apply_cfg:
                    # Perform CFG
                    pos_out = model_out_list[:actual_batch_size]
                    neg_out = model_out_list[actual_batch_size:]

                    noise_pred = []
                    for j in range(actual_batch_size):
                        pos = pos_out[j].float()
                        neg = neg_out[j].float()

                        pred = pos + current_guidance_scale * (pos - neg)

                        # Renormalization
                        if self._cfg_normalization and float(self._cfg_normalization) > 0.0:
                            ori_pos_norm = torch.linalg.vector_norm(pos)
                            new_pos_norm = torch.linalg.vector_norm(pred)
                            max_new_norm = ori_pos_norm * float(self._cfg_normalization)
                            if new_pos_norm > max_new_norm:
                                pred = pred * (max_new_norm / new_pos_norm)

                        noise_pred.append(pred)

                    noise_pred = torch.stack(noise_pred, dim=0)
                else:
                    noise_pred = torch.stack([t.float() for t in model_out_list], dim=0)

                noise_pred = noise_pred.squeeze(2)
                noise_pred = -noise_pred

                # compute the previous noisy sample x_t -> x_t-1
                latents = self.scheduler.step(noise_pred.to(torch.float32), t, latents, return_dict=False)[0]
                assert latents.dtype == torch.float32

                # start diff diff
                image_latent = original_image_latents
                latents_dtype = latents.dtype
                if i < len(timesteps) - 1:
                    noise_timestep = timesteps[i + 1]
                    image_latent = self.scheduler.scale_noise(
                        original_image_latents, torch.tensor([noise_timestep]), noise
                    )

                mask = masks[i].to(latents_dtype)
                latents = image_latent * mask + latents * (1 - mask)
                # end diff diff

                if latents.dtype != latents_dtype:
                    if torch.backends.mps.is_available():
                        # some platforms (eg. apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272
                        latents = latents.to(latents_dtype)

                if callback_on_step_end is not None:
                    callback_kwargs = {}
                    for k in callback_on_step_end_tensor_inputs:
                        callback_kwargs[k] = locals()[k]
                    callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)

                    latents = callback_outputs.pop("latents", latents)
                    prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
                    negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)

                # call the callback, if provided
                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                    progress_bar.update()

        if output_type == "latent":
            image = latents

        else:
            latents = latents.to(self.vae.dtype)
            latents = (latents / self.vae.config.scaling_factor) + self.vae.config.shift_factor

            image = self.vae.decode(latents, return_dict=False)[0]
            image = self.image_processor.postprocess(image, output_type=output_type)

        # Offload all models
        self.maybe_free_model_hooks()

        if not return_dict:
            return (image,)

        return ZImagePipelineOutput(images=image)