AlexGraikos committed
Commit 0868870 · verified · 1 Parent(s): ab29603

Create pipeline.py

Files changed (1)
  1. pipeline.py +457 -0
pipeline.py ADDED
# Copyright 2024 PixArt-Sigma Authors and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import html
import inspect
import re
import urllib.parse as ul
from typing import Callable, List, Optional, Tuple, Union

import torch

from diffusers.image_processor import PixArtImageProcessor
from diffusers.models import AutoencoderKL
from diffusers.schedulers import DPMSolverMultistepScheduler
from diffusers.utils import (
    BACKENDS_MAPPING,
    deprecate,
    logging,
    replace_example_docstring,
)
from diffusers.utils.torch_utils import randn_tensor
from diffusers.pipelines.pipeline_utils import DiffusionPipeline, ImagePipelineOutput

from pixcell_transformer_2d import PixCellTransformer2DModel


# TODO:
# Clean up the conditioning code
# Need to fix how the conditioning is provided
# Maybe add UNI to the pipeline


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> import torch
        >>> from pipeline import PixCellPipeline

        >>> # Replace with a PixCell checkpoint path or hub id.
        >>> pipe = PixCellPipeline.from_pretrained(
        ...     "path/to/pixcell-checkpoint", torch_dtype=torch.float16
        ... )
        >>> # Enable memory optimizations.
        >>> # pipe.enable_model_cpu_offload()

        >>> # `uni_embeds` is a precomputed UNI embedding of shape (B, N, D).
        >>> negative = pipe.get_unconditional_embedding(batch_size=uni_embeds.shape[0])
        >>> image = pipe(uni_embeds=uni_embeds, negative_uni_embeds=negative).images[0]
        ```
"""


# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps
def retrieve_timesteps(
    scheduler,
    num_inference_steps: Optional[int] = None,
    device: Optional[Union[str, torch.device]] = None,
    timesteps: Optional[List[int]] = None,
    sigmas: Optional[List[float]] = None,
    **kwargs,
):
    r"""
    Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
    custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.

    Args:
        scheduler (`SchedulerMixin`):
            The scheduler to get timesteps from.
        num_inference_steps (`int`):
            The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps`
            must be `None`.
        device (`str` or `torch.device`, *optional*):
            The device to which the timesteps should be moved. If `None`, the timesteps are not moved.
        timesteps (`List[int]`, *optional*):
            Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is
            passed, `num_inference_steps` and `sigmas` must be `None`.
        sigmas (`List[float]`, *optional*):
            Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed,
            `num_inference_steps` and `timesteps` must be `None`.

    Returns:
        `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and
        the second element is the number of inference steps.
    """
    if timesteps is not None and sigmas is not None:
        raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values")
    if timesteps is not None:
        accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
        if not accepts_timesteps:
            raise ValueError(
                f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
                f" timestep schedules. Please check whether you are using the correct scheduler."
            )
        scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
        timesteps = scheduler.timesteps
        num_inference_steps = len(timesteps)
    elif sigmas is not None:
        accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
        if not accept_sigmas:
            raise ValueError(
                f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
                f" sigmas schedules. Please check whether you are using the correct scheduler."
            )
        scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs)
        timesteps = scheduler.timesteps
        num_inference_steps = len(timesteps)
    else:
        scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
        timesteps = scheduler.timesteps
    return timesteps, num_inference_steps
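
# Illustrative usage (sketch, assuming a scheduler whose `set_timesteps` accepts custom
# `timesteps`): either let the scheduler space the steps,
#   timesteps, n = retrieve_timesteps(scheduler, num_inference_steps=20, device=device)
# or pass an explicit descending schedule,
#   timesteps, n = retrieve_timesteps(scheduler, timesteps=[999, 749, 499, 249], device=device)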


class PixCellPipeline(DiffusionPipeline):
    r"""
    Pipeline for SSL-to-image generation using PixCell.
    """

    model_cpu_offload_seq = "transformer->vae"

    def __init__(
        self,
        vae: AutoencoderKL,
        transformer: PixCellTransformer2DModel,
        scheduler: DPMSolverMultistepScheduler,
    ):
        super().__init__()

        self.register_modules(
            vae=vae, transformer=transformer, scheduler=scheduler
        )

        self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
        self.image_processor = PixArtImageProcessor(vae_scale_factor=self.vae_scale_factor)

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
    def prepare_extra_step_kwargs(self, generator, eta):
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]

        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        # check if the scheduler accepts generator
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs["generator"] = generator
        return extra_step_kwargs

    def get_unconditional_embedding(self, batch_size=1):
        # Unconditional embedding is learned
        uncond = self.transformer.caption_projection.uncond_embedding.clone().tile(batch_size, 1, 1)
        return uncond
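
    # Illustrative CFG usage (sketch): the learned unconditional embedding can serve
    # as the negative conditioning, e.g.
    #   negative = pipe.get_unconditional_embedding(batch_size=uni_embeds.shape[0])
    #   images = pipe(uni_embeds=uni_embeds, negative_uni_embeds=negative).images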

    # Adapted from diffusers.pipelines.pixart_alpha.pipeline_pixart_alpha.PixArtAlphaPipeline.check_inputs
    def check_inputs(
        self,
        height,
        width,
        callback_steps,
        uni_embeds=None,
        negative_uni_embeds=None,
        guidance_scale=None,
    ):
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        if uni_embeds is None:
            raise ValueError("Provide a UNI embedding `uni_embeds`.")
        elif len(uni_embeds.shape) != 3:
            raise ValueError("The UNI embedding must have shape (B, N, D).")
        elif uni_embeds.shape[1] != self.transformer.config.caption_num_tokens:
            raise ValueError(
                f"Number of UNI embeddings must match the ones used in training ({self.transformer.config.caption_num_tokens})."
            )
        elif uni_embeds.shape[2] != self.transformer.config.caption_channels:
            raise ValueError("The UNI embedding given has incorrect dimensions.")

        if guidance_scale > 1.0:
            if negative_uni_embeds is None:
                raise ValueError("Provide a negative UNI embedding `negative_uni_embeds`.")
            elif len(negative_uni_embeds.shape) != 3:
                raise ValueError("The negative UNI embedding must have shape (B, N, D).")
            elif negative_uni_embeds.shape[1] != self.transformer.config.caption_num_tokens:
                raise ValueError(
                    f"Number of negative UNI embeddings must match the ones used in training ({self.transformer.config.caption_num_tokens})."
                )
            elif negative_uni_embeds.shape[2] != self.transformer.config.caption_channels:
                raise ValueError("The negative UNI embedding given has incorrect dimensions.")

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
    def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
        shape = (
            batch_size,
            num_channels_latents,
            int(height) // self.vae_scale_factor,
            int(width) // self.vae_scale_factor,
        )
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            latents = latents.to(device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        return latents
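
    # For example (sketch): with an 8x VAE downsampling factor, a 1024x1024 image maps
    # to latents of shape (batch_size, num_channels_latents, 128, 128).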

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        num_inference_steps: int = 20,
        timesteps: List[int] = None,
        sigmas: List[float] = None,
        guidance_scale: float = 1.5,
        num_images_per_prompt: Optional[int] = 1,
        height: Optional[int] = None,
        width: Optional[int] = None,
        eta: float = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.Tensor] = None,
        uni_embeds: Optional[torch.Tensor] = None,
        negative_uni_embeds: Optional[torch.Tensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        """
        Function invoked when calling the pipeline for generation.

        Args:
            num_inference_steps (`int`, *optional*, defaults to 20):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            timesteps (`List[int]`, *optional*):
                Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument
                in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
                passed will be used. Must be in descending order.
            sigmas (`List[float]`, *optional*):
                Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in
                their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
                passed will be used.
            guidance_scale (`float`, *optional*, defaults to 1.5):
                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
                `guidance_scale` is defined as `w` of equation 2. of the [Imagen
                paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
                1`. A higher guidance scale encourages generating images that are closely linked to the conditioning
                UNI embeddings, usually at the expense of lower image quality.
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per conditioning embedding.
            height (`int`, *optional*, defaults to `self.transformer.config.sample_size * self.vae_scale_factor`):
                The height in pixels of the generated image.
            width (`int`, *optional*, defaults to `self.transformer.config.sample_size * self.vae_scale_factor`):
                The width in pixels of the generated image.
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
                [`schedulers.DDIMScheduler`], will be ignored for others.
            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
                to make generation deterministic.
            latents (`torch.Tensor`, *optional*):
                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different conditionings. If not provided, a
                latents tensor will be generated by sampling using the supplied random `generator`.
            uni_embeds (`torch.Tensor`, *optional*):
                Pre-generated UNI embeddings of shape (B, N, D).
            negative_uni_embeds (`torch.Tensor`, *optional*):
                Pre-generated negative UNI embeddings. Required when `guidance_scale > 1`.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
            callback (`Callable`, *optional*):
                A function that will be called every `callback_steps` steps during inference. The function will be
                called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`.
            callback_steps (`int`, *optional*, defaults to 1):
                The frequency at which the `callback` function will be called. If not specified, the callback will be
                called at every step.

        Examples:

        Returns:
            [`~pipelines.ImagePipelineOutput`] or `tuple`:
                If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is
                returned where the first element is a list with the generated images.
        """
        # 1. Default height and width to the transformer's sample size
        height = height or self.transformer.config.sample_size * self.vae_scale_factor
        width = width or self.transformer.config.sample_size * self.vae_scale_factor

        # 2. Check inputs. Raise error if not correct
        self.check_inputs(
            height,
            width,
            callback_steps,
            uni_embeds,
            negative_uni_embeds,
            guidance_scale,
        )

        batch_size = uni_embeds.shape[0]

        device = self._execution_device

        # 3. Handle UNI conditioning

        # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0

        uni_embeds = uni_embeds.repeat_interleave(num_images_per_prompt, dim=0)
        if do_classifier_free_guidance:
            negative_uni_embeds = negative_uni_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            uni_embeds = torch.cat([negative_uni_embeds, uni_embeds], dim=0)
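
        # The concatenated batch is ordered [negative, positive]; the guidance step
        # below splits the prediction with chunk(2) in that same order.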

        # 4. Prepare timesteps
        timesteps, num_inference_steps = retrieve_timesteps(
            self.scheduler, num_inference_steps, device, timesteps, sigmas
        )

        # 5. Prepare latents.
        latent_channels = self.transformer.config.in_channels
        latents = self.prepare_latents(
            batch_size * num_images_per_prompt,
            latent_channels,
            height,
            width,
            uni_embeds.dtype,
            device,
            generator,
            latents,
        )

        # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
        extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)

        added_cond_kwargs = {}

        # 7. Denoising loop
        num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)

        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

                current_timestep = t
                if not torch.is_tensor(current_timestep):
                    # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                    # This would be a good case for the `match` statement (Python 3.10+)
                    is_mps = latent_model_input.device.type == "mps"
                    if isinstance(current_timestep, float):
                        dtype = torch.float32 if is_mps else torch.float64
                    else:
                        dtype = torch.int32 if is_mps else torch.int64
                    current_timestep = torch.tensor([current_timestep], dtype=dtype, device=latent_model_input.device)
                elif len(current_timestep.shape) == 0:
                    current_timestep = current_timestep[None].to(latent_model_input.device)
                # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
                current_timestep = current_timestep.expand(latent_model_input.shape[0])

                # predict noise model_output
                noise_pred = self.transformer(
                    latent_model_input,
                    encoder_hidden_states=uni_embeds,
                    timestep=current_timestep,
                    added_cond_kwargs=added_cond_kwargs,
                    return_dict=False,
                )[0]

                # perform guidance
                if do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

                # learned sigma: if the model predicts both noise and variance, keep only the noise half
                if self.transformer.config.out_channels // 2 == latent_channels:
                    noise_pred = noise_pred.chunk(2, dim=1)[0]

                # compute previous image: x_t -> x_t-1
                latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]

                # call the callback, if provided
                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                    progress_bar.update()
                    if callback is not None and i % callback_steps == 0:
                        step_idx = i // getattr(self.scheduler, "order", 1)
                        callback(step_idx, t, latents)

        if output_type != "latent":
            # undo the VAE's latent scaling (and shift, if the VAE defines one) before decoding
            vae_scale = self.vae.config.scaling_factor
            vae_shift = getattr(self.vae.config, "shift_factor", 0) or 0
            image = self.vae.decode((latents / vae_scale) + vae_shift, return_dict=False)[0]
            image = self.image_processor.postprocess(image, output_type=output_type)
        else:
            image = latents

        # Offload all models
        self.maybe_free_model_hooks()

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
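
For context, a minimal end-to-end sketch of driving this pipeline, assuming the file is importable as `pipeline`. The checkpoint path and the random stand-in for UNI features are assumptions (the commit does not show how UNI embeddings are produced); the shapes follow `check_inputs`, which expects `(B, N, D)` with `N = caption_num_tokens` and `D = caption_channels`.

```py
# Hypothetical usage sketch; the checkpoint id and the UNI extraction step are assumptions.
import torch
from pipeline import PixCellPipeline

pipe = PixCellPipeline.from_pretrained(
    "path/to/pixcell-checkpoint",  # assumed checkpoint id
    torch_dtype=torch.float16,
).to("cuda")

# Stand-in for real UNI features: shape (B, N, D) must match the transformer's
# caption_num_tokens and caption_channels, as enforced by check_inputs.
uni_embeds = torch.randn(
    1,
    pipe.transformer.config.caption_num_tokens,
    pipe.transformer.config.caption_channels,
    dtype=torch.float16,
    device="cuda",
)

# The learned unconditional embedding doubles as the negative conditioning for CFG.
negative = pipe.get_unconditional_embedding(batch_size=uni_embeds.shape[0]).to("cuda", torch.float16)

images = pipe(
    uni_embeds=uni_embeds,
    negative_uni_embeds=negative,
    guidance_scale=1.5,
    num_inference_steps=20,
).images
images[0].save("sample.png")
```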