# diffusers/pipelines/latent_diffusion_uncond/pipeline_latent_diffusion_uncond.py
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
from typing import List, Optional, Tuple, Union

import torch

from ...models import UNet2DModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils.torch_utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput

class LDMPipeline(DiffusionPipeline):
    r"""
    Pipeline for unconditional image generation using latent diffusion.

    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
    implemented for all pipelines (downloading, saving, running on a particular device, etc.).

    Parameters:
        vqvae ([`VQModel`]):
            Vector-quantized (VQ) model to encode and decode images to and from latent representations.
        unet ([`UNet2DModel`]):
            A `UNet2DModel` to denoise the encoded image latents.
        scheduler ([`SchedulerMixin`]):
            [`DDIMScheduler`] is used in combination with `unet` to denoise the encoded image latents.
    """
    def __init__(self, vqvae: VQModel, unet: UNet2DModel, scheduler: DDIMScheduler):
        super().__init__()
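        # register the components with the pipeline so they are tracked as attributes,
        # moved together by `.to()`, and handled by save_pretrained/from_pretrained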
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)
    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
| r""" | |
| The call function to the pipeline for generation. | |
| Args: | |
| batch_size (`int`, *optional*, defaults to 1): | |
| Number of images to generate. | |
| generator (`torch.Generator`, *optional*): | |
| A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make | |
| generation deterministic. | |
| num_inference_steps (`int`, *optional*, defaults to 50): | |
| The number of denoising steps. More denoising steps usually lead to a higher quality image at the | |
| expense of slower inference. | |
| output_type (`str`, *optional*, defaults to `"pil"`): | |
| The output format of the generated image. Choose between `PIL.Image` or `np.array`. | |
| return_dict (`bool`, *optional*, defaults to `True`): | |
| Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. | |
        Example:

        ```py
        >>> from diffusers import LDMPipeline

        >>> # load model and scheduler
        >>> pipe = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")

        >>> # run pipeline in inference (sample random noise and denoise)
        >>> image = pipe().images[0]
        ```

        Returns:
            [`~pipelines.ImagePipelineOutput`] or `tuple`:
                If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is
                returned where the first element is a list with the generated images.
        """
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
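        # (for DDIMScheduler, init_noise_sigma is 1.0 so this is a no-op; it is kept so
        # sigma-scaled schedulers can be swapped in without changing the pipeline)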

        self.scheduler.set_timesteps(num_inference_steps)

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())

        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(self.scheduler.timesteps):
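            # scale_model_input is an identity op for DDIM, but it keeps the loop
            # compatible with schedulers that rescale the sample before the model call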
            latent_model_input = self.scheduler.scale_model_input(latents, t)
            # predict the noise residual
            noise_prediction = self.unet(latent_model_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction, t, latents, **extra_kwargs).prev_sample

        # adjust latents with inverse of vae scale
        latents = latents / self.vqvae.config.scaling_factor
        # decode the image latents with the VAE
        image = self.vqvae.decode(latents).sample
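        # map the decoded images from [-1, 1] to [0, 1] and move channels last (NHWC)
        # so the array can be consumed by numpy/PIL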
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)