import inspect
from typing import List, Optional, Tuple, Union

import torch
from torch.nn import functional as F

from transformers import CLIPTextModelWithProjection, CLIPTokenizer
from transformers.models.clip.modeling_clip import CLIPTextModelOutput

from ...models import PriorTransformer, UNet2DConditionModel, UNet2DModel
from ...pipelines import DiffusionPipeline, ImagePipelineOutput
from ...schedulers import UnCLIPScheduler
from ...utils import is_accelerate_available, logging, randn_tensor
from .text_proj import UnCLIPTextProjModel


logger = logging.get_logger(__name__)


class UnCLIPPipeline(DiffusionPipeline):
    """
    Pipeline for text-to-image generation using unCLIP.

    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
    library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.).
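
    Example (a minimal usage sketch; the checkpoint name is illustrative, substitute any checkpoint
    saved in this pipeline's format):

        ```py
        >>> import torch
        >>> from diffusers import UnCLIPPipeline

        >>> pipe = UnCLIPPipeline.from_pretrained("kakaobrain/karlo-v1-alpha", torch_dtype=torch.float16)
        >>> pipe = pipe.to("cuda")

        >>> image = pipe("a photo of an astronaut riding a horse").images[0]
        ```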

    Args:
        text_encoder ([`CLIPTextModelWithProjection`]):
            Frozen text encoder.
        tokenizer (`CLIPTokenizer`):
            Tokenizer of class
            [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
        prior ([`PriorTransformer`]):
            The canonical unCLIP prior to approximate the image embedding from the text embedding.
        text_proj ([`UnCLIPTextProjModel`]):
            Utility class to prepare and combine the embeddings before they are passed to the decoder.
        decoder ([`UNet2DConditionModel`]):
            The decoder to invert the image embedding into an image.
        super_res_first ([`UNet2DModel`]):
            Super resolution UNet. Used in all but the last step of the super resolution diffusion process.
        super_res_last ([`UNet2DModel`]):
            Super resolution UNet. Used in the last step of the super resolution diffusion process.
        prior_scheduler ([`UnCLIPScheduler`]):
            Scheduler used in the prior denoising process (a modified [`DDPMScheduler`]).
        decoder_scheduler ([`UnCLIPScheduler`]):
            Scheduler used in the decoder denoising process (a modified [`DDPMScheduler`]).
        super_res_scheduler ([`UnCLIPScheduler`]):
            Scheduler used in the super resolution denoising process (a modified [`DDPMScheduler`]).
    """

    prior: PriorTransformer
    decoder: UNet2DConditionModel
    text_proj: UnCLIPTextProjModel
    text_encoder: CLIPTextModelWithProjection
    tokenizer: CLIPTokenizer
    super_res_first: UNet2DModel
    super_res_last: UNet2DModel

    prior_scheduler: UnCLIPScheduler
    decoder_scheduler: UnCLIPScheduler
    super_res_scheduler: UnCLIPScheduler

    def __init__(
        self,
        prior: PriorTransformer,
        decoder: UNet2DConditionModel,
        text_encoder: CLIPTextModelWithProjection,
        tokenizer: CLIPTokenizer,
        text_proj: UnCLIPTextProjModel,
        super_res_first: UNet2DModel,
        super_res_last: UNet2DModel,
        prior_scheduler: UnCLIPScheduler,
        decoder_scheduler: UnCLIPScheduler,
        super_res_scheduler: UnCLIPScheduler,
    ):
        super().__init__()

        self.register_modules(
            prior=prior,
            decoder=decoder,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            text_proj=text_proj,
            super_res_first=super_res_first,
            super_res_last=super_res_last,
            prior_scheduler=prior_scheduler,
            decoder_scheduler=decoder_scheduler,
            super_res_scheduler=super_res_scheduler,
        )

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents

    def _encode_prompt(
        self,
        prompt,
        device,
        num_images_per_prompt,
        do_classifier_free_guidance,
        text_model_output: Optional[Union[CLIPTextModelOutput, Tuple]] = None,
        text_attention_mask: Optional[torch.Tensor] = None,
    ):
        if text_model_output is None:
            batch_size = len(prompt) if isinstance(prompt, list) else 1
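            # tokenize padded to max length; over-length prompts are detected and truncated below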
            text_inputs = self.tokenizer(
                prompt,
                padding="max_length",
                max_length=self.tokenizer.model_max_length,
                return_tensors="pt",
            )
            text_input_ids = text_inputs.input_ids
            text_mask = text_inputs.attention_mask.bool().to(device)

            if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
                removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
                logger.warning(
                    "The following part of your input was truncated because CLIP can only handle sequences up to"
                    f" {self.tokenizer.model_max_length} tokens: {removed_text}"
                )
                text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]

            text_encoder_output = self.text_encoder(text_input_ids.to(device))

            text_embeddings = text_encoder_output.text_embeds
            text_encoder_hidden_states = text_encoder_output.last_hidden_state

        else:
            batch_size = text_model_output[0].shape[0]
            text_embeddings, text_encoder_hidden_states = text_model_output[0], text_model_output[1]
            text_mask = text_attention_mask

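        # duplicate the embeddings for each image generated per prompt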
        text_embeddings = text_embeddings.repeat_interleave(num_images_per_prompt, dim=0)
        text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
        text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            uncond_tokens = [""] * batch_size

            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=self.tokenizer.model_max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_text_mask = uncond_input.attention_mask.bool().to(device)
            uncond_embeddings_text_encoder_output = self.text_encoder(uncond_input.input_ids.to(device))

            uncond_embeddings = uncond_embeddings_text_encoder_output.text_embeds
            uncond_text_encoder_hidden_states = uncond_embeddings_text_encoder_output.last_hidden_state

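            # duplicate the unconditional embeddings for each generation per prompt
            # (repeat + view is used here rather than repeat_interleave for mps compatibility)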
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len)

            seq_len = uncond_text_encoder_hidden_states.shape[1]
            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1, num_images_per_prompt, 1)
            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view(
                batch_size * num_images_per_prompt, seq_len, -1
            )
            uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0)
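            # for classifier-free guidance the unconditional and text embeddings are
            # concatenated into a single batch so one forward pass serves both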
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
            text_encoder_hidden_states = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states])

            text_mask = torch.cat([uncond_text_mask, text_mask])

        return text_embeddings, text_encoder_hidden_states, text_mask

    def enable_sequential_cpu_offload(self, gpu_id=0):
        r"""
        Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, the pipeline's
        models have their state dicts saved to CPU and are then moved to `torch.device("meta")`, getting loaded onto
        the GPU only when their specific submodule's `forward` method is called.
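
        Example (a minimal sketch; the checkpoint name is illustrative):

        ```py
        >>> pipe = UnCLIPPipeline.from_pretrained("kakaobrain/karlo-v1-alpha")
        >>> pipe.enable_sequential_cpu_offload()
        >>> image = pipe("a corgi").images[0]
        ```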
| | """ |
| | if is_accelerate_available(): |
| | from accelerate import cpu_offload |
| | else: |
| | raise ImportError("Please install accelerate via `pip install accelerate`") |
| |
|
| | device = torch.device(f"cuda:{gpu_id}") |
| |
|
| | |
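        # note: self.prior is not included in this list; it stays on its current device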
        models = [
            self.decoder,
            self.text_proj,
            self.text_encoder,
            self.super_res_first,
            self.super_res_last,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    @property
    def _execution_device(self):
        r"""
        Returns the device on which the pipeline's models will be executed. After calling
        `pipeline.enable_sequential_cpu_offload()` the execution device can only be inferred from Accelerate's module
        hooks.
        """
        if self.device != torch.device("meta") or not hasattr(self.decoder, "_hf_hook"):
            return self.device
        for module in self.decoder.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    def __call__(
        self,
        prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: int = 1,
        prior_num_inference_steps: int = 25,
        decoder_num_inference_steps: int = 25,
        super_res_num_inference_steps: int = 7,
        generator: Optional[torch.Generator] = None,
        prior_latents: Optional[torch.FloatTensor] = None,
        decoder_latents: Optional[torch.FloatTensor] = None,
        super_res_latents: Optional[torch.FloatTensor] = None,
        text_model_output: Optional[Union[CLIPTextModelOutput, Tuple]] = None,
        text_attention_mask: Optional[torch.Tensor] = None,
        prior_guidance_scale: float = 4.0,
        decoder_guidance_scale: float = 8.0,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
| | """ |
| | Function invoked when calling the pipeline for generation. |
| | Args: |
| | prompt (`str` or `List[str]`): |
| | The prompt or prompts to guide the image generation. This can only be left undefined if |
| | `text_model_output` and `text_attention_mask` is passed. |
| | num_images_per_prompt (`int`, *optional*, defaults to 1): |
| | The number of images to generate per prompt. |
| | prior_num_inference_steps (`int`, *optional*, defaults to 25): |
| | The number of denoising steps for the prior. More denoising steps usually lead to a higher quality |
| | image at the expense of slower inference. |
| | decoder_num_inference_steps (`int`, *optional*, defaults to 25): |
| | The number of denoising steps for the decoder. More denoising steps usually lead to a higher quality |
| | image at the expense of slower inference. |
| | super_res_num_inference_steps (`int`, *optional*, defaults to 7): |
| | The number of denoising steps for super resolution. More denoising steps usually lead to a higher |
| | quality image at the expense of slower inference. |
| | generator (`torch.Generator`, *optional*): |
| | One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) |
| | to make generation deterministic. |
| | prior_latents (`torch.FloatTensor` of shape (batch size, embeddings dimension), *optional*): |
| | Pre-generated noisy latents to be used as inputs for the prior. |
| | decoder_latents (`torch.FloatTensor` of shape (batch size, channels, height, width), *optional*): |
| | Pre-generated noisy latents to be used as inputs for the decoder. |
| | super_res_latents (`torch.FloatTensor` of shape (batch size, channels, super res height, super res width), *optional*): |
| | Pre-generated noisy latents to be used as inputs for the decoder. |
| | prior_guidance_scale (`float`, *optional*, defaults to 4.0): |
| | Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). |
| | `guidance_scale` is defined as `w` of equation 2. of [Imagen |
| | Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > |
| | 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, |
| | usually at the expense of lower image quality. |
| | decoder_guidance_scale (`float`, *optional*, defaults to 4.0): |
| | Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). |
| | `guidance_scale` is defined as `w` of equation 2. of [Imagen |
| | Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > |
| | 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, |
| | usually at the expense of lower image quality. |
| | text_model_output (`CLIPTextModelOutput`, *optional*): |
| | Pre-defined CLIPTextModel outputs that can be derived from the text encoder. Pre-defined text outputs |
| | can be passed for tasks like text embedding interpolations. Make sure to also pass |
| | `text_attention_mask` in this case. `prompt` can the be left to `None`. |
| | text_attention_mask (`torch.Tensor`, *optional*): |
| | Pre-defined CLIP text attention mask that can be derived from the tokenizer. Pre-defined text attention |
| | masks are necessary when passing `text_model_output`. |
| | output_type (`str`, *optional*, defaults to `"pil"`): |
| | The output format of the generated image. Choose between |
| | [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. |
| | return_dict (`bool`, *optional*, defaults to `True`): |
| | Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. |
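
        Example (a sketch of passing precomputed text-encoder outputs; assumes the pipeline runs on its
        default device):

        ```py
        >>> inputs = pipe.tokenizer(
        ...     ["a corgi"], padding="max_length", max_length=pipe.tokenizer.model_max_length, return_tensors="pt"
        ... )
        >>> outputs = pipe.text_encoder(inputs.input_ids)
        >>> image = pipe(text_model_output=outputs, text_attention_mask=inputs.attention_mask.bool()).images[0]
        ```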
| | """ |
| | if prompt is not None: |
| | if isinstance(prompt, str): |
| | batch_size = 1 |
| | elif isinstance(prompt, list): |
| | batch_size = len(prompt) |
| | else: |
| | raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") |
| | else: |
| | batch_size = text_model_output[0].shape[0] |
| |
|
| | device = self._execution_device |
| |
|
| | batch_size = batch_size * num_images_per_prompt |
| |
|
| | do_classifier_free_guidance = prior_guidance_scale > 1.0 or decoder_guidance_scale > 1.0 |
| |
|

        text_embeddings, text_encoder_hidden_states, text_mask = self._encode_prompt(
            prompt, device, num_images_per_prompt, do_classifier_free_guidance, text_model_output, text_attention_mask
        )

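        # prior: predict a CLIP image embedding from the text embedding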
        self.prior_scheduler.set_timesteps(prior_num_inference_steps, device=device)
        prior_timesteps_tensor = self.prior_scheduler.timesteps

        embedding_dim = self.prior.config.embedding_dim

        prior_latents = self.prepare_latents(
            (batch_size, embedding_dim),
            text_embeddings.dtype,
            device,
            generator,
            prior_latents,
            self.prior_scheduler,
        )

        for i, t in enumerate(self.progress_bar(prior_timesteps_tensor)):
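            # expand the latents if classifier-free guidance is used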
            latent_model_input = torch.cat([prior_latents] * 2) if do_classifier_free_guidance else prior_latents

            predicted_image_embedding = self.prior(
                latent_model_input,
                timestep=t,
                proj_embedding=text_embeddings,
                encoder_hidden_states=text_encoder_hidden_states,
                attention_mask=text_mask,
            ).predicted_image_embedding

            if do_classifier_free_guidance:
                predicted_image_embedding_uncond, predicted_image_embedding_text = predicted_image_embedding.chunk(2)
                predicted_image_embedding = predicted_image_embedding_uncond + prior_guidance_scale * (
                    predicted_image_embedding_text - predicted_image_embedding_uncond
                )

            if i + 1 == prior_timesteps_tensor.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = prior_timesteps_tensor[i + 1]

            prior_latents = self.prior_scheduler.step(
                predicted_image_embedding,
                timestep=t,
                sample=prior_latents,
                generator=generator,
                prev_timestep=prev_timestep,
            ).prev_sample

        prior_latents = self.prior.post_process_latents(prior_latents)

        image_embeddings = prior_latents

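        # decoder: generate a low-resolution image conditioned on the image embedding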
        text_encoder_hidden_states, additive_clip_time_embeddings = self.text_proj(
            image_embeddings=image_embeddings,
            text_embeddings=text_embeddings,
            text_encoder_hidden_states=text_encoder_hidden_states,
            do_classifier_free_guidance=do_classifier_free_guidance,
        )

        decoder_text_mask = F.pad(text_mask, (self.text_proj.clip_extra_context_tokens, 0), value=1)

        self.decoder_scheduler.set_timesteps(decoder_num_inference_steps, device=device)
        decoder_timesteps_tensor = self.decoder_scheduler.timesteps

        num_channels_latents = self.decoder.in_channels
        height = self.decoder.sample_size
        width = self.decoder.sample_size

        decoder_latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            text_encoder_hidden_states.dtype,
            device,
            generator,
            decoder_latents,
            self.decoder_scheduler,
        )

        for i, t in enumerate(self.progress_bar(decoder_timesteps_tensor)):
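            # expand the latents if classifier-free guidance is used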
            latent_model_input = torch.cat([decoder_latents] * 2) if do_classifier_free_guidance else decoder_latents

            noise_pred = self.decoder(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=text_encoder_hidden_states,
                class_labels=additive_clip_time_embeddings,
                attention_mask=decoder_text_mask,
            ).sample

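            # the decoder predicts noise and variance in separate channel groups; guidance
            # is applied to the noise channels only, then the variance is re-attached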
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred_uncond, _ = noise_pred_uncond.split(latent_model_input.shape[1], dim=1)
                noise_pred_text, predicted_variance = noise_pred_text.split(latent_model_input.shape[1], dim=1)
                noise_pred = noise_pred_uncond + decoder_guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, predicted_variance], dim=1)

            if i + 1 == decoder_timesteps_tensor.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = decoder_timesteps_tensor[i + 1]

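            # compute the previous noisy sample x_t -> x_t-1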
            decoder_latents = self.decoder_scheduler.step(
                noise_pred, t, decoder_latents, prev_timestep=prev_timestep, generator=generator
            ).prev_sample

        decoder_latents = decoder_latents.clamp(-1, 1)

        image_small = decoder_latents

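        # super res: upscale the decoder output with the super-resolution UNets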
        self.super_res_scheduler.set_timesteps(super_res_num_inference_steps, device=device)
        super_res_timesteps_tensor = self.super_res_scheduler.timesteps

        channels = self.super_res_first.in_channels // 2
        height = self.super_res_first.sample_size
        width = self.super_res_first.sample_size

        super_res_latents = self.prepare_latents(
            (batch_size, channels, height, width),
            image_small.dtype,
            device,
            generator,
            super_res_latents,
            self.super_res_scheduler,
        )

        interpolate_antialias = {}
        if "antialias" in inspect.signature(F.interpolate).parameters:
            interpolate_antialias["antialias"] = True

        image_upscaled = F.interpolate(
            image_small, size=[height, width], mode="bicubic", align_corners=False, **interpolate_antialias
        )

        for i, t in enumerate(self.progress_bar(super_res_timesteps_tensor)):
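            # no classifier-free guidance here; a dedicated UNet handles the final step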
            if i == super_res_timesteps_tensor.shape[0] - 1:
                unet = self.super_res_last
            else:
                unet = self.super_res_first

            latent_model_input = torch.cat([super_res_latents, image_upscaled], dim=1)

            noise_pred = unet(
                sample=latent_model_input,
                timestep=t,
            ).sample

            if i + 1 == super_res_timesteps_tensor.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = super_res_timesteps_tensor[i + 1]

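            # compute the previous noisy sample x_t -> x_t-1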
            super_res_latents = self.super_res_scheduler.step(
                noise_pred, t, super_res_latents, prev_timestep=prev_timestep, generator=generator
            ).prev_sample

        image = super_res_latents

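        # post processing: map the image from [-1, 1] to [0, 1] and convert to the requested output type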
        image = image * 0.5 + 0.5
        image = image.clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)