import PIL
from PIL import Image
from typing import Callable, List, Optional, Union, Tuple, AnyStr

import numpy as np
import torch
import torch.nn.functional as F
from torch.cuda.amp import custom_bwd, custom_fwd
from torchvision import transforms
from diffusers.pipelines.stable_diffusion_xl import StableDiffusionXLPipelineOutput
from diffusers.pipelines.stable_diffusion_xl import StableDiffusionXLPipeline

from methods.token2attn.attn_control import AttentionStore
from methods.token2attn.ptp_utils import text_under_image, view_images


class Token2AttnMixinASDSSDXLPipeline(StableDiffusionXLPipeline):
    r"""
    Pipeline for text-to-image generation using Stable Diffusion XL.
    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
    library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)

    Args:
        vae ([`AutoencoderKL`]):
            Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
        text_encoder ([`CLIPTextModel`]):
            Frozen text-encoder. Stable Diffusion uses the text portion of
            [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
            the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
        tokenizer (`CLIPTokenizer`):
            Tokenizer of class
            [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
        unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
        scheduler ([`SchedulerMixin`]):
            A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
            [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
        safety_checker ([`StableDiffusionSafetyChecker`]):
            Classification module that estimates whether generated images could be considered offensive or harmful.
            Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
        feature_extractor ([`CLIPFeatureExtractor`]):
            Model that extracts features from generated images to be used as inputs for the `safety_checker`.
    """
    _optional_components = ["safety_checker", "feature_extractor"]

    @torch.no_grad()
    def __call__(
            self,
            prompt: Union[str, List[str]],
            prompt_2: Optional[Union[str, List[str]]] = None,
            height: Optional[int] = None,
            width: Optional[int] = None,
            controller: AttentionStore = None,
            num_inference_steps: int = 50,
            denoising_end: Optional[float] = None,
            guidance_scale: float = 5.0,
            negative_prompt: Optional[Union[str, List[str]]] = None,
            negative_prompt_2: Optional[Union[str, List[str]]] = None,
            num_images_per_prompt: Optional[int] = 1,
            eta: float = 0.0,
            generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
            latents: Optional[torch.FloatTensor] = None,
            output_type: Optional[str] = "pil",
            return_dict: bool = True,
            callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
            callback_steps: Optional[int] = 1,
            original_size: Optional[Tuple[int, int]] = None,
            crops_coords_top_left: Tuple[int, int] = (0, 0),
            target_size: Optional[Tuple[int, int]] = None,
    ):
| | r""" |
| | Function invoked when calling the pipeline for generation. |
| | |
| | Args: |
| | prompt (`str` or `List[str]`, *optional*): |
| | The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. |
| | instead. |
| | prompt_2 (`str` or `List[str]`, *optional*): |
| | The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is |
| | used in both text-encoders |
| | height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): |
| | The height in pixels of the generated image. |
| | width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): |
| | The width in pixels of the generated image. |
| | num_inference_steps (`int`, *optional*, defaults to 50): |
| | The number of denoising steps. More denoising steps usually lead to a higher quality image at the |
| | expense of slower inference. |
| | denoising_end (`float`, *optional*): |
| | When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be |
| | completed before it is intentionally prematurely terminated. As a result, the returned sample will |
| | still retain a substantial amount of noise as determined by the discrete timesteps selected by the |
| | scheduler. The denoising_end parameter should ideally be utilized when this pipeline forms a part of a |
| | "Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image |
| | Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output) |
| | guidance_scale (`float`, *optional*, defaults to 5.0): |
| | Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). |
| | `guidance_scale` is defined as `w` of equation 2. of [Imagen |
| | Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > |
| | 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, |
| | usually at the expense of lower image quality. |
| | negative_prompt (`str` or `List[str]`, *optional*): |
| | The prompt or prompts not to guide the image generation. If not defined, one has to pass |
| | `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is |
| | less than `1`). |
| | negative_prompt_2 (`str` or `List[str]`, *optional*): |
| | The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and |
| | `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders |
| | num_images_per_prompt (`int`, *optional*, defaults to 1): |
| | The number of images to generate per prompt. |
| | eta (`float`, *optional*, defaults to 0.0): |
| | Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to |
| | [`schedulers.DDIMScheduler`], will be ignored for others. |
| | generator (`torch.Generator` or `List[torch.Generator]`, *optional*): |
| | One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) |
| | to make generation deterministic. |
| | latents (`torch.FloatTensor`, *optional*): |
| | Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image |
| | generation. Can be used to tweak the same generation with different prompts. If not provided, a latents |
| | tensor will ge generated by sampling using the supplied random `generator`. |
| | output_type (`str`, *optional*, defaults to `"pil"`): |
| | The output format of the generate image. Choose between |
| | [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. |
| | return_dict (`bool`, *optional*, defaults to `True`): |
| | Whether or not to return a [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] instead |
| | of a plain tuple. |
| | callback (`Callable`, *optional*): |
| | A function that will be called every `callback_steps` steps during inference. The function will be |
| | called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. |
| | callback_steps (`int`, *optional*, defaults to 1): |
| | The frequency at which the `callback` function will be called. If not specified, the callback will be |
| | called at every step. |
| | original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): |
| | If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled. |
| | `original_size` defaults to `(width, height)` if not specified. Part of SDXL's micro-conditioning as |
| | explained in section 2.2 of |
| | [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). |
| | crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): |
| | `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position |
| | `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting |
| | `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of |
| | [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). |
| | target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): |
| | For most cases, `target_size` should be set to the desired height and width of the generated image. If |
| | not specified it will default to `(width, height)`. Part of SDXL's micro-conditioning as explained in |
| | section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). |
| | |
| | Examples: |
| | |
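            A minimal usage sketch (illustrative only: the checkpoint id and prompt are assumptions, and it
            assumes `AttentionStore` can be constructed with its default arguments):

            >>> import torch
            >>> from methods.token2attn.attn_control import AttentionStore
            >>> pipe = Token2AttnMixinASDSSDXLPipeline.from_pretrained(
            ...     "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
            ... ).to("cuda")
            >>> controller = AttentionStore()
            >>> out = pipe(prompt="a sketch of an owl", controller=controller, num_inference_steps=50)
            >>> out.images[0].save("owl.png")
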
        Returns:
            [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] or `tuple`:
            [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] if `return_dict` is True, otherwise a
            `tuple`. When returning a tuple, the first element is a list with the generated images.
        """
        self.register_attention_control(controller)  # install the attention controller

        # 0. Default height and width to the UNet sample size
        height = height or self.unet.config.sample_size * self.vae_scale_factor
        width = width or self.unet.config.sample_size * self.vae_scale_factor

        original_size = original_size or (height, width)
        target_size = target_size or (height, width)

        # 1. Check inputs. Raise error if not correct
        self.check_inputs(prompt, prompt_2, height, width, callback_steps)

        # 2. Define call parameters
        batch_size = 1 if isinstance(prompt, str) else len(prompt)
        device = self._execution_device
        # `guidance_scale > 1` enables classifier-free guidance; it corresponds to the guidance
        # weight `w` of equation (2) of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf
        do_classifier_free_guidance = guidance_scale > 1.0

        # 3. Encode the input prompt
        (
            text_embeddings,
            negative_text_embeddings,
            pooled_text_embeddings,
            negative_pooled_text_embeddings,
        ) = self.encode_prompt(
            prompt=prompt,
            prompt_2=prompt_2,
            device=device,
            num_images_per_prompt=num_images_per_prompt,
            do_classifier_free_guidance=do_classifier_free_guidance,
            negative_prompt=negative_prompt,
            negative_prompt_2=negative_prompt_2,
        )

        # 4. Prepare timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        # 5. Prepare latent variables
        try:
            num_channels_latents = self.unet.config.in_channels
        except Exception:  # fall back for diffusers versions without `config.in_channels`
            num_channels_latents = self.unet.in_channels

        latents = self.prepare_latents(
            batch_size * num_images_per_prompt,
            num_channels_latents,
            height,
            width,
            text_embeddings.dtype,
            device,
            generator,
            latents,
        )

        # 6. Prepare extra step kwargs for the scheduler
        extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)

        # 7. Prepare added time ids & embeddings (SDXL micro-conditioning)
        add_text_embeddings = pooled_text_embeddings
        add_time_ids = self._get_add_time_ids(
            original_size, crops_coords_top_left, target_size, dtype=text_embeddings.dtype
        )

        if do_classifier_free_guidance:
            text_embeddings = torch.cat([negative_text_embeddings, text_embeddings], dim=0)
            add_text_embeddings = torch.cat([negative_pooled_text_embeddings, add_text_embeddings], dim=0)
            add_time_ids = torch.cat([add_time_ids, add_time_ids], dim=0)

        text_embeddings = text_embeddings.to(device)
        add_text_embeddings = add_text_embeddings.to(device)
        add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1)

        # 8. Optionally truncate the schedule when `denoising_end` is set
        if denoising_end is not None and isinstance(denoising_end, float) and 0 < denoising_end < 1:
            discrete_timestep_cutoff = int(
                round(
                    self.scheduler.config.num_train_timesteps
                    - (denoising_end * self.scheduler.config.num_train_timesteps)
                )
            )
            num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps)))
            timesteps = timesteps[:num_inference_steps]

        # 9. Denoising loop
        num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                # expand the latents if classifier-free guidance is used
                latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

                # predict the noise residual
                added_cond_kwargs = {"text_embeds": add_text_embeddings, "time_ids": add_time_ids}
                noise_pred = self.unet(
                    latent_model_input,
                    t,
                    encoder_hidden_states=text_embeddings,
                    added_cond_kwargs=added_cond_kwargs
                ).sample

                # perform classifier-free guidance
                if do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

                # compute the previous noisy sample x_t -> x_{t-1}
                latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

                # let the attention controller post-process the latents
                latents = controller.step_callback(latents)

                # update the progress bar and call the user callback
                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                    progress_bar.update()
                    if callback is not None and i % callback_steps == 0:
                        callback(i, t, latents)

        # 10. Post-processing: make sure the VAE is in float32 mode, as it overflows in float16
        if self.vae.dtype == torch.float16 and self.vae.config.force_upcast:
            self.upcast_vae()
            latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)

        if output_type != "latent":
            image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
        else:
            image = latents
            return StableDiffusionXLPipelineOutput(images=image)

        # apply the SDXL watermark if one is configured
        if self.watermark is not None:
            image = self.watermark.apply_watermark(image)

        image = self.image_processor.postprocess(image, output_type=output_type)

        if not return_dict:
            return (image,)

        return StableDiffusionXLPipelineOutput(images=image)

    def encode2latents(self,
                       image,
                       batch_size,
                       num_images_per_prompt,
                       dtype,
                       device,
                       generator=None):
        """Encode an image (or a batch of images) into VAE latents."""
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        # offload the second text encoder if model CPU offload is enabled
        if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
            self.text_encoder_2.to("cpu")
            torch.cuda.empty_cache()

        image = image.to(device=device, dtype=dtype)

        batch_size = batch_size * num_images_per_prompt

        if image.shape[1] == 4:
            # the input is already a latent tensor
            init_latents = image
        else:
            # make sure the VAE is in float32 mode, as it overflows in float16
            if self.vae.config.force_upcast:
                image = image.float()
                self.vae.to(dtype=torch.float32)

            if isinstance(generator, list) and len(generator) != batch_size:
                raise ValueError(
                    f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                    f" size of {batch_size}. Make sure the batch size matches the length of the generators."
                )
            elif isinstance(generator, list):
                init_latents = [
                    self.vae.encode(image[i: i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
                ]
                init_latents = torch.cat(init_latents, dim=0)
            else:
                init_latents = self.vae.encode(image).latent_dist.sample(generator)

            if self.vae.config.force_upcast:
                self.vae.to(dtype)

            init_latents = init_latents.to(dtype)
            init_latents = self.vae.config.scaling_factor * init_latents

        if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0:
            # expand init_latents to match the requested batch size
            additional_image_per_prompt = batch_size // init_latents.shape[0]
            init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0)
        elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0:
            raise ValueError(
                f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts."
            )
        else:
            init_latents = torch.cat([init_latents], dim=0)

        latents = init_latents

        return latents

    @staticmethod
    def S_aug(sketch: torch.Tensor,
              im_res: int = 1024,
              augments: str = "affine_contrast"):
        """Apply random augmentations to the rendered image before computing the SDS loss."""
        augment_list = []
        if "affine" in augments:
            # random perspective + random resized crop
            augment_list.append(
                transforms.RandomPerspective(fill=0, p=1.0, distortion_scale=0.5)
            )
            augment_list.append(
                transforms.RandomResizedCrop(im_res, scale=(0.8, 0.8), ratio=(1.0, 1.0))
            )
        if "contrast" in augments:
            # sharpness-based augmentation
            augment_list.append(
                transforms.RandomAdjustSharpness(sharpness_factor=2)
            )
        augment_compose = transforms.Compose(augment_list)

        return augment_compose(sketch)

    def score_distillation_sampling(self,
                                    pred_rgb: torch.Tensor,
                                    crop_size: int,
                                    augments: str,
                                    prompt: Union[List, str],
                                    prompt_2: Optional[Union[List, str]] = None,
                                    height: Optional[int] = None,
                                    width: Optional[int] = None,
                                    negative_prompt: Union[List, str] = None,
                                    negative_prompt_2: Optional[Union[List, str]] = None,
                                    guidance_scale: float = 100,
                                    as_latent: bool = False,
                                    grad_scale: float = 1,
                                    t_range: Union[List[float], Tuple[float]] = (0.05, 0.95),
                                    original_size: Optional[Tuple[int, int]] = None,
                                    crops_coords_top_left: Tuple[int, int] = (0, 0),
                                    target_size: Optional[Tuple[int, int]] = None):
        """Augmented score distillation sampling (ASDS) with SDXL.

        Returns a dummy loss whose backward pass carries the (weighted) SDS gradient into
        `pred_rgb`, together with the mean of that gradient.
        """
        height = height or self.unet.config.sample_size * self.vae_scale_factor
        width = width or self.unet.config.sample_size * self.vae_scale_factor

        original_size = original_size or (height, width)
        target_size = target_size or (height, width)

        batch_size = 1 if isinstance(prompt, str) else len(prompt)

        num_train_timesteps = self.scheduler.config.num_train_timesteps
        min_step = int(num_train_timesteps * t_range[0])
        max_step = int(num_train_timesteps * t_range[1])
        alphas = self.scheduler.alphas_cumprod.to(self.device)

        num_images_per_prompt = 1

        # encode the input prompt; `guidance_scale > 1` enables classifier-free guidance
        do_classifier_free_guidance = guidance_scale > 1.0
        (
            text_embeddings,
            negative_text_embeddings,
            pooled_text_embeddings,
            negative_pooled_text_embeddings,
        ) = self.encode_prompt(
            prompt=prompt,
            prompt_2=prompt_2,
            device=self.device,
            num_images_per_prompt=num_images_per_prompt,
            do_classifier_free_guidance=do_classifier_free_guidance,
            negative_prompt=negative_prompt,
            negative_prompt_2=negative_prompt_2,
        )

        # augment the rendered image before encoding
        pred_rgb_a = self.S_aug(pred_rgb, crop_size, augments)

        if as_latent:
            # treat the (resized, rescaled) input directly as latents
            latents = F.interpolate(pred_rgb_a, (128, 128), mode='bilinear', align_corners=False) * 2 - 1
        else:
            # encode the augmented image into latents with the VAE (gradients flow through the encoder)
            latents = self.encode2latents(
                pred_rgb_a,
                batch_size,
                num_images_per_prompt,
                text_embeddings.dtype,
                self.device
            )

        # sample a random timestep in [min_step, max_step]
        t = torch.randint(min_step, max_step + 1, [1], dtype=torch.long, device=self.device)

        # prepare SDXL micro-conditioning
        add_text_embeddings = pooled_text_embeddings
        add_time_ids = self._get_add_time_ids(
            original_size, crops_coords_top_left, target_size, dtype=text_embeddings.dtype
        )

        if do_classifier_free_guidance:
            text_embeddings = torch.cat([negative_text_embeddings, text_embeddings], dim=0)
            add_text_embeddings = torch.cat([negative_pooled_text_embeddings, add_text_embeddings], dim=0)
            add_time_ids = torch.cat([add_time_ids, add_time_ids], dim=0)

        text_embeddings = text_embeddings.to(self.device)
        add_text_embeddings = add_text_embeddings.to(self.device)
        add_time_ids = add_time_ids.to(self.device).repeat(batch_size * num_images_per_prompt, 1)

        # predict the noise residual with the frozen UNet (no gradients through the UNet)
        with torch.no_grad():
            # add noise to the latents at timestep t
            noise = torch.randn_like(latents)
            latents_noisy = self.scheduler.add_noise(latents, noise, t)
            # expand latents for classifier-free guidance
            latent_model_input = torch.cat([latents_noisy] * 2) if do_classifier_free_guidance else latents_noisy
            added_cond_kwargs = {"text_embeds": add_text_embeddings, "time_ids": add_time_ids}
            noise_pred = self.unet(
                latent_model_input,
                t,
                encoder_hidden_states=text_embeddings,
                added_cond_kwargs=added_cond_kwargs
            ).sample

        # perform classifier-free guidance
        if do_classifier_free_guidance:
            noise_pred_uncond, noise_pred_pos = noise_pred.chunk(2)
            noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_pos - noise_pred_uncond)

        # SDS gradient: grad = w(t) * (eps_pred - eps), with w(t) = 1 - alpha_bar_t
        w = (1 - alphas[t])
        grad = grad_scale * w * (noise_pred - noise)
        grad = torch.nan_to_num(grad)

        # route the SDS gradient to the latents via a dummy loss
        loss = SpecifyGradient.apply(latents, grad)

        return loss, grad.mean()

    def register_attention_control(self, controller):
        """Replace the UNet attention processors so `controller` records the attention maps."""
        attn_procs = {}
        cross_att_count = 0
        for name in self.unet.attn_processors.keys():
            cross_attention_dim = None if name.endswith("attn1.processor") else self.unet.config.cross_attention_dim
            if name.startswith("mid_block"):
                hidden_size = self.unet.config.block_out_channels[-1]
                place_in_unet = "mid"
            elif name.startswith("up_blocks"):
                block_id = int(name[len("up_blocks.")])
                hidden_size = list(reversed(self.unet.config.block_out_channels))[block_id]
                place_in_unet = "up"
            elif name.startswith("down_blocks"):
                block_id = int(name[len("down_blocks.")])
                hidden_size = self.unet.config.block_out_channels[block_id]
                place_in_unet = "down"
            else:
                continue
            cross_att_count += 1
            attn_procs[name] = P2PCrossAttnProcessor(
                controller=controller, place_in_unet=place_in_unet
            )

        self.unet.set_attn_processor(attn_procs)
        controller.num_att_layers = cross_att_count

    @staticmethod
    def aggregate_attention(prompts,
                            attention_store: AttentionStore,
                            res: int,
                            from_where: List[str],
                            is_cross: bool,
                            select: int):
        if isinstance(prompts, str):
            prompts = [prompts]
        assert isinstance(prompts, list)

        out = []
        attention_maps = attention_store.get_average_attention()
        num_pixels = res ** 2
        for location in from_where:
            for item in attention_maps[f"{location}_{'cross' if is_cross else 'self'}"]:
                if item.shape[1] == num_pixels:
                    cross_maps = item.reshape(len(prompts), -1, res, res, item.shape[-1])[select]
                    out.append(cross_maps)
        out = torch.cat(out, dim=0)
        out = out.sum(0) / out.shape[0]
        return out.cpu()

    def get_cross_attention(self,
                            prompts,
                            attention_store: AttentionStore,
                            res: int,
                            from_where: List[str],
                            select: int = 0,
                            save_path=None):
        tokens = self.tokenizer.encode(prompts[select])
        decoder = self.tokenizer.decode
        # aggregate the cross-attention maps over the selected UNet blocks
        attention_maps = self.aggregate_attention(prompts, attention_store, res, from_where, True, select)

        # visualize one attention map per prompt token
        images = []
        for i in range(len(tokens)):
            image = attention_maps[:, :, i]
            image = 255 * image / image.max()
            image = image.unsqueeze(-1).expand(*image.shape, 3)
            image = image.numpy().astype(np.uint8)
            image = np.array(Image.fromarray(image).resize((256, 256)))
            image = text_under_image(image, decoder(int(tokens[i])))
            images.append(image)
        image_array = np.stack(images, axis=0)
        view_images(image_array, save_image=True, fp=save_path)

        return attention_maps, tokens

    def get_self_attention_comp(self,
                                prompts,
                                attention_store: AttentionStore,
                                res: int,
                                from_where: List[str],
                                img_size: int = 224,
                                max_com=10,
                                select: int = 0,
                                save_path: AnyStr = None):
        attention_maps = self.aggregate_attention(prompts, attention_store, res, from_where, False, select)
        attention_maps = attention_maps.numpy().reshape((res ** 2, res ** 2))
        # principal components of the mean-centred self-attention maps via SVD
        u, s, vh = np.linalg.svd(attention_maps - np.mean(attention_maps, axis=1, keepdims=True))
        print(f"self-attention_maps: {attention_maps.shape}, "
              f"u: {u.shape}, "
              f"s: {s.shape}, "
              f"vh: {vh.shape}")

        images = []
        vh_returns = []
        for i in range(max_com):
            image = vh[i].reshape(res, res)
            image = (image - image.min()) / (image.max() - image.min())
            image = 255 * image

            # resize the component to `img_size` for the returned stack
            ret_ = Image.fromarray(image).resize((img_size, img_size), resample=PIL.Image.Resampling.BILINEAR)
            vh_returns.append(np.array(ret_))

            image = np.repeat(np.expand_dims(image, axis=2), 3, axis=2).astype(np.uint8)
            image = Image.fromarray(image).resize((256, 256))
            image = np.array(image)
            images.append(image)
        image_array = np.stack(images, axis=0)
        view_images(image_array, num_rows=max_com // 10, offset_ratio=0,
                    save_image=True, fp=save_path / "self-attn-vh.png")

        return attention_maps, (u, s, vh), np.stack(vh_returns, axis=0)


class P2PCrossAttnProcessor:
    """Attention processor that forwards attention probabilities to a prompt-to-prompt controller."""

    def __init__(self, controller, place_in_unet):
        super().__init__()
        self.controller = controller
        self.place_in_unet = place_in_unet

    def __call__(self, attn, hidden_states, encoder_hidden_states=None, attention_mask=None):
        batch_size, sequence_length, _ = hidden_states.shape
        attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size=batch_size)

        query = attn.to_q(hidden_states)

        is_cross = encoder_hidden_states is not None
        encoder_hidden_states = encoder_hidden_states if encoder_hidden_states is not None else hidden_states
        key = attn.to_k(encoder_hidden_states)
        value = attn.to_v(encoder_hidden_states)

        query = attn.head_to_batch_dim(query)
        key = attn.head_to_batch_dim(key)
        value = attn.head_to_batch_dim(value)

        attention_probs = attn.get_attention_scores(query, key, attention_mask)

        # one controller step: store (and possibly edit) the attention probabilities
        self.controller(attention_probs, is_cross, self.place_in_unet)

        hidden_states = torch.bmm(attention_probs, value)
        hidden_states = attn.batch_to_head_dim(hidden_states)

        # linear projection
        hidden_states = attn.to_out[0](hidden_states)
        # dropout
        hidden_states = attn.to_out[1](hidden_states)

        return hidden_states


class SpecifyGradient(torch.autograd.Function):
    """Return a dummy scalar loss whose backward pass injects a precomputed gradient."""

    @staticmethod
    @custom_fwd
    def forward(ctx, input_tensor, gt_grad):
        ctx.save_for_backward(gt_grad)
        # return a dummy loss value; the actual gradient is supplied in backward()
        return torch.ones([1], device=input_tensor.device, dtype=input_tensor.dtype)

    @staticmethod
    @custom_bwd
    def backward(ctx, grad_scale):
        gt_grad, = ctx.saved_tensors
        gt_grad = gt_grad * grad_scale
        return gt_grad, None

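
if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module). Assumptions: the checkpoint id,
    # prompt, and hyper-parameters below are illustrative only; in practice `pred_rgb` would come
    # from a differentiable renderer (e.g. a vector-graphics rasterizer) rather than a raw tensor.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    pipe = Token2AttnMixinASDSSDXLPipeline.from_pretrained(
        "stabilityai/stable-diffusion-xl-base-1.0"
    ).to(device)

    # a dummy differentiable image in [0, 1]
    pred_rgb = torch.rand(1, 3, 1024, 1024, device=device, requires_grad=True)
    optimizer = torch.optim.Adam([pred_rgb], lr=1e-2)

    for _ in range(10):
        # ASDS: the returned loss is a dummy scalar whose backward pass carries the SDS gradient
        loss, grad_mean = pipe.score_distillation_sampling(
            pred_rgb,
            crop_size=1024,
            augments="affine_contrast",
            prompt="a line sketch of an owl",
            guidance_scale=100,
        )
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        print(f"grad mean: {grad_mean.item():.6f}")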